Upgrade V8 to 5.1.281.57  DO NOT MERGE

FPIIM-449

Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/DEPS b/src/DEPS
index b0b703b..1bb616e 100644
--- a/src/DEPS
+++ b/src/DEPS
@@ -21,7 +21,4 @@
   "d8\.cc": [
     "+include/libplatform/libplatform.h",
   ],
-  "api-experimental\.cc": [
-    "+src/compiler/fast-accessor-assembler.h",
-  ],
 }
diff --git a/src/accessors.cc b/src/accessors.cc
index 766509e..374c0a2 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -32,6 +32,7 @@
   info->set_all_can_read(false);
   info->set_all_can_write(false);
   info->set_is_special_data_property(true);
+  info->set_is_sloppy(false);
   name = factory->InternalizeName(name);
   info->set_name(*name);
   Handle<Object> get = v8::FromCData(isolate, getter);
@@ -817,7 +818,7 @@
   } else {
     // If the function isn't compiled yet, the length is not computed
     // correctly yet. Compile it now and return the right length.
-    if (Compiler::Compile(function, KEEP_EXCEPTION)) {
+    if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
       length = function->shared()->length();
     }
     if (isolate->has_pending_exception()) {
diff --git a/src/api-arguments.cc b/src/api-arguments.cc
new file mode 100644
index 0000000..c4b698c
--- /dev/null
+++ b/src/api-arguments.cc
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-arguments.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
+  Isolate* isolate = this->isolate();
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+  FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_,
+                                       is_construct_call_);
+  f(info);
+  return GetReturnValue<Object>(isolate);
+}
+
+Handle<JSObject> PropertyCallbackArguments::Call(
+    IndexedPropertyEnumeratorCallback f) {
+  Isolate* isolate = this->isolate();
+  VMState<EXTERNAL> state(isolate);
+  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+  PropertyCallbackInfo<v8::Array> info(begin());
+  f(info);
+  return GetReturnValue<JSObject>(isolate);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/api-arguments.h b/src/api-arguments.h
new file mode 100644
index 0000000..3bfe34d
--- /dev/null
+++ b/src/api-arguments.h
@@ -0,0 +1,254 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_API_ARGUMENTS_H_
+#define V8_API_ARGUMENTS_H_
+
+#include "src/api.h"
+#include "src/isolate.h"
+#include "src/tracing/trace-event.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+template <int kArrayLength>
+class CustomArgumentsBase : public Relocatable {
+ public:
+  virtual inline void IterateInstance(ObjectVisitor* v) {
+    v->VisitPointers(values_, values_ + kArrayLength);
+  }
+
+ protected:
+  inline Object** begin() { return values_; }
+  explicit inline CustomArgumentsBase(Isolate* isolate)
+      : Relocatable(isolate) {}
+  Object* values_[kArrayLength];
+};
+
+template <typename T>
+class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
+ public:
+  static const int kReturnValueOffset = T::kReturnValueIndex;
+
+  typedef CustomArgumentsBase<T::kArgsLength> Super;
+  ~CustomArguments() {
+    this->begin()[kReturnValueOffset] =
+        reinterpret_cast<Object*>(kHandleZapValue);
+  }
+
+ protected:
+  explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
+
+  template <typename V>
+  Handle<V> GetReturnValue(Isolate* isolate);
+
+  inline Isolate* isolate() {
+    return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
+  }
+};
+
+template <typename T>
+template <typename V>
+Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
+  // Check the ReturnValue.
+  Object** handle = &this->begin()[kReturnValueOffset];
+  // Nothing was set, return empty handle as per previous behaviour.
+  if ((*handle)->IsTheHole()) return Handle<V>();
+  Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
+  result->VerifyApiCallResultType();
+  return result;
+}
+
+class PropertyCallbackArguments
+    : public CustomArguments<PropertyCallbackInfo<Value> > {
+ public:
+  typedef PropertyCallbackInfo<Value> T;
+  typedef CustomArguments<T> Super;
+  static const int kArgsLength = T::kArgsLength;
+  static const int kThisIndex = T::kThisIndex;
+  static const int kHolderIndex = T::kHolderIndex;
+  static const int kDataIndex = T::kDataIndex;
+  static const int kReturnValueDefaultValueIndex =
+      T::kReturnValueDefaultValueIndex;
+  static const int kIsolateIndex = T::kIsolateIndex;
+  static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
+
+  PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
+                            JSObject* holder, Object::ShouldThrow should_throw)
+      : Super(isolate) {
+    Object** values = this->begin();
+    values[T::kThisIndex] = self;
+    values[T::kHolderIndex] = holder;
+    values[T::kDataIndex] = data;
+    values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+    values[T::kShouldThrowOnErrorIndex] =
+        Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
+
+    // Here the hole is set as default value.
+    // It cannot escape into js as it's removed in Call below.
+    values[T::kReturnValueDefaultValueIndex] =
+        isolate->heap()->the_hole_value();
+    values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+    DCHECK(values[T::kHolderIndex]->IsHeapObject());
+    DCHECK(values[T::kIsolateIndex]->IsSmi());
+  }
+
+/*
+ * The following Call functions wrap the calling of all callbacks to handle
+ * calling either the old or the new style callbacks depending on which one
+ * has been registered.
+ * For old callbacks which return an empty handle, the ReturnValue is checked
+ * and used if it's been set to anything inside the callback.
+ * New style callbacks always use the return value.
+ */
+  Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F)                  \
+  F(AccessorNameGetterCallback, "get", v8::Value, Object)          \
+  F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
+  F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn)         \
+  Handle<InternalReturn> Call(Function f, Handle<Name> name) {               \
+    Isolate* isolate = this->isolate();                                      \
+    VMState<EXTERNAL> state(isolate);                                        \
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));             \
+    PropertyCallbackInfo<ApiReturn> info(begin());                           \
+    LOG(isolate,                                                             \
+        ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
+    f(v8::Utils::ToLocal(name), info);                                       \
+    return GetReturnValue<InternalReturn>(isolate);                          \
+  }
+
+  FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
+#undef WRITE_CALL_1_NAME
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F)            \
+  F(IndexedPropertyGetterCallback, "get", v8::Value, Object)  \
+  F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
+  F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn)  \
+  Handle<InternalReturn> Call(Function f, uint32_t index) {            \
+    Isolate* isolate = this->isolate();                                \
+    VMState<EXTERNAL> state(isolate);                                  \
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));       \
+    PropertyCallbackInfo<ApiReturn> info(begin());                     \
+    LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
+                                          holder(), index));           \
+    f(index, info);                                                    \
+    return GetReturnValue<InternalReturn>(isolate);                    \
+  }
+
+  FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
+#undef WRITE_CALL_1_INDEX
+
+  Handle<Object> Call(GenericNamedPropertySetterCallback f, Handle<Name> name,
+                      Handle<Object> value) {
+    Isolate* isolate = this->isolate();
+    VMState<EXTERNAL> state(isolate);
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+    PropertyCallbackInfo<v8::Value> info(begin());
+    LOG(isolate,
+        ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+    f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+    return GetReturnValue<Object>(isolate);
+  }
+
+  Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
+                      Handle<Object> value) {
+    Isolate* isolate = this->isolate();
+    VMState<EXTERNAL> state(isolate);
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+    PropertyCallbackInfo<v8::Value> info(begin());
+    LOG(isolate,
+        ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
+    f(index, v8::Utils::ToLocal(value), info);
+    return GetReturnValue<Object>(isolate);
+  }
+
+  void Call(AccessorNameSetterCallback f, Handle<Name> name,
+            Handle<Object> value) {
+    Isolate* isolate = this->isolate();
+    VMState<EXTERNAL> state(isolate);
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+    PropertyCallbackInfo<void> info(begin());
+    LOG(isolate,
+        ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+    f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+  }
+
+ private:
+  inline JSObject* holder() {
+    return JSObject::cast(this->begin()[T::kHolderIndex]);
+  }
+};
+
+class FunctionCallbackArguments
+    : public CustomArguments<FunctionCallbackInfo<Value> > {
+ public:
+  typedef FunctionCallbackInfo<Value> T;
+  typedef CustomArguments<T> Super;
+  static const int kArgsLength = T::kArgsLength;
+  static const int kHolderIndex = T::kHolderIndex;
+  static const int kDataIndex = T::kDataIndex;
+  static const int kReturnValueDefaultValueIndex =
+      T::kReturnValueDefaultValueIndex;
+  static const int kIsolateIndex = T::kIsolateIndex;
+  static const int kCalleeIndex = T::kCalleeIndex;
+  static const int kContextSaveIndex = T::kContextSaveIndex;
+
+  FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
+                            internal::HeapObject* callee,
+                            internal::Object* holder, internal::Object** argv,
+                            int argc, bool is_construct_call)
+      : Super(isolate),
+        argv_(argv),
+        argc_(argc),
+        is_construct_call_(is_construct_call) {
+    Object** values = begin();
+    values[T::kDataIndex] = data;
+    values[T::kCalleeIndex] = callee;
+    values[T::kHolderIndex] = holder;
+    values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
+    values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
+    // Here the hole is set as default value.
+    // It cannot escape into js as it's removed in Call below.
+    values[T::kReturnValueDefaultValueIndex] =
+        isolate->heap()->the_hole_value();
+    values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
+    DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
+           values[T::kCalleeIndex]->IsFunctionTemplateInfo());
+    DCHECK(values[T::kHolderIndex]->IsHeapObject());
+    DCHECK(values[T::kIsolateIndex]->IsSmi());
+  }
+
+  /*
+   * The following Call function wraps the calling of all callbacks to handle
+   * calling either the old or the new style callbacks depending on which one
+   * has been registered.
+   * For old callbacks which return an empty handle, the ReturnValue is checked
+   * and used if it's been set to anything inside the callback.
+   * New style callbacks always use the return value.
+   */
+  Handle<Object> Call(FunctionCallback f);
+
+ private:
+  internal::Object** argv_;
+  int argc_;
+  bool is_construct_call_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_API_ARGUMENTS_H_
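
For orientation, a minimal sketch of the embedder-side callback that the new FunctionCallbackArguments plumbing services; it uses only the public v8.h API, the function name is illustrative, and registration is shown as comments. If a callback never touches GetReturnValue(), the kReturnValueIndex slot keeps the hole and Call() returns an empty handle, as noted in GetReturnValue() above.

#include "include/v8.h"

// Illustrative embedder callback; V8 invokes it via
// FunctionCallbackArguments::Call() from api-arguments.cc above.
void Add(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
  double sum = 0;
  for (int i = 0; i < info.Length(); ++i) {
    sum += info[i]->NumberValue(context).FromMaybe(0);
  }
  // Writes the kReturnValueIndex slot that GetReturnValue<>() reads back.
  info.GetReturnValue().Set(sum);
}

// Registration, assuming an already-created isolate:
//   v8::Local<v8::FunctionTemplate> tmpl =
//       v8::FunctionTemplate::New(isolate, Add);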
diff --git a/src/api-experimental.cc b/src/api-experimental.cc
index 98d62e3..3928434 100644
--- a/src/api-experimental.cc
+++ b/src/api-experimental.cc
@@ -11,20 +11,17 @@
 #include "include/v8.h"
 #include "include/v8-experimental.h"
 #include "src/api.h"
-#include "src/compiler/fast-accessor-assembler.h"
+#include "src/fast-accessor-assembler.h"
 
 namespace {
 
-
-v8::internal::compiler::FastAccessorAssembler* FromApi(
+v8::internal::FastAccessorAssembler* FromApi(
     v8::experimental::FastAccessorBuilder* builder) {
-  return reinterpret_cast<v8::internal::compiler::FastAccessorAssembler*>(
-      builder);
+  return reinterpret_cast<v8::internal::FastAccessorAssembler*>(builder);
 }
 
-
 v8::experimental::FastAccessorBuilder* FromInternal(
-    v8::internal::compiler::FastAccessorAssembler* fast_accessor_assembler) {
+    v8::internal::FastAccessorAssembler* fast_accessor_assembler) {
   return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
       fast_accessor_assembler);
 }
@@ -57,8 +54,8 @@
 
 FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  internal::compiler::FastAccessorAssembler* faa =
-      new internal::compiler::FastAccessorAssembler(i_isolate);
+  internal::FastAccessorAssembler* faa =
+      new internal::FastAccessorAssembler(i_isolate);
   return FromInternal(faa);
 }
 
diff --git a/src/api-natives.cc b/src/api-natives.cc
index 3be2df0..adf4b6a 100644
--- a/src/api-natives.cc
+++ b/src/api-natives.cc
@@ -266,28 +266,45 @@
   return obj;
 }
 
-void CacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number,
+void CacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number,
                                 Handle<JSObject> object) {
   auto cache = isolate->template_instantiations_cache();
-  auto new_cache = ObjectHashTable::Put(cache, serial_number, object);
+  auto new_cache =
+      UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
   isolate->native_context()->set_template_instantiations_cache(*new_cache);
 }
 
-void UncacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number) {
+void UncacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number) {
   auto cache = isolate->template_instantiations_cache();
-  bool was_present = false;
-  auto new_cache = ObjectHashTable::Remove(cache, serial_number, &was_present);
-  DCHECK(was_present);
+  int entry = cache->FindEntry(serial_number);
+  DCHECK(entry != UnseededNumberDictionary::kNotFound);
+  Handle<Object> result =
+      UnseededNumberDictionary::DeleteProperty(cache, entry);
+  USE(result);
+  DCHECK(result->IsTrue());
+  auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
   isolate->native_context()->set_template_instantiations_cache(*new_cache);
 }
 
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
                                         Handle<ObjectTemplateInfo> info,
                                         bool is_hidden_prototype) {
-  // Enter a new scope.  Recursion could otherwise create a lot of handles.
-  HandleScope scope(isolate);
   // Fast path.
   Handle<JSObject> result;
+  uint32_t serial_number =
+      static_cast<uint32_t>(Smi::cast(info->serial_number())->value());
+  if (serial_number) {
+    // Probe cache.
+    auto cache = isolate->template_instantiations_cache();
+    int entry = cache->FindEntry(serial_number);
+    if (entry != UnseededNumberDictionary::kNotFound) {
+      Object* boilerplate = cache->ValueAt(entry);
+      result = handle(JSObject::cast(boilerplate), isolate);
+      return isolate->factory()->CopyJSObject(result);
+    }
+  }
+  // Enter a new scope.  Recursion could otherwise create a lot of handles.
+  HandleScope scope(isolate);
   auto constructor = handle(info->constructor(), isolate);
   Handle<JSFunction> cons;
   if (constructor->IsUndefined()) {
@@ -297,18 +314,6 @@
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
   }
-  auto serial_number = handle(Smi::cast(info->serial_number()), isolate);
-  if (serial_number->value()) {
-    // Probe cache.
-    auto cache = isolate->template_instantiations_cache();
-    Object* boilerplate = cache->Lookup(serial_number);
-    if (boilerplate->IsJSObject()) {
-      result = handle(JSObject::cast(boilerplate), isolate);
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
-      return scope.CloseAndEscape(result);
-    }
-  }
   auto object = isolate->factory()->NewJSObject(cons);
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, result,
@@ -317,10 +322,9 @@
   // TODO(dcarney): is this necessary?
   JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
 
-  if (serial_number->value()) {
+  if (serial_number) {
     CacheTemplateInstantiation(isolate, serial_number, result);
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
+    result = isolate->factory()->CopyJSObject(result);
   }
   return scope.CloseAndEscape(result);
 }
@@ -329,12 +333,14 @@
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
                                             Handle<FunctionTemplateInfo> data,
                                             Handle<Name> name) {
-  auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
-  if (serial_number->value()) {
+  uint32_t serial_number =
+      static_cast<uint32_t>(Smi::cast(data->serial_number())->value());
+  if (serial_number) {
     // Probe cache.
     auto cache = isolate->template_instantiations_cache();
-    Object* element = cache->Lookup(serial_number);
-    if (element->IsJSFunction()) {
+    int entry = cache->FindEntry(serial_number);
+    if (entry != UnseededNumberDictionary::kNotFound) {
+      Object* element = cache->ValueAt(entry);
       return handle(JSFunction::cast(element), isolate);
     }
   }
@@ -378,7 +384,7 @@
   if (!name.is_null() && name->IsString()) {
     function->shared()->set_name(*name);
   }
-  if (serial_number->value()) {
+  if (serial_number) {
     // Cache the function.
     CacheTemplateInstantiation(isolate, serial_number, function);
   }
@@ -386,7 +392,7 @@
       ConfigureInstance(isolate, function, data, data->hidden_prototype());
   if (result.is_null()) {
     // Uncache on error.
-    if (serial_number->value()) {
+    if (serial_number) {
       UncacheTemplateInstantiation(isolate, serial_number);
     }
     return MaybeHandle<JSFunction>();
@@ -536,7 +542,13 @@
     InstanceType type;
     switch (instance_type) {
       case JavaScriptObjectType:
-        type = JS_OBJECT_TYPE;
+        if (!obj->needs_access_check() &&
+            obj->named_property_handler()->IsUndefined() &&
+            obj->indexed_property_handler()->IsUndefined()) {
+          type = JS_OBJECT_TYPE;
+        } else {
+          type = JS_SPECIAL_API_OBJECT_TYPE;
+        }
         instance_size += JSObject::kHeaderSize;
         break;
       case GlobalObjectType:
@@ -564,7 +576,7 @@
     result->shared()->set_instance_class_name(*class_name);
     result->shared()->set_name(*class_name);
   }
-  result->shared()->set_function_data(*obj);
+  result->shared()->set_api_func_data(*obj);
   result->shared()->set_construct_stub(*construct_stub);
   result->shared()->DontAdaptArguments();
 
diff --git a/src/api.cc b/src/api.cc
index a71dcfa..853bd50 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -156,6 +156,18 @@
 };
 
 
+#ifdef DEBUG
+void CheckMicrotasksScopesConsistency(i::Isolate* isolate) {
+  auto handle_scope_implementer = isolate->handle_scope_implementer();
+  if (handle_scope_implementer->microtasks_policy() ==
+      v8::MicrotasksPolicy::kScoped) {
+    DCHECK(handle_scope_implementer->GetMicrotasksScopeDepth() ||
+           !handle_scope_implementer->DebugMicrotasksScopeDepthIsZero());
+  }
+}
+#endif
+
+
 class CallDepthScope {
  public:
   explicit CallDepthScope(i::Isolate* isolate, Local<Context> context,
@@ -175,6 +187,9 @@
     if (!context_.IsEmpty()) context_->Exit();
     if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
     if (do_callback_) isolate_->FireCallCompletedCallback();
+#ifdef DEBUG
+    if (do_callback_) CheckMicrotasksScopesConsistency(isolate_);
+#endif
   }
 
   void Escape() {
@@ -326,36 +341,6 @@
   i::V8::SetSnapshotBlob(snapshot_blob);
 }
 
-
-bool RunExtraCode(Isolate* isolate, Local<Context> context,
-                  const char* utf8_source) {
-  // Run custom script if provided.
-  base::ElapsedTimer timer;
-  timer.Start();
-  TryCatch try_catch(isolate);
-  Local<String> source_string;
-  if (!String::NewFromUtf8(isolate, utf8_source, NewStringType::kNormal)
-           .ToLocal(&source_string)) {
-    return false;
-  }
-  Local<String> resource_name =
-      String::NewFromUtf8(isolate, "<embedded script>", NewStringType::kNormal)
-          .ToLocalChecked();
-  ScriptOrigin origin(resource_name);
-  ScriptCompiler::Source source(source_string, origin);
-  Local<Script> script;
-  if (!ScriptCompiler::Compile(context, &source).ToLocal(&script)) return false;
-  if (script->Run(context).IsEmpty()) return false;
-  if (i::FLAG_profile_deserialization) {
-    i::PrintF("Executing custom snapshot script took %0.3f ms\n",
-              timer.Elapsed().InMillisecondsF());
-  }
-  timer.Stop();
-  CHECK(!try_catch.HasCaught());
-  return true;
-}
-
-
 namespace {
 
 class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
@@ -368,79 +353,180 @@
   virtual void Free(void* data, size_t) { free(data); }
 };
 
+bool RunExtraCode(Isolate* isolate, Local<Context> context,
+                  const char* utf8_source, const char* name) {
+  base::ElapsedTimer timer;
+  timer.Start();
+  Context::Scope context_scope(context);
+  TryCatch try_catch(isolate);
+  Local<String> source_string;
+  if (!String::NewFromUtf8(isolate, utf8_source, NewStringType::kNormal)
+           .ToLocal(&source_string)) {
+    return false;
+  }
+  Local<String> resource_name =
+      String::NewFromUtf8(isolate, name, NewStringType::kNormal)
+          .ToLocalChecked();
+  ScriptOrigin origin(resource_name);
+  ScriptCompiler::Source source(source_string, origin);
+  Local<Script> script;
+  if (!ScriptCompiler::Compile(context, &source).ToLocal(&script)) return false;
+  if (script->Run(context).IsEmpty()) return false;
+  if (i::FLAG_profile_deserialization) {
+    i::PrintF("Executing custom snapshot script %s took %0.3f ms\n", name,
+              timer.Elapsed().InMillisecondsF());
+  }
+  timer.Stop();
+  CHECK(!try_catch.HasCaught());
+  return true;
+}
+
+StartupData SerializeIsolateAndContext(
+    Isolate* isolate, Persistent<Context>* context,
+    i::Snapshot::Metadata metadata,
+    i::StartupSerializer::FunctionCodeHandling function_code_handling) {
+  if (context->IsEmpty()) return {NULL, 0};
+
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+  // If we don't do this then we end up with a stray root pointing at the
+  // context even after we have disposed of the context.
+  internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+
+  // GC may have cleared weak cells, so compact any WeakFixedArrays
+  // found on the heap.
+  i::HeapIterator iterator(internal_isolate->heap(),
+                           i::HeapIterator::kFilterUnreachable);
+  for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+    if (o->IsPrototypeInfo()) {
+      i::Object* prototype_users = i::PrototypeInfo::cast(o)->prototype_users();
+      if (prototype_users->IsWeakFixedArray()) {
+        i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
+        array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
+      }
+    } else if (o->IsScript()) {
+      i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
+      if (shared_list->IsWeakFixedArray()) {
+        i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
+        array->Compact<i::WeakFixedArray::NullCallback>();
+      }
+    }
+  }
+
+  i::Object* raw_context = *v8::Utils::OpenPersistent(*context);
+  context->Reset();
+
+  i::SnapshotByteSink snapshot_sink;
+  i::StartupSerializer ser(internal_isolate, &snapshot_sink,
+                           function_code_handling);
+  ser.SerializeStrongReferences();
+
+  i::SnapshotByteSink context_sink;
+  i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
+  context_ser.Serialize(&raw_context);
+  ser.SerializeWeakReferencesAndDeferred();
+
+  return i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
+}
+
 }  // namespace
 
+StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
+  // Create a new isolate and a new context from scratch, optionally run
+  // a script to embed, and serialize to create a snapshot blob.
+  StartupData result = {NULL, 0};
 
-StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
-  i::Isolate* internal_isolate = new i::Isolate(true);
+  base::ElapsedTimer timer;
+  timer.Start();
+
   ArrayBufferAllocator allocator;
+  i::Isolate* internal_isolate = new i::Isolate(true);
   internal_isolate->set_array_buffer_allocator(&allocator);
   Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
-  StartupData result = {NULL, 0};
+
   {
-    base::ElapsedTimer timer;
-    timer.Start();
     Isolate::Scope isolate_scope(isolate);
     internal_isolate->Init(NULL);
     Persistent<Context> context;
-    i::Snapshot::Metadata metadata;
     {
       HandleScope handle_scope(isolate);
       Local<Context> new_context = Context::New(isolate);
       context.Reset(isolate, new_context);
-      if (custom_source != NULL) {
-        metadata.set_embeds_script(true);
-        Context::Scope context_scope(new_context);
-        if (!RunExtraCode(isolate, new_context, custom_source)) context.Reset();
+      if (embedded_source != NULL &&
+          !RunExtraCode(isolate, new_context, embedded_source, "<embedded>")) {
+        context.Reset();
       }
     }
-    if (!context.IsEmpty()) {
-      // If we don't do this then we end up with a stray root pointing at the
-      // context even after we have disposed of the context.
-      internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
 
-      // GC may have cleared weak cells, so compact any WeakFixedArrays
-      // found on the heap.
-      i::HeapIterator iterator(internal_isolate->heap(),
-                               i::HeapIterator::kFilterUnreachable);
-      for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
-        if (o->IsPrototypeInfo()) {
-          i::Object* prototype_users =
-              i::PrototypeInfo::cast(o)->prototype_users();
-          if (prototype_users->IsWeakFixedArray()) {
-            i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
-            array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
-          }
-        } else if (o->IsScript()) {
-          i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
-          if (shared_list->IsWeakFixedArray()) {
-            i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
-            array->Compact<i::WeakFixedArray::NullCallback>();
-          }
-        }
-      }
+    i::Snapshot::Metadata metadata;
+    metadata.set_embeds_script(embedded_source != NULL);
 
-      i::Object* raw_context = *v8::Utils::OpenPersistent(context);
-      context.Reset();
-
-      i::SnapshotByteSink snapshot_sink;
-      i::StartupSerializer ser(internal_isolate, &snapshot_sink);
-      ser.SerializeStrongReferences();
-
-      i::SnapshotByteSink context_sink;
-      i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
-      context_ser.Serialize(&raw_context);
-      ser.SerializeWeakReferencesAndDeferred();
-
-      result = i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
-    }
-    if (i::FLAG_profile_deserialization) {
-      i::PrintF("Creating snapshot took %0.3f ms\n",
-                timer.Elapsed().InMillisecondsF());
-    }
-    timer.Stop();
+    result = SerializeIsolateAndContext(
+        isolate, &context, metadata, i::StartupSerializer::CLEAR_FUNCTION_CODE);
+    DCHECK(context.IsEmpty());
   }
   isolate->Dispose();
+
+  if (i::FLAG_profile_deserialization) {
+    i::PrintF("Creating snapshot took %0.3f ms\n",
+              timer.Elapsed().InMillisecondsF());
+  }
+  timer.Stop();
+  return result;
+}
+
+StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
+                                       const char* warmup_source) {
+  CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != NULL);
+  CHECK(warmup_source != NULL);
+  // Use the following steps to create a warm snapshot blob from a cold one:
+  //  - Create a new isolate from the cold snapshot.
+  //  - Create a new context to run the warmup script. This will trigger
+  //    compilation of executed functions.
+  //  - Create a new context. This context will be unpolluted.
+  //  - Serialize the isolate and the second context into a new snapshot blob.
+  StartupData result = {NULL, 0};
+
+  base::ElapsedTimer timer;
+  timer.Start();
+
+  ArrayBufferAllocator allocator;
+  i::Isolate* internal_isolate = new i::Isolate(true);
+  internal_isolate->set_array_buffer_allocator(&allocator);
+  internal_isolate->set_snapshot_blob(&cold_snapshot_blob);
+  Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
+
+  {
+    Isolate::Scope isolate_scope(isolate);
+    i::Snapshot::Initialize(internal_isolate);
+    Persistent<Context> context;
+    bool success;
+    {
+      HandleScope handle_scope(isolate);
+      Local<Context> new_context = Context::New(isolate);
+      success = RunExtraCode(isolate, new_context, warmup_source, "<warm-up>");
+    }
+    if (success) {
+      HandleScope handle_scope(isolate);
+      isolate->ContextDisposedNotification(false);
+      Local<Context> new_context = Context::New(isolate);
+      context.Reset(isolate, new_context);
+    }
+
+    i::Snapshot::Metadata metadata;
+    metadata.set_embeds_script(i::Snapshot::EmbedsScript(internal_isolate));
+
+    result = SerializeIsolateAndContext(
+        isolate, &context, metadata, i::StartupSerializer::KEEP_FUNCTION_CODE);
+    DCHECK(context.IsEmpty());
+  }
+  isolate->Dispose();
+
+  if (i::FLAG_profile_deserialization) {
+    i::PrintF("Warming up snapshot took %0.3f ms\n",
+              timer.Elapsed().InMillisecondsF());
+  }
+  timer.Stop();
   return result;
 }
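
As a usage sketch for the two entry points above (the scripts and helper name are illustrative, not part of this change): CreateSnapshotDataBlob builds a cold blob, WarmUpSnapshotDataBlob replays a warm-up script in a throw-away context and then serializes a fresh, unpolluted one.

#include "include/v8.h"

// Assumes the platform and v8::V8::Initialize() are already set up,
// as in mksnapshot.
v8::StartupData BuildWarmSnapshot() {
  v8::StartupData cold =
      v8::V8::CreateSnapshotDataBlob("function square(x) { return x * x; }");
  v8::StartupData warm = v8::V8::WarmUpSnapshotDataBlob(cold, "square(4);");
  delete[] cold.data;  // ownership of both blobs stays with the embedder
  return warm;
}

// Isolates then boot from the warmed blob:
//   v8::Isolate::CreateParams params;
//   params.snapshot_blob = &warm_blob;
//   params.array_buffer_allocator = my_allocator;  // illustrative
//   v8::Isolate* isolate = v8::Isolate::New(params);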
 
@@ -591,6 +677,10 @@
   return result.location();
 }
 
+void V8::RegisterExternallyReferencedObject(i::Object** object,
+                                            i::Isolate* isolate) {
+  isolate->heap()->RegisterExternallyReferencedObject(object);
+}
 
 void V8::MakeWeak(i::Object** object, void* parameter,
                   WeakCallback weak_callback) {
@@ -937,9 +1027,16 @@
   i::Isolate* isolate = templ->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
+  auto value_obj = Utils::OpenHandle(*value);
+  if (value_obj->IsObjectTemplateInfo()) {
+    templ->set_serial_number(i::Smi::FromInt(0));
+    if (templ->IsFunctionTemplateInfo()) {
+      i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
+    }
+  }
   // TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
   i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
-                                 Utils::OpenHandle(*value),
+                                 value_obj,
                                  static_cast<i::PropertyAttributes>(attribute));
 }
 
@@ -1759,7 +1856,7 @@
     if (!source->source_map_url.IsEmpty()) {
       source_map_url = Utils::OpenHandle(*(source->source_map_url));
     }
-    result = i::Compiler::CompileScript(
+    result = i::Compiler::GetSharedFunctionInfoForScript(
         str, name_obj, line_offset, column_offset, source->resource_options,
         source_map_url, isolate->native_context(), NULL, &script_data, options,
         i::NOT_NATIVES_CODE, is_module);
@@ -1828,7 +1925,6 @@
 MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
                                                  Source* source,
                                                  CompileOptions options) {
-  CHECK(i::FLAG_harmony_modules);
   auto isolate = context->GetIsolate();
   auto maybe = CompileUnboundInternal(isolate, source, options, true);
   Local<UnboundScript> generic;
@@ -2025,8 +2121,8 @@
   i::Handle<i::SharedFunctionInfo> result;
   if (source->info->literal() != nullptr) {
     // Parsing has succeeded.
-    result = i::Compiler::CompileStreamedScript(script, source->info.get(),
-                                                str->length());
+    result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
+        script, source->info.get(), str->length());
   }
   has_pending_exception = result.is_null();
   if (has_pending_exception) isolate->ReportPendingMessages();
@@ -2200,7 +2296,7 @@
   if (!maybe.FromJust()) return v8::Local<Value>();
   Local<Value> result;
   has_pending_exception =
-      !ToLocal<Value>(i::Object::GetProperty(obj, name), &result);
+      !ToLocal<Value>(i::JSReceiver::GetProperty(obj, name), &result);
   RETURN_ON_FAILED_EXECUTION(Value);
   RETURN_ESCAPED(result);
 }
@@ -2429,7 +2525,7 @@
   ENTER_V8(isolate);
   EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
   auto self = Utils::OpenHandle(this);
-  auto obj = i::Object::GetElement(isolate, self, index).ToHandleChecked();
+  auto obj = i::JSReceiver::GetElement(isolate, self, index).ToHandleChecked();
   auto jsobj = i::Handle<i::JSObject>::cast(obj);
   return scope.Escape(Utils::StackFrameToLocal(jsobj));
 }
@@ -2469,7 +2565,7 @@
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(f);
   i::Handle<i::Object> obj =
-      i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+      i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
   return obj->IsSmi() ? i::Smi::cast(*obj)->value() : defaultValue;
 }
 
@@ -2496,7 +2592,7 @@
   EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
   i::Handle<i::JSObject> self = Utils::OpenHandle(f);
   i::Handle<i::Object> obj =
-      i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+      i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
   return obj->IsString()
              ? scope.Escape(Local<String>::Cast(Utils::ToLocal(obj)))
              : Local<String>();
@@ -2524,7 +2620,7 @@
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(f);
   i::Handle<i::Object> obj =
-      i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+      i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
   return obj->IsTrue();
 }
 
@@ -3472,7 +3568,7 @@
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
 
   i::LookupIterator it = i::LookupIterator::PropertyOrElement(
-      isolate, self, key_obj, i::LookupIterator::OWN);
+      isolate, self, key_obj, self, i::LookupIterator::OWN);
   Maybe<bool> result =
       i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
   has_pending_exception = result.IsNothing();
@@ -3489,7 +3585,7 @@
   i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
 
-  i::LookupIterator it(isolate, self, index, i::LookupIterator::OWN);
+  i::LookupIterator it(isolate, self, index, self, i::LookupIterator::OWN);
   Maybe<bool> result =
       i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
   has_pending_exception = result.IsNothing();
@@ -3595,7 +3691,7 @@
         i::Handle<i::Symbol>::cast(key_obj), &desc, i::Object::DONT_THROW);
   }
   auto js_object = i::Handle<i::JSObject>::cast(self);
-  i::LookupIterator it(js_object, key_obj);
+  i::LookupIterator it(js_object, key_obj, js_object);
   has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes(
                               &it, value_obj, i::DONT_ENUM)
                               .is_null();
@@ -3628,7 +3724,7 @@
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   has_pending_exception =
-      !i::Object::GetElement(isolate, self, index).ToHandle(&result);
+      !i::JSReceiver::GetElement(isolate, self, index).ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION(Value);
   RETURN_ESCAPED(Utils::ToLocal(result));
 }
@@ -3755,11 +3851,10 @@
       !i::JSReceiver::GetKeys(self, i::INCLUDE_PROTOS, i::ENUMERABLE_STRINGS)
            .ToHandle(&value);
   RETURN_ON_FAILED_EXECUTION(Array);
-  // Because we use caching to speed up enumeration it is important
-  // to never change the result of the basic enumeration function so
-  // we clone the result.
-  auto elms = isolate->factory()->CopyFixedArray(value);
-  auto result = isolate->factory()->NewJSArrayWithElements(elms);
+  DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
+         self->map()->EnumLength() == 0 ||
+         self->map()->instance_descriptors()->GetEnumCache() != *value);
+  auto result = isolate->factory()->NewJSArrayWithElements(value);
   RETURN_ESCAPED(Utils::ToLocal(result));
 }
 
@@ -3778,11 +3873,10 @@
       !i::JSReceiver::GetKeys(self, i::OWN_ONLY, i::ENUMERABLE_STRINGS)
            .ToHandle(&value);
   RETURN_ON_FAILED_EXECUTION(Array);
-  // Because we use caching to speed up enumeration it is important
-  // to never change the result of the basic enumeration function so
-  // we clone the result.
-  auto elms = isolate->factory()->CopyFixedArray(value);
-  auto result = isolate->factory()->NewJSArrayWithElements(elms);
+  DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
+         self->map()->EnumLength() == 0 ||
+         self->map()->instance_descriptors()->GetEnumCache() != *value);
+  auto result = isolate->factory()->NewJSArrayWithElements(value);
   RETURN_ESCAPED(Utils::ToLocal(result));
 }
 
@@ -3816,6 +3910,19 @@
   return Utils::ToLocal(name);
 }
 
+Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
+                                          IntegrityLevel level) {
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetIntegrityLevel()",
+                                  bool);
+  auto self = Utils::OpenHandle(this);
+  i::JSReceiver::IntegrityLevel i_level =
+      level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
+  Maybe<bool> result =
+      i::JSReceiver::SetIntegrityLevel(self, i_level, i::Object::DONT_THROW);
+  has_pending_exception = result.IsNothing();
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return result;
+}
 
 Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
   PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
@@ -4146,7 +4253,7 @@
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   i::LookupIterator it = i::LookupIterator::PropertyOrElement(
-      isolate, self, key_obj,
+      isolate, self, key_obj, self,
       i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
   Local<Value> result;
   has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
@@ -4170,7 +4277,7 @@
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   i::LookupIterator it = i::LookupIterator::PropertyOrElement(
-      isolate, self, key_obj,
+      isolate, self, key_obj, self,
       i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
   auto result = i::JSReceiver::GetPropertyAttributes(&it);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
@@ -5400,13 +5507,15 @@
   return true;
 }
 
-
-HeapStatistics::HeapStatistics(): total_heap_size_(0),
-                                  total_heap_size_executable_(0),
-                                  total_physical_size_(0),
-                                  used_heap_size_(0),
-                                  heap_size_limit_(0) { }
-
+HeapStatistics::HeapStatistics()
+    : total_heap_size_(0),
+      total_heap_size_executable_(0),
+      total_physical_size_(0),
+      total_available_size_(0),
+      used_heap_size_(0),
+      heap_size_limit_(0),
+      malloced_memory_(0),
+      does_zap_garbage_(0) {}
 
 HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
                                             space_size_(0),
@@ -6848,7 +6957,7 @@
     else
       symbol = isolate->factory()->NewSymbol();
     i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
-    i::JSObject::SetProperty(symbols, name, symbol, i::STRICT).Assert();
+    i::Object::SetPropertyOrElement(symbols, name, symbol, i::STRICT).Assert();
   }
   return i::Handle<i::Symbol>::cast(symbol);
 }
@@ -7084,6 +7193,10 @@
       reinterpret_cast<v8::Isolate::GCCallback>(callback), gc_type, false);
 }
 
+void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->SetEmbedderHeapTracer(tracer);
+}
 
 void Isolate::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                           ObjectSpace space,
@@ -7283,10 +7396,12 @@
     Isolate* isolate)
     : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
   isolate_->handle_scope_implementer()->IncrementCallDepth();
+  isolate_->handle_scope_implementer()->IncrementMicrotasksSuppressions();
 }
 
 
 Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
+  isolate_->handle_scope_implementer()->DecrementMicrotasksSuppressions();
   isolate_->handle_scope_implementer()->DecrementCallDepth();
 }
 
@@ -7301,6 +7416,8 @@
   heap_statistics->total_available_size_ = heap->Available();
   heap_statistics->used_heap_size_ = heap->SizeOfObjects();
   heap_statistics->heap_size_limit_ = heap->MaxReserved();
+  heap_statistics->malloced_memory_ =
+      isolate->allocator()->GetCurrentMemoryUsage();
   heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
 }
 
@@ -7428,6 +7545,7 @@
 
 
 void Isolate::RunMicrotasks() {
+  DCHECK(MicrotasksPolicy::kScoped != GetMicrotasksPolicy());
   reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
 }
 
@@ -7451,12 +7569,41 @@
 
 
 void Isolate::SetAutorunMicrotasks(bool autorun) {
-  reinterpret_cast<i::Isolate*>(this)->set_autorun_microtasks(autorun);
+  SetMicrotasksPolicy(
+      autorun ? MicrotasksPolicy::kAuto : MicrotasksPolicy::kExplicit);
 }
 
 
 bool Isolate::WillAutorunMicrotasks() const {
-  return reinterpret_cast<const i::Isolate*>(this)->autorun_microtasks();
+  return GetMicrotasksPolicy() == MicrotasksPolicy::kAuto;
+}
+
+
+void Isolate::SetMicrotasksPolicy(MicrotasksPolicy policy) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->handle_scope_implementer()->set_microtasks_policy(policy);
+}
+
+
+MicrotasksPolicy Isolate::GetMicrotasksPolicy() const {
+  i::Isolate* isolate =
+      reinterpret_cast<i::Isolate*>(const_cast<Isolate*>(this));
+  return isolate->handle_scope_implementer()->microtasks_policy();
+}
+
+
+void Isolate::AddMicrotasksCompletedCallback(
+    MicrotasksCompletedCallback callback) {
+  DCHECK(callback);
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->AddMicrotasksCompletedCallback(callback);
+}
+
+
+void Isolate::RemoveMicrotasksCompletedCallback(
+    MicrotasksCompletedCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->RemoveMicrotasksCompletedCallback(callback);
 }
 
 
@@ -7535,6 +7682,11 @@
   return isolate->heap()->SetOptimizeForMemoryUsage();
 }
 
+void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->heap()->MemoryPressureNotification(level,
+                                                     Locker::IsLocked(this));
+}
 
 void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
                                      JitCodeEventHandler event_handler) {
@@ -7680,6 +7832,49 @@
 }
 
 
+MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::Type type)
+    : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
+      run_(type == MicrotasksScope::kRunMicrotasks) {
+  auto handle_scope_implementer = isolate_->handle_scope_implementer();
+  if (run_) handle_scope_implementer->IncrementMicrotasksScopeDepth();
+#ifdef DEBUG
+  if (!run_) handle_scope_implementer->IncrementDebugMicrotasksScopeDepth();
+#endif
+}
+
+
+MicrotasksScope::~MicrotasksScope() {
+  auto handle_scope_implementer = isolate_->handle_scope_implementer();
+  if (run_) {
+    handle_scope_implementer->DecrementMicrotasksScopeDepth();
+    if (MicrotasksPolicy::kScoped ==
+        handle_scope_implementer->microtasks_policy()) {
+      PerformCheckpoint(reinterpret_cast<Isolate*>(isolate_));
+    }
+  }
+#ifdef DEBUG
+  if (!run_) handle_scope_implementer->DecrementDebugMicrotasksScopeDepth();
+#endif
+}
+
+
+void MicrotasksScope::PerformCheckpoint(Isolate* v8Isolate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
+  if (IsExecutionTerminatingCheck(isolate)) return;
+  auto handle_scope_implementer = isolate->handle_scope_implementer();
+  if (!handle_scope_implementer->GetMicrotasksScopeDepth() &&
+      !handle_scope_implementer->HasMicrotasksSuppressions()) {
+    isolate->RunMicrotasks();
+  }
+}
+
+
+int MicrotasksScope::GetCurrentDepth(Isolate* v8Isolate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
+  return isolate->handle_scope_implementer()->GetMicrotasksScopeDepth();
+}
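
A hedged sketch of the embedder pattern the scoped policy enables (only the v8:: names are real API; the surrounding function is illustrative): with MicrotasksPolicy::kScoped, microtasks run when the outermost kRunMicrotasks scope unwinds, and the debug-only depth counters added in api.h catch calls that bypass the scopes.

#include "include/v8.h"

// 'isolate' is an already-initialized v8::Isolate owned by the embedder.
void HandleTask(v8::Isolate* isolate) {
  isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
  {
    // Depth 0 -> 1; nested scopes opened during the same task only bump
    // the counter, so microtasks never run in the middle of it.
    v8::MicrotasksScope run_scope(isolate,
                                  v8::MicrotasksScope::kRunMicrotasks);
    // ... enter a context and call into script ...
  }  // Depth back to 0: ~MicrotasksScope performs the checkpoint and runs
     // queued microtasks, unless a SuppressMicrotaskExecutionScope is active.

  // Paths that must not trigger microtasks use the non-running flavour,
  // which in debug builds keeps DebugMicrotasksScopeDepthIsZero() honest.
  v8::MicrotasksScope no_run(isolate,
                             v8::MicrotasksScope::kDoNotRunMicrotasks);
}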
+
+
 String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
     : str_(NULL), length_(0) {
   if (obj.IsEmpty()) return;
@@ -7874,7 +8069,7 @@
   RETURN_ON_FAILED_EXECUTION(Value);
   i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
   auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
-  auto fun_obj = i::Object::GetProperty(debug, name).ToHandleChecked();
+  auto fun_obj = i::JSReceiver::GetProperty(debug, name).ToHandleChecked();
   auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
   const int kArgc = 1;
   v8::Local<v8::Value> argv[kArgc] = {obj};
@@ -7919,6 +8114,15 @@
   internal_isolate->debug()->set_live_edit_enabled(enable);
 }
 
+bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  return internal_isolate->is_tail_call_elimination_enabled();
+}
+
+void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  internal_isolate->SetTailCallEliminationEnabled(enabled);
+}
 
 MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
                                                Local<Value> value) {
diff --git a/src/api.h b/src/api.h
index 5567652..cb2b5c3 100644
--- a/src/api.h
+++ b/src/api.h
@@ -452,6 +452,12 @@
         saved_contexts_(0),
         spare_(NULL),
         call_depth_(0),
+        microtasks_depth_(0),
+        microtasks_suppressions_(0),
+#ifdef DEBUG
+        debug_microtasks_depth_(0),
+#endif
+        microtasks_policy_(v8::MicrotasksPolicy::kAuto),
         last_handle_before_deferred_block_(NULL) { }
 
   ~HandleScopeImplementer() {
@@ -472,10 +478,36 @@
   inline internal::Object** GetSpareOrNewBlock();
   inline void DeleteExtensions(internal::Object** prev_limit);
 
+  // Call depth represents nested v8 api calls.
   inline void IncrementCallDepth() {call_depth_++;}
   inline void DecrementCallDepth() {call_depth_--;}
   inline bool CallDepthIsZero() { return call_depth_ == 0; }
 
+  // Microtasks scope depth represents nested scopes controlling microtasks
+  // invocation, which happens when depth reaches zero.
+  inline void IncrementMicrotasksScopeDepth() {microtasks_depth_++;}
+  inline void DecrementMicrotasksScopeDepth() {microtasks_depth_--;}
+  inline int GetMicrotasksScopeDepth() { return microtasks_depth_; }
+
+  // Possibly nested microtasks suppression scopes prevent microtasks
+  // from running.
+  inline void IncrementMicrotasksSuppressions() {microtasks_suppressions_++;}
+  inline void DecrementMicrotasksSuppressions() {microtasks_suppressions_--;}
+  inline bool HasMicrotasksSuppressions() { return !!microtasks_suppressions_; }
+
+#ifdef DEBUG
+  // In debug we check that calls not intended to invoke microtasks are
+  // still correctly wrapped with microtask scopes.
+  inline void IncrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_++;}
+  inline void DecrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_--;}
+  inline bool DebugMicrotasksScopeDepthIsZero() {
+    return debug_microtasks_depth_ == 0;
+  }
+#endif
+
+  inline void set_microtasks_policy(v8::MicrotasksPolicy policy);
+  inline v8::MicrotasksPolicy microtasks_policy() const;
+
   inline void EnterContext(Handle<Context> context);
   inline void LeaveContext();
   inline bool LastEnteredContextWas(Handle<Context> context);
@@ -532,6 +564,12 @@
   List<Context*> saved_contexts_;
   Object** spare_;
   int call_depth_;
+  int microtasks_depth_;
+  int microtasks_suppressions_;
+#ifdef DEBUG
+  int debug_microtasks_depth_;
+#endif
+  v8::MicrotasksPolicy microtasks_policy_;
   Object** last_handle_before_deferred_block_;
   // This is only used for threading support.
   HandleScopeData handle_scope_data_;
@@ -550,6 +588,17 @@
 const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
 
 
+void HandleScopeImplementer::set_microtasks_policy(
+    v8::MicrotasksPolicy policy) {
+  microtasks_policy_ = policy;
+}
+
+
+v8::MicrotasksPolicy HandleScopeImplementer::microtasks_policy() const {
+  return microtasks_policy_;
+}
+
+
 void HandleScopeImplementer::SaveContext(Context* context) {
   saved_contexts_.Add(context);
 }
diff --git a/src/arguments.cc b/src/arguments.cc
index 077991b..815f5de 100644
--- a/src/arguments.cc
+++ b/src/arguments.cc
@@ -4,93 +4,9 @@
 
 #include "src/arguments.h"
 
-#include "src/api.h"
-#include "src/vm-state-inl.h"
-
 namespace v8 {
 namespace internal {
 
-
-template <typename T>
-template <typename V>
-v8::Local<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
-  // Check the ReturnValue.
-  Object** handle = &this->begin()[kReturnValueOffset];
-  // Nothing was set, return empty handle as per previous behaviour.
-  if ((*handle)->IsTheHole()) return v8::Local<V>();
-  return Utils::Convert<Object, V>(Handle<Object>(handle));
-}
-
-
-v8::Local<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
-  Isolate* isolate = this->isolate();
-  VMState<EXTERNAL> state(isolate);
-  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
-  FunctionCallbackInfo<v8::Value> info(begin(),
-                                       argv_,
-                                       argc_,
-                                       is_construct_call_);
-  f(info);
-  return GetReturnValue<v8::Value>(isolate);
-}
-
-
-#define WRITE_CALL_0(Function, ReturnValue)                            \
-  v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
-    Isolate* isolate = this->isolate();                                \
-    VMState<EXTERNAL> state(isolate);                                  \
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));       \
-    PropertyCallbackInfo<ReturnValue> info(begin());                   \
-    f(info);                                                           \
-    return GetReturnValue<ReturnValue>(isolate);                       \
-  }
-
-
-#define WRITE_CALL_1(Function, ReturnValue, Arg1)                     \
-  v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f,  \
-                                                         Arg1 arg1) { \
-    Isolate* isolate = this->isolate();                               \
-    VMState<EXTERNAL> state(isolate);                                 \
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));      \
-    PropertyCallbackInfo<ReturnValue> info(begin());                  \
-    f(arg1, info);                                                    \
-    return GetReturnValue<ReturnValue>(isolate);                      \
-  }
-
-
-#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2)          \
-  v8::Local<ReturnValue> PropertyCallbackArguments::Call(        \
-      Function f, Arg1 arg1, Arg2 arg2) {                        \
-    Isolate* isolate = this->isolate();                          \
-    VMState<EXTERNAL> state(isolate);                            \
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
-    PropertyCallbackInfo<ReturnValue> info(begin());             \
-    f(arg1, arg2, info);                                         \
-    return GetReturnValue<ReturnValue>(isolate);                 \
-  }
-
-
-#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2)               \
-  void PropertyCallbackArguments::Call(Function f, Arg1 arg1, Arg2 arg2) { \
-    Isolate* isolate = this->isolate();                                    \
-    VMState<EXTERNAL> state(isolate);                                      \
-    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));           \
-    PropertyCallbackInfo<ReturnValue> info(begin());                       \
-    f(arg1, arg2, info);                                                   \
-  }
-
-
-FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
-FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
-
-#undef WRITE_CALL_0
-#undef WRITE_CALL_1
-#undef WRITE_CALL_2
-#undef WRITE_CALL_2_VOID
-
-
 double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
   // TODO(ulan): This clobbers only subset of registers depending on compiler,
   // Rewrite this in assembly to really clobber all registers.
@@ -98,6 +14,5 @@
   return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
 }
 
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/arguments.h b/src/arguments.h
index 3509677..02090f9 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -6,7 +6,8 @@
 #define V8_ARGUMENTS_H_
 
 #include "src/allocation.h"
-#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/tracing/trace-event.h"
 
 namespace v8 {
 namespace internal {
@@ -70,217 +71,30 @@
   Object** arguments_;
 };
 
-
-// For each type of callback, we have a list of arguments
-// They are used to generate the Call() functions below
-// These aren't included in the list as they have duplicate signatures
-// F(GenericNamedPropertyEnumeratorCallback, ...)
-// F(GenericNamedPropertyGetterCallback, ...)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
-  F(IndexedPropertyEnumeratorCallback, v8::Array)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F)                               \
-  F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>)            \
-  F(GenericNamedPropertyQueryCallback, v8::Integer, v8::Local<v8::Name>)   \
-  F(GenericNamedPropertyDeleterCallback, v8::Boolean, v8::Local<v8::Name>) \
-  F(IndexedPropertyGetterCallback, v8::Value, uint32_t)                    \
-  F(IndexedPropertyQueryCallback, v8::Integer, uint32_t)                   \
-  F(IndexedPropertyDeleterCallback, v8::Boolean, uint32_t)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F)                            \
-  F(GenericNamedPropertySetterCallback, v8::Value, v8::Local<v8::Name>, \
-    v8::Local<v8::Value>)                                               \
-  F(IndexedPropertySetterCallback, v8::Value, uint32_t, v8::Local<v8::Value>)
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
-  F(AccessorNameSetterCallback, \
-    void, \
-    v8::Local<v8::Name>, \
-    v8::Local<v8::Value>) \
-
-
-// Custom arguments replicate a small segment of stack that can be
-// accessed through an Arguments object the same way the actual stack
-// can.
-template<int kArrayLength>
-class CustomArgumentsBase : public Relocatable {
- public:
-  virtual inline void IterateInstance(ObjectVisitor* v) {
-    v->VisitPointers(values_, values_ + kArrayLength);
-  }
- protected:
-  inline Object** begin() { return values_; }
-  explicit inline CustomArgumentsBase(Isolate* isolate)
-      : Relocatable(isolate) {}
-  Object* values_[kArrayLength];
-};
-
-
-template<typename T>
-class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
- public:
-  static const int kReturnValueOffset = T::kReturnValueIndex;
-
-  typedef CustomArgumentsBase<T::kArgsLength> Super;
-  ~CustomArguments() {
-    this->begin()[kReturnValueOffset] =
-        reinterpret_cast<Object*>(kHandleZapValue);
-  }
-
- protected:
-  explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
-
-  template <typename V>
-  v8::Local<V> GetReturnValue(Isolate* isolate);
-
-  inline Isolate* isolate() {
-    return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
-  }
-};
-
-
-class PropertyCallbackArguments
-    : public CustomArguments<PropertyCallbackInfo<Value> > {
- public:
-  typedef PropertyCallbackInfo<Value> T;
-  typedef CustomArguments<T> Super;
-  static const int kArgsLength = T::kArgsLength;
-  static const int kThisIndex = T::kThisIndex;
-  static const int kHolderIndex = T::kHolderIndex;
-  static const int kDataIndex = T::kDataIndex;
-  static const int kReturnValueDefaultValueIndex =
-      T::kReturnValueDefaultValueIndex;
-  static const int kIsolateIndex = T::kIsolateIndex;
-  static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
-
-  PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
-                            JSObject* holder, Object::ShouldThrow should_throw)
-      : Super(isolate) {
-    Object** values = this->begin();
-    values[T::kThisIndex] = self;
-    values[T::kHolderIndex] = holder;
-    values[T::kDataIndex] = data;
-    values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
-    values[T::kShouldThrowOnErrorIndex] =
-        Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
-
-    // Here the hole is set as default value.
-    // It cannot escape into js as it's remove in Call below.
-    values[T::kReturnValueDefaultValueIndex] =
-        isolate->heap()->the_hole_value();
-    values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
-    DCHECK(values[T::kHolderIndex]->IsHeapObject());
-    DCHECK(values[T::kIsolateIndex]->IsSmi());
-  }
-
-  /*
-   * The following Call functions wrap the calling of all callbacks to handle
-   * calling either the old or the new style callbacks depending on which one
-   * has been registered.
-   * For old callbacks which return an empty handle, the ReturnValue is checked
-   * and used if it's been set to anything inside the callback.
-   * New style callbacks always use the return value.
-   */
-#define WRITE_CALL_0(Function, ReturnValue) \
-  v8::Local<ReturnValue> Call(Function f);
-
-#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
-  v8::Local<ReturnValue> Call(Function f, Arg1 arg1);
-
-#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
-  v8::Local<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2);
-
-#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2)                 \
-  void Call(Function f, Arg1 arg1, Arg2 arg2);                               \
-
-FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
-FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
-FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
-
-#undef WRITE_CALL_0
-#undef WRITE_CALL_1
-#undef WRITE_CALL_2
-#undef WRITE_CALL_2_VOID
-};
-
-
-class FunctionCallbackArguments
-    : public CustomArguments<FunctionCallbackInfo<Value> > {
- public:
-  typedef FunctionCallbackInfo<Value> T;
-  typedef CustomArguments<T> Super;
-  static const int kArgsLength = T::kArgsLength;
-  static const int kHolderIndex = T::kHolderIndex;
-  static const int kDataIndex = T::kDataIndex;
-  static const int kReturnValueDefaultValueIndex =
-      T::kReturnValueDefaultValueIndex;
-  static const int kIsolateIndex = T::kIsolateIndex;
-  static const int kCalleeIndex = T::kCalleeIndex;
-  static const int kContextSaveIndex = T::kContextSaveIndex;
-
-  FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
-                            internal::HeapObject* callee,
-                            internal::Object* holder, internal::Object** argv,
-                            int argc, bool is_construct_call)
-      : Super(isolate),
-        argv_(argv),
-        argc_(argc),
-        is_construct_call_(is_construct_call) {
-    Object** values = begin();
-    values[T::kDataIndex] = data;
-    values[T::kCalleeIndex] = callee;
-    values[T::kHolderIndex] = holder;
-    values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
-    values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
-    // Here the hole is set as default value.
-    // It cannot escape into js as it's remove in Call below.
-    values[T::kReturnValueDefaultValueIndex] =
-        isolate->heap()->the_hole_value();
-    values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
-    DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
-           values[T::kCalleeIndex]->IsFunctionTemplateInfo());
-    DCHECK(values[T::kHolderIndex]->IsHeapObject());
-    DCHECK(values[T::kIsolateIndex]->IsSmi());
-  }
-
-  /*
-   * The following Call function wraps the calling of all callbacks to handle
-   * calling either the old or the new style callbacks depending on which one
-   * has been registered.
-   * For old callbacks which return an empty handle, the ReturnValue is checked
-   * and used if it's been set to anything inside the callback.
-   * New style callbacks always use the return value.
-   */
-  v8::Local<v8::Value> Call(FunctionCallback f);
-
- private:
-  internal::Object** argv_;
-  int argc_;
-  bool is_construct_call_;
-};
-
-
 double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
 
-
 #ifdef DEBUG
 #define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
 #else
 #define CLOBBER_DOUBLE_REGISTERS()
 #endif
 
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                         \
-  static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
-  Type Name(int args_length, Object** args_object, Isolate* isolate) {    \
-    CLOBBER_DOUBLE_REGISTERS();                                           \
-    RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();  \
-    RuntimeCallTimerScope timer(isolate, &stats->Name);                   \
-    Arguments args(args_length, args_object);                             \
-    Type value = __RT_impl_##Name(args, isolate);                         \
-    return value;                                                         \
-  }                                                                       \
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                          \
+  static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate));  \
+  Type Name(int args_length, Object** args_object, Isolate* isolate) {     \
+    CLOBBER_DOUBLE_REGISTERS();                                            \
+    Type value;                                                            \
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8." #Name);    \
+    Arguments args(args_length, args_object);                              \
+    if (FLAG_runtime_call_stats) {                                         \
+      RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
+      RuntimeCallTimerScope timer(isolate, &stats->Name);                  \
+      value = __RT_impl_##Name(args, isolate);                             \
+    } else {                                                               \
+      value = __RT_impl_##Name(args, isolate);                             \
+    }                                                                      \
+    return value;                                                          \
+  }                                                                        \
   static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
 
 #define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
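With the rewrite above, runtime-call-stats timing is only set up when FLAG_runtime_call_stats is on, and every runtime entry additionally emits a disabled-by-default "v8.runtime" trace event. For a hypothetical runtime function named Runtime_Example (the name is only for illustration), RUNTIME_FUNCTION(Runtime_Example) now expands to roughly:

    static INLINE(Object* __RT_impl_Runtime_Example(Arguments args, Isolate* isolate));
    Object* Runtime_Example(int args_length, Object** args_object, Isolate* isolate) {
      CLOBBER_DOUBLE_REGISTERS();
      Object* value;
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8.Runtime_Example");
      Arguments args(args_length, args_object);
      if (FLAG_runtime_call_stats) {
        // Only pay for the timer when --runtime-call-stats is enabled.
        RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
        RuntimeCallTimerScope timer(isolate, &stats->Runtime_Example);
        value = __RT_impl_Runtime_Example(args, isolate);
      } else {
        value = __RT_impl_Runtime_Example(args, isolate);
      }
      return value;
    }
    static Object* __RT_impl_Runtime_Example(Arguments args, Isolate* isolate)
    // ... followed by the function body at the use site.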
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 0de9642..b0b22b6 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -71,6 +71,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -114,6 +118,18 @@
   }
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+                                   icache_flush_mode);
+}
 
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
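The two additions above give the ARM port WebAssembly memory relocation hooks: wasm_memory_reference() reads the embedded address, and update_wasm_memory_reference() rebases it when the backing store moves or grows. The arithmetic is plain pointer rebasing; a minimal standalone sketch of it (ordinary C++, not the patched code, which goes through Assembler::set_target_address_at and flushes the instruction cache):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Keep the reference's offset into the wasm memory buffer while swapping
    // the base address; the asserts mirror the DCHECKs in the patch.
    uint8_t* RebaseWasmMemoryReference(uint8_t* ref, uint8_t* old_base,
                                       uint8_t* new_base, size_t old_size,
                                       size_t new_size) {
      assert(old_base <= ref && ref < old_base + old_size);
      uint8_t* updated = new_base + (ref - old_base);
      assert(new_base <= updated && updated < new_base + new_size);
      return updated;
    }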
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index b0fa462..62516e8 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -145,7 +145,10 @@
   if (cpu.implementer() == base::CPU::NVIDIA &&
       cpu.variant() == base::CPU::NVIDIA_DENVER &&
       cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
-    supported_ |= 1u << COHERENT_CACHE;
+    // TODO(jkummerow): This is turned off as an experiment to see if it
+    // affects crash rates. Keep an eye on crash reports and either remove
+    // coherent cache support permanently, or re-enable it!
+    // supported_ |= 1u << COHERENT_CACHE;
   }
 #endif
 
@@ -1966,7 +1969,8 @@
 
 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                     Condition cond) {
-  DCHECK(fields >= B16 && fields < B20);  // at least one field set
+  DCHECK((fields & 0x000f0000) != 0);  // At least one field must be set.
+  DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
   Instr instr;
   if (!src.rm_.is_valid()) {
     // Immediate.
@@ -2546,12 +2550,6 @@
 }
 
 
-void Assembler::vmov(const SwVfpRegister dst, float imm) {
-  mov(ip, Operand(bit_cast<int32_t>(imm)));
-  vmov(dst, ip);
-}
-
-
 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
   uint64_t i;
   memcpy(&i, &d, 8);
@@ -2563,7 +2561,7 @@
 
 // Only works for little endian floating point formats.
 // We don't support VFP on the mixed endian floating point platform.
-static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
+static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
   DCHECK(CpuFeatures::IsSupported(VFP3));
 
   // VMOV can accept an immediate of the form:
@@ -2592,12 +2590,12 @@
     return false;
   }
 
-  // Bits 62:55 must be all clear or all set.
+  // Bits 61:54 must be all clear or all set.
   if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
     return false;
   }
 
-  // Bit 63 must be NOT bit 62.
+  // Bit 62 must be NOT bit 61.
   if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
     return false;
   }
@@ -2612,6 +2610,25 @@
 }
 
 
+void Assembler::vmov(const SwVfpRegister dst, float imm) {
+  uint32_t enc;
+  if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
+    // The float can be encoded in the instruction.
+    //
+    // Sd = immediate
+    // Instruction details available in ARM DDI 0406C.b, A8-936.
+    // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
+    // Vd(15-12) | 101(11-9) | sz=0(8) | imm4L(3-0)
+    int vd, d;
+    dst.split_code(&vd, &d);
+    emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
+  } else {
+    mov(ip, Operand(bit_cast<int32_t>(imm)));
+    vmov(dst, ip);
+  }
+}
+
+
 void Assembler::vmov(const DwVfpRegister dst,
                      double imm,
                      const Register scratch) {
@@ -2622,7 +2639,7 @@
   // pointer (pp) is valid.
   bool can_use_pool =
       !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
-  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
+  if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
     // The double can be encoded in the instruction.
     //
     // Dd = immediate
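vmov(SwVfpRegister, float) above now tries the VFP3 8-bit immediate encoding before falling back to materializing the bit pattern in ip. The single-precision eligibility test itself is not visible in this hunk; by analogy with the double-precision checks above (low mantissa bits clear, a run of identical exponent bits, and one exponent bit being the complement of the next), it can be sketched as below. This is my paraphrase of the ARM VFP immediate rule, not code from the patch:

    #include <cstdint>
    #include <cstring>

    // A 32-bit float fits the vmov immediate iff the low 19 mantissa bits are
    // zero, bits 29:25 are all equal, and bit 30 is the complement of bit 29.
    bool FitsVmovFloatImmediate(float imm) {
      uint32_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));
      if ((bits & 0x7ffff) != 0) return false;                     // low mantissa set
      uint32_t run = bits & 0x3e000000;                            // bits 29:25
      if (run != 0 && run != 0x3e000000) return false;             // not all 0s / all 1s
      if (((bits ^ (bits << 1)) & 0x40000000) == 0) return false;  // bit 30 == bit 29
      return true;
    }

For example, 1.0f, 0.5f and 3.0f pass the test, while 0.1f (non-zero low mantissa) and 32.0f (exponent out of range) take the mov/vmov fallback path.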
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index d381653..08ad64c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1390,7 +1390,9 @@
   // Emits the address of the code stub's first instruction.
   void emit_code_stub_address(Code* stub);
 
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
 
   // Read/patch instructions
   Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -1637,8 +1639,8 @@
   friend class RelocInfo;
   friend class CodePatcher;
   friend class BlockConstPoolScope;
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
   friend class EnsureSpace;
 };
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index a6bfdb1..1fffcb6 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -531,6 +531,7 @@
   //  -- r1     : constructor function
   //  -- r2     : allocation site or undefined
   //  -- r3     : new target
+  //  -- cp     : context
   //  -- lr     : return address
   //  -- sp[...]: constructor arguments
   // -----------------------------------
@@ -543,6 +544,7 @@
 
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(r2, r4);
+    __ Push(cp);
     __ SmiTag(r0);
     __ Push(r2, r0);
 
@@ -622,7 +624,7 @@
     // r0: result
     // sp[0]: receiver
     // sp[1]: number of arguments (smi-tagged)
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -751,9 +753,6 @@
   // r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Clear the context before we push it when entering the internal frame.
-  __ mov(cp, Operand::Zero());
-
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -855,8 +854,7 @@
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ PushFixedFrame(r1);
-  __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(r1);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
@@ -1192,8 +1190,7 @@
   __ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
 
   // Perform prologue operations usually performed by the young code stub.
-  __ PushFixedFrame(r1);
-  __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(r1);
 
   // Jump to point after the code-age stub.
   __ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
@@ -1430,24 +1427,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  __ cmp(sp, Operand(ip));
-  __ b(hs, &ok);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&ok);
-  __ Ret();
-}
-
-
 // static
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
@@ -1494,6 +1473,27 @@
   __ TailCallRuntime(Runtime::kThrowNotDateError);
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : argc
+  //  -- sp[0] : first argument (left-hand side)
+  //  -- sp[4] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ ldr(InstanceOfDescriptor::LeftRegister(),
+           MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
+    __ ldr(InstanceOfDescriptor::RightRegister(),
+           MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ Ret(2);
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1933,19 +1933,21 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ mov(scratch1, Operand(debug_is_active));
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
   __ ldrb(scratch1, MemOperand(scratch1));
   __ cmp(scratch1, Operand(0));
-  __ b(ne, &done);
+  __ b(eq, &done);
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ ldr(scratch3,
+           MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
     __ b(ne, &no_interpreter_frame);
     __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1953,73 +1955,37 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(scratch3,
-         MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+         MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &no_arguments_adaptor);
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
-  __ ldr(scratch1,
+  __ ldr(caller_args_count_reg,
          MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
   __ b(&formal_parameter_count_loaded);
 
   __ bind(&no_arguments_adaptor);
   // Load caller's formal parameter count
-  __ ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(scratch1,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
   __ ldr(scratch1,
          FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(scratch1,
+  __ ldr(caller_args_count_reg,
          FieldMemOperand(scratch1,
                          SharedFunctionInfo::kFormalParameterCountOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
 
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the end of destination area where we will put the arguments
-  // after we drop current frame. We add kPointerSize to count the receiver
-  // argument which is not included into formal parameters count.
-  Register dst_reg = scratch2;
-  __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
-  __ add(dst_reg, dst_reg,
-         Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
-  Register src_reg = scratch1;
-  __ add(src_reg, sp, Operand(args_reg, LSL, kPointerSizeLog2));
-  // Count receiver argument as well (not included in args_reg).
-  __ add(src_reg, src_reg, Operand(kPointerSize));
-
-  if (FLAG_debug_code) {
-    __ cmp(src_reg, dst_reg);
-    __ Check(lo, kStackAccessBelowStackPointer);
-  }
-
-  // Restore caller's frame pointer and return address now as they will be
-  // overwritten by the copying loop.
-  __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-  __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-
-  // Both src_reg and dst_reg are pointing to the word after the one to copy,
-  // so they must be pre-decremented in the loop.
-  Register tmp_reg = scratch3;
-  Label loop, entry;
-  __ b(&entry);
-  __ bind(&loop);
-  __ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
-  __ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
-  __ bind(&entry);
-  __ cmp(sp, src_reg);
-  __ b(ne, &loop);
-
-  // Leave current frame.
-  __ mov(sp, dst_reg);
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
   __ bind(&done);
 }
 }  // namespace
@@ -2473,27 +2439,6 @@
 
   {  // Too few parameters: Actual < expected
     __ bind(&too_few);
-
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
-    __ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
-                             kSmiTagSize)));
-    __ b(eq, &no_strong_error);
-
-    // What we really care about is the required number of arguments.
-    __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
-    __ cmp(r0, Operand::SmiUntag(r4));
-    __ b(ge, &no_strong_error);
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentAdaptorStackCheck(masm, &stack_overflow);
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 82fb51d..31e3e95 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -4,9 +4,10 @@
 
 #if V8_TARGET_ARCH_ARM
 
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
-#include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
@@ -77,6 +78,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -477,7 +482,9 @@
 }
 
 
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                      Register lhs, Register rhs,
                                                      Label* possible_strings,
@@ -486,7 +493,7 @@
          (lhs.is(r1) && rhs.is(r0)));
 
   // r2 is object type of rhs.
-  Label object_test, return_unequal, undetectable;
+  Label object_test, return_equal, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ tst(r2, Operand(kIsNotStringMask));
   __ b(ne, &object_test);
@@ -524,6 +531,16 @@
   __ bind(&undetectable);
   __ tst(r5, Operand(1 << Map::kIsUndetectable));
   __ b(eq, &return_unequal);
+
+  // If both sides are JSReceivers, then the result is false according to
+  // the HTML specification, which says that only comparisons with null or
+  // undefined are affected by special casing for document.all.
+  __ CompareInstanceType(r2, r2, ODDBALL_TYPE);
+  __ b(eq, &return_equal);
+  __ CompareInstanceType(r3, r3, ODDBALL_TYPE);
+  __ b(ne, &return_unequal);
+
+  __ bind(&return_equal);
   __ mov(r0, Operand(EQUAL));
   __ Ret();
 }
@@ -1049,9 +1066,9 @@
   if (result_size() > 2) {
     DCHECK_EQ(3, result_size());
     // Read result values stored on stack.
-    __ ldr(r2, MemOperand(r0, 2 * kPointerSize));
-    __ ldr(r1, MemOperand(r0, 1 * kPointerSize));
-    __ ldr(r0, MemOperand(r0, 0 * kPointerSize));
+    __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+    __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+    __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
   }
   // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
 
@@ -1358,8 +1375,12 @@
   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
   __ b(ne, &slow_case);
 
-  // Ensure that {function} has an instance prototype.
+  // Go to the runtime if the function is not a constructor.
   __ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+  __ tst(scratch, Operand(1 << Map::kIsConstructor));
+  __ b(eq, &slow_case);
+
+  // Ensure that {function} has an instance prototype.
   __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
   __ b(ne, &slow_case);
 
@@ -1427,7 +1448,8 @@
   // Slow-case: Call the %InstanceOf runtime function.
   __ bind(&slow_case);
   __ Push(object, function);
-  __ TailCallRuntime(Runtime::kInstanceOf);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -1480,29 +1502,6 @@
 }
 
 
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is in lr.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-
-  // Check that the key is an array index, that is Uint32.
-  __ NonNegativeSmiTst(key);
-  __ b(ne, &slow);
-
-  // Everything is fine, call runtime.
-  __ Push(receiver, key);  // Receiver, key.
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -2633,29 +2632,28 @@
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in r0.
-  Label not_smi;
-  __ JumpIfNotSmi(r0, &not_smi);
-  __ Ret();
-  __ bind(&not_smi);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(r0, Operand(kSmiTagMask));
+  __ Ret(eq);
 
   __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
   // r0: receiver
   // r1: receiver instance type
   __ Ret(eq);
 
-  Label not_string, slow_string;
-  __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
-  __ b(hs, &not_string);
-  // Check if string has a cached array index.
-  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
-  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
-  __ b(ne, &slow_string);
-  __ IndexFromHash(r2, r0);
-  __ Ret();
-  __ bind(&slow_string);
-  __ push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-  __ bind(&not_string);
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in r0.
+  __ AssertNotNumber(r0);
+
+  __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+  // r0: receiver
+  // r1: receiver instance type
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub, lo);
 
   Label not_oddball;
   __ cmp(r1, Operand(ODDBALL_TYPE));
@@ -2664,26 +2662,27 @@
   __ Ret();
   __ bind(&not_oddball);
 
-  __ push(r0);  // Push argument.
+  __ Push(r0);  // Push argument.
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in r0.
+  __ AssertString(r0);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes one argument in r0.
-  Label not_smi;
-  __ JumpIfNotSmi(r0, &not_smi);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r0, r0);
-  __ mov(r0, Operand(0), LeaveCC, lt);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
+  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
+  __ b(ne, &runtime);
+  __ IndexFromHash(r2, r0);
   __ Ret();
-  __ bind(&not_smi);
 
-  __ push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ bind(&runtime);
+  __ Push(r0);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in r0.
   Label is_number;
@@ -2839,42 +2838,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r1    : left
-  //  -- r0    : right
-  //  -- lr    : return address
-  // -----------------------------------
-  __ AssertString(r1);
-  __ AssertString(r0);
-
-  Label not_same;
-  __ cmp(r0, r1);
-  __ b(ne, &not_same);
-  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r1,
-                      r2);
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both objects are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
-
-  // Compare flat one-byte strings natively.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
-                      r3);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
-
-  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
-  // tagged as a small integer.
-  __ bind(&runtime);
-  __ Push(r1, r0);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r1    : left
@@ -3168,10 +3131,17 @@
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
-  __ Push(left, right);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left, right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ LoadRoot(r1, Heap::kTrueValueRootIndex);
+    __ sub(r0, r0, r1);
+    __ Ret();
   } else {
+    __ Push(left, right);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3710,7 +3680,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ ldr(r1, MemOperand(fp, parameter_count_offset));
   if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ add(r1, r1, Operand(1));
@@ -4703,7 +4673,7 @@
     __ bind(&loop);
     __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r1);
     __ b(ne, &loop);
   }
@@ -4712,7 +4682,7 @@
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
   __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(ip, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &no_rest_parameters);
 
@@ -4851,7 +4821,7 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+  __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(eq, &adaptor_frame);
 
@@ -5050,7 +5020,7 @@
     __ bind(&loop);
     __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r1);
     __ b(ne, &loop);
   }
@@ -5058,7 +5028,7 @@
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(ip, MemOperand(r3, StandardFrameConstants::kContextOffset));
+  __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(eq, &arguments_adaptor);
   {
@@ -5424,16 +5394,12 @@
   __ jmp(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0                  : callee
   //  -- r4                  : call_data
   //  -- r2                  : holder
   //  -- r1                  : api_function_address
-  //  -- r3                  : number of arguments if argc is a register
   //  -- cp                  : context
   //  --
   //  -- sp[0]               : last argument
@@ -5459,11 +5425,9 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || r3.is(argc.reg()));
-
   // context save
   __ push(context);
-  if (!is_lazy) {
+  if (!is_lazy()) {
     // load context from callee
     __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   }
@@ -5475,7 +5439,7 @@
   __ push(call_data);
 
   Register scratch = call_data;
-  if (!call_data_undefined) {
+  if (!call_data_undefined()) {
     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   }
   // return value
@@ -5504,29 +5468,15 @@
   __ add(r0, sp, Operand(1 * kPointerSize));
   // FunctionCallbackInfo::implicit_args_
   __ str(scratch, MemOperand(r0, 0 * kPointerSize));
-  if (argc.is_immediate()) {
-    // FunctionCallbackInfo::values_
-    __ add(ip, scratch,
-           Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
-    __ str(ip, MemOperand(r0, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    __ mov(ip, Operand(argc.immediate()));
-    __ str(ip, MemOperand(r0, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_ = 0
-    __ mov(ip, Operand::Zero());
-    __ str(ip, MemOperand(r0, 3 * kPointerSize));
-  } else {
-    // FunctionCallbackInfo::values_
-    __ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
-    __ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
-    __ str(ip, MemOperand(r0, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    __ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_
-    __ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
-    __ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
-    __ str(ip, MemOperand(r0, 3 * kPointerSize));
-  }
+  // FunctionCallbackInfo::values_
+  __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+  __ str(ip, MemOperand(r0, 1 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc
+  __ mov(ip, Operand(argc()));
+  __ str(ip, MemOperand(r0, 2 * kPointerSize));
+  // FunctionCallbackInfo::is_construct_call_ = 0
+  __ mov(ip, Operand::Zero());
+  __ str(ip, MemOperand(r0, 3 * kPointerSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5536,7 +5486,7 @@
       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument
   int return_value_offset = 0;
-  if (return_first_arg) {
+  if (is_store()) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5545,33 +5495,15 @@
   int stack_space = 0;
   MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
   MemOperand* stack_space_operand = &is_construct_call_operand;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_operand = NULL;
-  }
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_operand = NULL;
+
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                            stack_space_operand, return_value_operand,
                            &context_restore_operand);
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- sp[0]                        : name
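With CallApiFunctionStub and CallApiAccessorStub folded into CallApiCallbackStub, argc is always a compile-time immediate, so the stub can store all four FunctionCallbackInfo words with constants instead of branching on a register count. As a rough picture, with field names taken from the comments in the hunk above (the struct itself is a sketch, not the real v8 class):

    // The four words written at r0 .. r0 + 3 * kPointerSize by the stub:
    struct FunctionCallbackInfoWords {
      void** implicit_args;    // start of the FCA slots (holder, data, ...)
      void** values;           // implicit_args + (FCA::kArgsLength - 1 + argc)
      int length;              // argc, now always an immediate
      int is_construct_call;   // always 0 for this stub
    };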
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 2dee363..7e1a550 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -898,10 +898,8 @@
                       young_sequence_.length() / Assembler::kInstrSize,
                       CodePatcher::DONT_FLUSH));
   PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
-  patcher->masm()->PushFixedFrame(r1);
+  patcher->masm()->PushStandardFrame(r1);
   patcher->masm()->nop(ip.code());
-  patcher->masm()->add(
-      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
 }
 
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index b9d4788..a162051 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -654,7 +654,7 @@
   inline bool HasH()    const { return HValue() == 1; }
   inline bool HasLink() const { return LinkValue() == 1; }
 
-  // Decoding the double immediate in the vmov instruction.
+  // Decode the double immediate from a vmov instruction.
   double DoubleImmedVmov() const;
 
   // Instructions are read out of a code stream. The only way to get a
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3e9fac7..2785b75 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -103,12 +103,6 @@
   }
 }
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  // There is no dynamic alignment padding on ARM in the input frame.
-  return false;
-}
-
-
 #define __ masm()->
 
 // This code tries to be close to ia32 code so that any changes can be
@@ -162,7 +156,12 @@
   // Allocate a new deoptimizer object.
   // Pass four arguments in r0 to r3 and fifth argument on stack.
   __ PrepareCallCFunction(6, r5);
+  __ mov(r0, Operand(0));
+  Label context_check;
+  __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(r1, &context_check);
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
   __ mov(r1, Operand(type()));  // bailout type,
   // r2: bailout id already loaded.
   // r3: code address or 0 already loaded.
@@ -235,6 +234,8 @@
   }
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
+  __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
+
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
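Two deoptimizer-entry changes land here: the stack pointer for the output frames is now taken from Deoptimizer::caller_frame_top_offset(), and the JSFunction argument defaults to 0 unless the frame's context slot actually holds a context. The context_check relies on typed frames storing a Smi frame-type marker where a JavaScript frame stores its (heap-object) context, and Smis being recognizable by a clear low tag bit. A conceptual, plain-C++ rendering of that test (not the emitted assembly):

    #include <cstdint>

    // V8 Smis on 32-bit targets have tag bit 0 == 0.
    inline bool IsSmiWord(uintptr_t word) { return (word & 1) == 0; }

    // A Smi in the context-or-marker slot means a typed frame: there is no
    // JSFunction to report, so the deoptimizer argument stays 0.
    uintptr_t DeoptFunctionArgument(uintptr_t context_or_marker_slot,
                                    uintptr_t function_slot) {
      return IsSmiWord(context_or_marker_slot) ? 0 : function_slot;
    }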
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 9258703..287152a 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -604,6 +604,26 @@
           Print("s");
         }
         return 4;
+      } else if (format[1] == 'p') {
+        if (format[8] == '_') {  // 'spec_reg_fields
+          DCHECK(STRING_STARTS_WITH(format, "spec_reg_fields"));
+          Print("_");
+          int mask = instr->Bits(19, 16);
+          if (mask == 0) Print("(none)");
+          if ((mask & 0x8) != 0) Print("f");
+          if ((mask & 0x4) != 0) Print("s");
+          if ((mask & 0x2) != 0) Print("x");
+          if ((mask & 0x1) != 0) Print("c");
+          return 15;
+        } else {  // 'spec_reg
+          DCHECK(STRING_STARTS_WITH(format, "spec_reg"));
+          if (instr->Bit(22) == 0) {
+            Print("CPSR");
+          } else {
+            Print("SPSR");
+          }
+          return 8;
+        }
       }
       // 's: S field of data processing instructions
       if (instr->HasS()) {
@@ -822,7 +842,13 @@
       return;
     }
   } else if ((type == 0) && instr->IsMiscType0()) {
-    if (instr->Bits(22, 21) == 1) {
+    if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
+        (instr->Bits(15, 4) == 0xf00)) {
+      Format(instr, "msr'cond 'spec_reg'spec_reg_fields, 'rm");
+    } else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
+               (instr->Bits(11, 0) == 0)) {
+      Format(instr, "mrs'cond 'rd, 'spec_reg");
+    } else if (instr->Bits(22, 21) == 1) {
       switch (instr->BitField(7, 4)) {
         case BX:
           Format(instr, "bx'cond 'rm");
@@ -1404,7 +1430,7 @@
         if (instr->SzValue() == 0x1) {
           Format(instr, "vmov'cond.f64 'Dd, 'd");
         } else {
-          Unknown(instr);  // Not used by V8.
+          Format(instr, "vmov'cond.f32 'Sd, 'd");
         }
       } else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
         // vrintz - round towards zero (truncate)
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 1ea7b1a..3792775 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -93,16 +93,11 @@
       -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize =
-      FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
-
-  static const int kConstantPoolOffset =
-      FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
-  static const int kCodeOffset         = -2 * kPointerSize;
-  static const int kSPOffset           = -1 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = 0 * kPointerSize;
@@ -120,7 +115,7 @@
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
   static const int kLastParameterOffset = +2 * kPointerSize;
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 
   // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index 1f55c0b..b6cac76 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -111,35 +111,8 @@
 }
 
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return r0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -267,6 +240,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -311,6 +291,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (arg count)
+  Register registers[] = {r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -319,20 +305,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r1, r0};
@@ -408,25 +380,7 @@
                                    &default_descriptor);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  static PlatformInterfaceDescriptor default_descriptor =
-      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
-  Register registers[] = {
-      r0,  // callee
-      r4,  // call_data
-      r2,  // holder
-      r1,  // api_function_address
-      r3,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers,
-                                   &default_descriptor);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
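SIMD128_ALLOC_DESC above generates one register-free platform descriptor per entry in the SIMD128_TYPES list, matching the no-register descriptor just above it in the hunk. Expanded for a single entry, with Float32x4 used purely for illustration, it reads roughly:

    void AllocateFloat32x4Descriptor::InitializePlatformSpecific(
        CallInterfaceDescriptorData* data) {
      data->InitializePlatformSpecific(0, nullptr, nullptr);  // no register parameters
    }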
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 80aef0c..6af3d6c 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -738,12 +738,12 @@
   str(scratch, MemOperand(ip));
   // Call stub on end of buffer.
   // Check for end of buffer.
-  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
   if (and_then == kFallThroughAtEnd) {
-    b(eq, &done);
+    b(ne, &done);
   } else {
     DCHECK(and_then == kReturnAtEnd);
-    Ret(eq);
+    Ret(ne);
   }
   push(lr);
   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -755,20 +755,65 @@
   }
 }
 
-
-void MacroAssembler::PushFixedFrame(Register marker_reg) {
-  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
-  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
-                    (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
-                    fp.bit() | lr.bit());
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+  if (marker_reg.is_valid()) {
+    if (FLAG_enable_embedded_constant_pool) {
+      if (marker_reg.code() > pp.code()) {
+        stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
+        add(fp, sp, Operand(kPointerSize));
+        Push(marker_reg);
+      } else {
+        stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
+        add(fp, sp, Operand(2 * kPointerSize));
+      }
+    } else {
+      if (marker_reg.code() > fp.code()) {
+        stm(db_w, sp, fp.bit() | lr.bit());
+        mov(fp, Operand(sp));
+        Push(marker_reg);
+      } else {
+        stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+        add(fp, sp, Operand(kPointerSize));
+      }
+    }
+  } else {
+    stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+                      fp.bit() | lr.bit());
+    add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
+  }
 }
 
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+  if (marker_reg.is_valid()) {
+    if (FLAG_enable_embedded_constant_pool) {
+      if (marker_reg.code() > pp.code()) {
+        pop(marker_reg);
+        ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+      } else {
+        ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
+      }
+    } else {
+      if (marker_reg.code() > fp.code()) {
+        pop(marker_reg);
+        ldm(ia_w, sp, fp.bit() | lr.bit());
+      } else {
+        ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+      }
+    }
+  } else {
+    ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+                      fp.bit() | lr.bit());
+  }
+}
 
-void MacroAssembler::PopFixedFrame(Register marker_reg) {
-  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
-  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+  DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
+  stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
                     (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                     fp.bit() | lr.bit());
+  int offset = -StandardFrameConstants::kContextOffset;
+  offset += function_reg.is_valid() ? kPointerSize : 0;
+  add(fp, sp, Operand(offset));
 }
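PushCommonFrame and PushStandardFrame replace PushFixedFrame/PopFixedFrame and also take over the fp adjustment their callers previously did by hand. The branching on register numbers exists because stm stores its register list in ascending register-number order, so a marker register numbered above fp (or pp) cannot land below them in a single stm and must be pushed separately. The resulting layout, sketched as a comment (highest address first):

    // Layout produced by PushCommonFrame(marker):
    //   lr          (return address)
    //   caller fp   <-- fp now points here
    //   [pp]        (only with --enable-embedded-constant-pool)
    //   marker      (Smi frame type)   <-- sp
    // PushStandardFrame(function) additionally saves cp between the [pp] slot
    // and the function slot.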
 
 
@@ -1056,7 +1101,144 @@
     vmov(dst, VmovIndexLo, src);
   }
 }
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register scratch, Register shift) {
+  DCHECK(!AreAliased(dst_high, src_low));
+  DCHECK(!AreAliased(dst_high, shift));
 
+  Label less_than_32;
+  Label done;
+  rsb(scratch, shift, Operand(32), SetCC);
+  b(gt, &less_than_32);
+  // If shift >= 32
+  and_(scratch, shift, Operand(0x1f));
+  lsl(dst_high, src_low, Operand(scratch));
+  mov(dst_low, Operand(0));
+  jmp(&done);
+  bind(&less_than_32);
+  // If shift < 32
+  lsl(dst_high, src_high, Operand(shift));
+  orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
+  lsl(dst_low, src_low, Operand(shift));
+  bind(&done);
+}
+
+void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
+  DCHECK(!AreAliased(dst_high, src_low));
+  Label less_than_32;
+  Label done;
+  if (shift == 0) {
+    Move(dst_high, src_high);
+    Move(dst_low, src_low);
+  } else if (shift == 32) {
+    Move(dst_high, src_low);
+    Move(dst_low, Operand(0));
+  } else if (shift >= 32) {
+    shift &= 0x1f;
+    lsl(dst_high, src_low, Operand(shift));
+    mov(dst_low, Operand(0));
+  } else {
+    lsl(dst_high, src_high, Operand(shift));
+    orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
+    lsl(dst_low, src_low, Operand(shift));
+  }
+}
+
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register scratch, Register shift) {
+  DCHECK(!AreAliased(dst_low, src_high));
+  DCHECK(!AreAliased(dst_low, shift));
+
+  Label less_than_32;
+  Label done;
+  rsb(scratch, shift, Operand(32), SetCC);
+  b(gt, &less_than_32);
+  // If shift >= 32
+  and_(scratch, shift, Operand(0x1f));
+  lsr(dst_low, src_high, Operand(scratch));
+  mov(dst_high, Operand(0));
+  jmp(&done);
+  bind(&less_than_32);
+  // If shift < 32
+
+  lsr(dst_low, src_low, Operand(shift));
+  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
+  lsr(dst_high, src_high, Operand(shift));
+  bind(&done);
+}
+
+void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
+  DCHECK(!AreAliased(dst_low, src_high));
+  Label less_than_32;
+  Label done;
+  if (shift == 32) {
+    mov(dst_low, src_high);
+    mov(dst_high, Operand(0));
+  } else if (shift > 32) {
+    shift &= 0x1f;
+    lsr(dst_low, src_high, Operand(shift));
+    mov(dst_high, Operand(0));
+  } else if (shift == 0) {
+    Move(dst_low, src_low);
+    Move(dst_high, src_high);
+  } else {
+    lsr(dst_low, src_low, Operand(shift));
+    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
+    lsr(dst_high, src_high, Operand(shift));
+  }
+}
+
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register scratch, Register shift) {
+  DCHECK(!AreAliased(dst_low, src_high));
+  DCHECK(!AreAliased(dst_low, shift));
+
+  Label less_than_32;
+  Label done;
+  rsb(scratch, shift, Operand(32), SetCC);
+  b(gt, &less_than_32);
+  // If shift >= 32
+  and_(scratch, shift, Operand(0x1f));
+  asr(dst_low, src_high, Operand(scratch));
+  asr(dst_high, src_high, Operand(31));
+  jmp(&done);
+  bind(&less_than_32);
+  // If shift < 32
+  lsr(dst_low, src_low, Operand(shift));
+  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
+  asr(dst_high, src_high, Operand(shift));
+  bind(&done);
+}
+
+void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
+  DCHECK(!AreAliased(dst_low, src_high));
+  Label less_than_32;
+  Label done;
+  if (shift == 32) {
+    mov(dst_low, src_high);
+    asr(dst_high, src_high, Operand(31));
+  } else if (shift > 32) {
+    shift &= 0x1f;
+    asr(dst_low, src_high, Operand(shift));
+    asr(dst_high, src_high, Operand(31));
+  } else if (shift == 0) {
+    Move(dst_low, src_low);
+    Move(dst_high, src_high);
+  } else {
+    lsr(dst_low, src_low, Operand(shift));
+    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
+    asr(dst_high, src_high, Operand(shift));
+  }
+}
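
The six pair-shift helpers above implement 64-bit shifts on a {low, high} register pair, splitting each operation into a shift-by-less-than-32 path and a shift-by-32-or-more path. As a rough reference for what they compute, here is a minimal C++ sketch (the RegPair type and the *Ref names are illustrative only, not V8 API); it assumes the shift amount is already reduced to the range [0, 63]:

```cpp
#include <cstdint>

// Illustrative reference semantics for LslPair/LsrPair/AsrPair (not V8 code).
struct RegPair { uint32_t low; uint32_t high; };

static uint64_t Join(RegPair v) {
  return (static_cast<uint64_t>(v.high) << 32) | v.low;
}
static RegPair Split(uint64_t x) {
  return {static_cast<uint32_t>(x), static_cast<uint32_t>(x >> 32)};
}

RegPair LslPairRef(RegPair v, uint32_t shift) {  // logical shift left
  return Split(Join(v) << shift);
}
RegPair LsrPairRef(RegPair v, uint32_t shift) {  // logical shift right
  return Split(Join(v) >> shift);
}
RegPair AsrPairRef(RegPair v, uint32_t shift) {  // arithmetic shift right
  // Assumes the usual sign-propagating behaviour of >> on signed values.
  return Split(static_cast<uint64_t>(static_cast<int64_t>(Join(v)) >> shift));
}
```

The register-operand variants reach the same result without a 64-bit type: they compute 32 - shift into a scratch register, and for shifts of 32 or more mask the count with 0x1f and move the single surviving half.
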
 
 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
     Register code_target_address) {
@@ -1074,19 +1256,15 @@
   LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
 }
 
-
-void MacroAssembler::StubPrologue() {
-  PushFixedFrame();
-  Push(Smi::FromInt(StackFrame::STUB));
-  // Adjust FP to point to saved FP.
-  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+  mov(ip, Operand(Smi::FromInt(type)));
+  PushCommonFrame(ip);
   if (FLAG_enable_embedded_constant_pool) {
     LoadConstantPoolPointerRegister();
     set_constant_pool_available(true);
   }
 }
 
-
 void MacroAssembler::Prologue(bool code_pre_aging) {
   { PredictableCodeSizeScope predictible_code_size_scope(
         this, kNoCodeAgeSequenceLength);
@@ -1099,10 +1277,8 @@
       ldr(pc, MemOperand(pc, -4));
       emit_code_stub_address(stub);
     } else {
-      PushFixedFrame(r1);
+      PushStandardFrame(r1);
       nop(ip.code());
-      // Adjust FP to point to saved FP.
-      add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
     }
   }
   if (FLAG_enable_embedded_constant_pool) {
@@ -1123,17 +1299,15 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type,
                                 bool load_constant_pool_pointer_reg) {
   // r0-r3: preserved
-  PushFixedFrame();
+  mov(ip, Operand(Smi::FromInt(type)));
+  PushCommonFrame(ip);
   if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
     LoadConstantPoolPointerRegister();
   }
-  mov(ip, Operand(Smi::FromInt(type)));
-  push(ip);
-  mov(ip, Operand(CodeObject()));
-  push(ip);
-  // Adjust FP to point to saved FP.
-  add(fp, sp,
-      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+  if (type == StackFrame::INTERNAL) {
+    mov(ip, Operand(CodeObject()));
+    push(ip);
+  }
 }
 
 
@@ -1164,10 +1338,10 @@
   DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
   DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
-  Push(lr, fp);
-  mov(fp, Operand(sp));  // Set up new frame pointer.
+  mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
+  PushCommonFrame(ip);
   // Reserve room for saved entry sp and code object.
-  sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
+  sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
   if (emit_debug_code()) {
     mov(ip, Operand::Zero());
     str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -1249,7 +1423,7 @@
   // Optionally restore all double registers.
   if (save_doubles) {
     // Calculate the stack location of the saved doubles and restore them.
-    const int offset = ExitFrameConstants::kFrameSize;
+    const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
     sub(r3, fp,
         Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
     RestoreFPRegs(r3, ip);
@@ -1300,6 +1474,64 @@
   MovFromFloatResult(dst);
 }
 
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+                                        Register caller_args_count_reg,
+                                        Register scratch0, Register scratch1) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+#endif
+
+  // Calculate the end of the destination area where we will put the arguments
+  // after we drop the current frame. We add kPointerSize to count the receiver
+  // argument, which is not included in the formal parameter count.
+  Register dst_reg = scratch0;
+  add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+  add(dst_reg, dst_reg,
+      Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = caller_args_count_reg;
+  // Calculate the end of source area. +kPointerSize is for the receiver.
+  if (callee_args_count.is_reg()) {
+    add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+    add(src_reg, src_reg, Operand(kPointerSize));
+  } else {
+    add(src_reg, sp,
+        Operand((callee_args_count.immediate() + 1) * kPointerSize));
+  }
+
+  if (FLAG_debug_code) {
+    cmp(src_reg, dst_reg);
+    Check(lo, kStackAccessBelowStackPointer);
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch1;
+  Label loop, entry;
+  b(&entry);
+  bind(&loop);
+  ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+  str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+  bind(&entry);
+  cmp(sp, src_reg);
+  b(ne, &loop);
+
+  // Leave current frame.
+  mov(sp, dst_reg);
+}
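
PrepareForTailCall drops the current frame before a tail call: it computes the word just past the caller's argument area (destination) and just past the callee's arguments on the stack (source), restores lr and fp from the frame being dropped, then copies the callee's arguments over the caller's, walking from the last argument back toward sp so overlapping areas are handled safely. A compact sketch of the copy loop, assuming word-granularity pointers (illustrative only, not V8 code):

```cpp
#include <cstdint>

// Both pointers start one word past the last word to move and are
// pre-decremented, mirroring the ldr/str ... PreIndex pair above.
// Returns the new stack pointer ("mov(sp, dst_reg)").
intptr_t* CopyArgsForTailCall(intptr_t* sp, intptr_t* src_end,
                              intptr_t* dst_end) {
  while (src_end != sp) {      // cmp(sp, src_reg); b(ne, &loop);
    *--dst_end = *--src_end;   // copy one argument word
  }
  return dst_end;
}
```
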
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -1578,8 +1810,19 @@
   DCHECK(!holder_reg.is(ip));
   DCHECK(!scratch.is(ip));
 
-  // Load current lexical context from the stack frame.
-  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  DCHECK(!ip.is(scratch));
+  mov(ip, fp);
+  bind(&load_context);
+  ldr(scratch, MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
+  JumpIfNotSmi(scratch, &has_context);
+  ldr(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
+  b(&load_context);
+  bind(&has_context);
+
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
   cmp(scratch, Operand::Zero());
@@ -2803,6 +3046,17 @@
   b(eq, on_either_smi);
 }
 
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    tst(object, Operand(kSmiTagMask));
+    Check(ne, kOperandIsANumber);
+    push(object);
+    CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
+    pop(object);
+    Check(ne, kOperandIsANumber);
+  }
+}
 
 void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
@@ -3510,28 +3764,45 @@
   b(ne, &next);
 }
 
-
 void MacroAssembler::TestJSArrayForAllocationMemento(
     Register receiver_reg,
     Register scratch_reg,
     Label* no_memento_found) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
-  add(scratch_reg, receiver_reg,
-      Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
-  cmp(scratch_reg, Operand(new_space_start));
-  b(lt, no_memento_found);
-  mov(ip, Operand(new_space_allocation_top));
-  ldr(ip, MemOperand(ip));
-  cmp(scratch_reg, ip);
-  b(gt, no_memento_found);
-  ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
-  cmp(scratch_reg,
-      Operand(isolate()->factory()->allocation_memento_map()));
-}
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
 
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+  tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
+  b(eq, &top_check);
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  eor(scratch_reg, scratch_reg, Operand(receiver_reg));
+  tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
+  b(ne, no_memento_found);
+  // Continue with the actual map check.
+  jmp(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  cmp(scratch_reg, Operand(new_space_allocation_top));
+  b(gt, no_memento_found);
+  // Memento map check.
+  bind(&map_check);
+  ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+  cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
+}
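
The rewritten memento test first bails out for objects outside new space, then uses an XOR-and-mask trick to decide whether two addresses share a page: `(a ^ b) & ~Page::kPageAlignmentMask` is zero exactly when the page bits agree. A hedged sketch of the resulting decision logic (illustrative, not V8 code; `page_mask` plays the role of `~Page::kPageAlignmentMask`):

```cpp
#include <cstdint>

// Returns true if the code above should fall through to the memento map
// check, false if it should branch to no_memento_found.
bool MayHaveMemento(uintptr_t object, uintptr_t memento_end, uintptr_t top,
                    uintptr_t page_mask) {
  if ((memento_end ^ top) & page_mask) {
    // Different page than the allocation top: a memento can only follow if
    // the object does not butt up against the page boundary.
    return ((memento_end ^ object) & page_mask) == 0;
  }
  // Same page as the allocation top: there must be room below top.
  return memento_end <= top;
}
```
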
 
 Register GetRegisterThatIsNotOneOf(Register reg1,
                                    Register reg2,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 468f4b5..f326304 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -457,10 +457,14 @@
   }
 
   // Push a fixed frame, consisting of lr, fp, constant pool (if
-  // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
-  // marker_reg is a valid register.
-  void PushFixedFrame(Register marker_reg = no_reg);
-  void PopFixedFrame(Register marker_reg = no_reg);
+  // FLAG_enable_embedded_constant_pool)
+  void PushCommonFrame(Register marker_reg = no_reg);
+
+  // Push a standard frame, consisting of lr, fp, constant pool (if
+  // FLAG_enable_embedded_constant_pool), context and JS function
+  void PushStandardFrame(Register function_reg);
+
+  void PopCommonFrame(Register marker_reg = no_reg);
 
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
@@ -545,6 +549,19 @@
   void VmovLow(Register dst, DwVfpRegister src);
   void VmovLow(DwVfpRegister dst, Register src);
 
+  void LslPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, Register scratch, Register shift);
+  void LslPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, uint32_t shift);
+  void LsrPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, Register scratch, Register shift);
+  void LsrPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, uint32_t shift);
+  void AsrPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, Register scratch, Register shift);
+  void AsrPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, uint32_t shift);
+
   // Loads the number from object into dst register.
   // If |object| is neither smi nor heap number, |not_number| is jumped to
   // with |object| still intact.
@@ -580,7 +597,7 @@
                          Label* not_int32);
 
   // Generates function and stub prologue code.
-  void StubPrologue();
+  void StubPrologue(StackFrame::Type type);
   void Prologue(bool code_pre_aging);
 
   // Enter exit frame.
@@ -637,6 +654,15 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Removes the current frame and its arguments from the stack, preserving
+  // the arguments and the return address pushed for the next call.
+  // Neither |callee_args_count| nor |caller_args_count_reg| includes the
+  // receiver. |callee_args_count| is not modified; |caller_args_count_reg|
+  // is trashed.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeFunctionCode(Register function, Register new_target,
                           const ParameterCount& expected,
@@ -1280,6 +1306,9 @@
   // Jump if either of the registers contain a smi.
   void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
 
+  // Abort execution if argument is a number, enabled via --debug-code.
+  void AssertNotNumber(Register object);
+
   // Abort execution if argument is a smi, enabled via --debug-code.
   void AssertNotSmi(Register object);
   void AssertSmi(Register object);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 4630b94..6c22a0a 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1041,6 +1041,32 @@
   return value;
 }
 
+void Simulator::SetSpecialRegister(SRegisterFieldMask reg_and_mask,
+                                   uint32_t value) {
+  // Only CPSR_f is implemented. Of that, only N, Z, C and V are implemented.
+  if ((reg_and_mask == CPSR_f) && ((value & ~kSpecialCondition) == 0)) {
+    n_flag_ = ((value & (1 << 31)) != 0);
+    z_flag_ = ((value & (1 << 30)) != 0);
+    c_flag_ = ((value & (1 << 29)) != 0);
+    v_flag_ = ((value & (1 << 28)) != 0);
+  } else {
+    UNIMPLEMENTED();
+  }
+}
+
+uint32_t Simulator::GetFromSpecialRegister(SRegister reg) {
+  uint32_t result = 0;
+  // Only CPSR_f is implemented.
+  if (reg == CPSR) {
+    if (n_flag_) result |= (1 << 31);
+    if (z_flag_) result |= (1 << 30);
+    if (c_flag_) result |= (1 << 29);
+    if (v_flag_) result |= (1 << 28);
+  } else {
+    UNIMPLEMENTED();
+  }
+  return result;
+}
 
 // Runtime FP routines take:
 // - two double arguments
@@ -1307,11 +1333,12 @@
 
 
 // Calculate C flag value for subtractions.
-bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+bool Simulator::BorrowFrom(int32_t left, int32_t right, int32_t carry) {
   uint32_t uleft = static_cast<uint32_t>(left);
   uint32_t uright = static_cast<uint32_t>(right);
 
-  return (uright > uleft);
+  return (uright > uleft) ||
+         (!carry && (((uright + 1) > uleft) || (uright > (uleft - 1))));
 }
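
The carry-aware BorrowFrom models SBC, which computes left - right - (carry ? 0 : 1); the ARM C flag is then the inverse of the borrow. The expression above avoids 64-bit arithmetic; an equivalent reference that states the intended condition directly might look like this (illustrative, not V8 code):

```cpp
#include <cstdint>

// Borrow occurs when the amount being subtracted, right + (carry ? 0 : 1),
// exceeds left; widening to 64 bits sidesteps the overflow of right + 1.
bool BorrowFromRef(uint32_t left, uint32_t right, bool carry) {
  uint64_t subtrahend = static_cast<uint64_t>(right) + (carry ? 0u : 1u);
  return subtrahend > static_cast<uint64_t>(left);
}
```
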
 
 
@@ -2312,7 +2339,22 @@
       return;
     }
   } else if ((type == 0) && instr->IsMiscType0()) {
-    if (instr->Bits(22, 21) == 1) {
+    if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
+        (instr->Bits(15, 4) == 0xf00)) {
+      // MSR
+      int rm = instr->RmValue();
+      DCHECK_NE(pc, rm);  // UNPREDICTABLE
+      SRegisterFieldMask sreg_and_mask =
+          instr->BitField(22, 22) | instr->BitField(19, 16);
+      SetSpecialRegister(sreg_and_mask, get_register(rm));
+    } else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
+               (instr->Bits(11, 0) == 0)) {
+      // MRS
+      int rd = instr->RdValue();
+      DCHECK_NE(pc, rd);  // UNPREDICTABLE
+      SRegister sreg = static_cast<SRegister>(instr->BitField(22, 22));
+      set_register(rd, GetFromSpecialRegister(sreg));
+    } else if (instr->Bits(22, 21) == 1) {
       int rm = instr->RmValue();
       switch (instr->BitField(7, 4)) {
         case BX:
@@ -2452,8 +2494,15 @@
       }
 
       case SBC: {
-        Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
-        Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+        //        Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
+        //        Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+        alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1);
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(!BorrowFrom(rn_val, shifter_operand, GetCarry()));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+        }
         break;
       }
 
@@ -3215,7 +3264,7 @@
         if (instr->SzValue() == 0x1) {
           set_d_register_from_double(vd, instr->DoubleImmedVmov());
         } else {
-          UNREACHABLE();  // Not used by v8.
+          set_s_register_from_float(d, instr->DoubleImmedVmov());
         }
       } else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
         // vrintz - truncate
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 6567607..b3c8eb4 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -262,7 +262,7 @@
   void SetCFlag(bool val);
   void SetVFlag(bool val);
   bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
-  bool BorrowFrom(int32_t left, int32_t right);
+  bool BorrowFrom(int32_t left, int32_t right, int32_t carry = 1);
   bool OverflowFrom(int32_t alu_out,
                     int32_t left,
                     int32_t right,
@@ -363,6 +363,9 @@
   template<class InputType, int register_size>
       void SetVFPRegister(int reg_index, const InputType& value);
 
+  void SetSpecialRegister(SRegisterFieldMask reg_and_mask, uint32_t value);
+  uint32_t GetFromSpecialRegister(SRegister reg);
+
   void CallInternal(byte* entry);
 
   // Architecture state.
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index aeca563..6191216 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -41,6 +41,18 @@
   }
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+                                   icache_flush_mode);
+}
 
 inline int CPURegister::code() const {
   DCHECK(IsValid());
@@ -693,6 +705,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index ea7a732..2471d5e 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -56,7 +56,10 @@
   if (cpu.implementer() == base::CPU::NVIDIA &&
       cpu.variant() == base::CPU::NVIDIA_DENVER &&
       cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
-    supported_ |= 1u << COHERENT_CACHE;
+    // TODO(jkummerow): This is turned off as an experiment to see if it
+    // affects crash rates. Keep an eye on crash reports and either remove
+    // coherent cache support permanently, or re-enable it!
+    // supported_ |= 1u << COHERENT_CACHE;
   }
 }
 
@@ -437,7 +440,8 @@
   DCHECK(mode != RelocInfo::NONE32);
 
   return RelocInfo::IsNone(mode) ||
-         (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
+         (!assm_->serializer_enabled() &&
+          (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
 }
 
 
@@ -2871,7 +2875,7 @@
   // We do not try to reuse pool constants.
   RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
   if (((rmode >= RelocInfo::COMMENT) &&
-       (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
+       (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
       (rmode == RelocInfo::INTERNAL_REFERENCE) ||
       (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
       (rmode == RelocInfo::DEOPT_REASON) ||
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 47786eb..5460254 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -922,7 +922,9 @@
   }
 
   // Debugging ----------------------------------------------------------------
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
   void RecordComment(const char* msg);
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
@@ -2135,8 +2137,8 @@
   void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
 
  private:
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
   friend class EnsureSpace;
   friend class ConstPool;
 };
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index 11f66a4..44bfc17 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -518,6 +518,7 @@
   //  -- x2     : allocation site or undefined
   //  -- x3     : new target
   //  -- lr     : return address
+  //  -- cp     : context pointer
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
@@ -537,6 +538,7 @@
 
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(allocation_site, x10);
+    __ Push(cp);
     __ SmiTag(argc);
     __ Push(allocation_site, argc);
 
@@ -623,7 +625,7 @@
     // x0: result
     // jssp[0]: receiver
     // jssp[1]: number of arguments (smi-tagged)
-    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -763,9 +765,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Clear the context before we push it when entering the internal frame.
-  __ Mov(cp, 0);
-
   {
     // Enter an internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1394,23 +1393,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
-  __ B(hs, &ok);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
-          RelocInfo::CODE_TARGET);
-
-  __ Bind(&ok);
-  __ Ret();
-}
-
-
 // static
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
@@ -1456,6 +1438,29 @@
   __ TailCallRuntime(Runtime::kThrowNotDateError);
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0      : argc
+  //  -- jssp[0] : first argument (left-hand side)
+  //  -- jssp[8] : receiver (right-hand side)
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_FunctionHasInstance");
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Ldr(InstanceOfDescriptor::LeftRegister(),
+           MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
+    __ Ldr(InstanceOfDescriptor::RightRegister(),
+           MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ Drop(2);
+  __ Ret();
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1972,19 +1977,21 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ Mov(scratch1, Operand(debug_is_active));
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ Mov(scratch1, Operand(is_tail_call_elimination_enabled));
   __ Ldrb(scratch1, MemOperand(scratch1));
   __ Cmp(scratch1, Operand(0));
-  __ B(ne, &done);
+  __ B(eq, &done);
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ Ldr(scratch3,
+           MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
     __ B(ne, &no_interpreter_frame);
     __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1992,18 +1999,19 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ Ldr(scratch3,
-         MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+         MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ B(ne, &no_arguments_adaptor);
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
-  __ Ldr(scratch1,
+  __ Ldr(caller_args_count_reg,
          MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
   __ B(&formal_parameter_count_loaded);
 
   __ bind(&no_arguments_adaptor);
@@ -2011,54 +2019,14 @@
   __ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ Ldr(scratch1,
          FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ldrsw(scratch1,
+  __ Ldrsw(caller_args_count_reg,
            FieldMemOperand(scratch1,
                            SharedFunctionInfo::kFormalParameterCountOffset));
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the end of destination area where we will put the arguments
-  // after we drop current frame. We add kPointerSize to count the receiver
-  // argument which is not included into formal parameters count.
-  Register dst_reg = scratch2;
-  __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
-  __ add(dst_reg, dst_reg,
-         Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
-  Register src_reg = scratch1;
-  __ add(src_reg, jssp, Operand(args_reg, LSL, kPointerSizeLog2));
-  // Count receiver argument as well (not included in args_reg).
-  __ add(src_reg, src_reg, Operand(kPointerSize));
-
-  if (FLAG_debug_code) {
-    __ Cmp(src_reg, dst_reg);
-    __ Check(lo, kStackAccessBelowStackPointer);
-  }
-
-  // Restore caller's frame pointer and return address now as they will be
-  // overwritten by the copying loop.
-  __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-  __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-
-  // Both src_reg and dst_reg are pointing to the word after the one to copy,
-  // so they must be pre-decremented in the loop.
-  Register tmp_reg = scratch3;
-  Label loop, entry;
-  __ B(&entry);
-  __ bind(&loop);
-  __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
-  __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
-  __ bind(&entry);
-  __ Cmp(jssp, src_reg);
-  __ B(ne, &loop);
-
-  // Leave current frame.
-  __ Mov(jssp, dst_reg);
-  __ SetStackPointer(jssp);
-  __ AssertStackConsistency();
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
   __ bind(&done);
 }
 }  // namespace
@@ -2610,30 +2578,6 @@
     Register copy_to = x12;
     Register scratch1 = x13, scratch2 = x14;
 
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ Ldr(scratch1,
-           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    __ Ldr(scratch2.W(),
-           FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
-    __ TestAndBranchIfAllClear(scratch2.W(),
-                               (1 << SharedFunctionInfo::kStrongModeFunction),
-                               &no_strong_error);
-
-    // What we really care about is the required number of arguments.
-    DCHECK_EQ(kPointerSize, kInt64Size);
-    __ Ldr(scratch2.W(),
-           FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
-    __ Cmp(argc_actual, Operand(scratch2, LSR, 1));
-    __ B(ge, &no_strong_error);
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ Bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentAdaptorStackCheck(masm, &stack_overflow);
 
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index ad566e6..ee40535 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -4,8 +4,9 @@
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
@@ -81,6 +82,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -425,7 +430,9 @@
 }
 
 
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
 // See call site for description.
 static void EmitCheckForInternalizedStringsOrObjects(
     MacroAssembler* masm, Register left, Register right, Register left_map,
@@ -435,7 +442,7 @@
   Register result = x0;
   DCHECK(left.is(x0) || right.is(x0));
 
-  Label object_test, return_unequal, undetectable;
+  Label object_test, return_equal, return_unequal, undetectable;
   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
   // TODO(all): reexamine this branch sequence for optimisation wrt branch
   // prediction.
@@ -463,12 +470,22 @@
   __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
   __ B(lt, runtime_call);
 
-  __ bind(&return_unequal);
+  __ Bind(&return_unequal);
   // Return non-equal by returning the non-zero object pointer in x0.
   __ Ret();
 
-  __ bind(&undetectable);
+  __ Bind(&undetectable);
   __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
+
+  // If both sides are JSReceivers, then the result is false according to
+  // the HTML specification, which says that only comparisons with null or
+  // undefined are affected by special casing for document.all.
+  __ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
+  __ B(eq, &return_equal);
+  __ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
+  __ B(ne, &return_unequal);
+
+  __ Bind(&return_equal);
   __ Mov(result, EQUAL);
   __ Ret();
 }
@@ -1324,7 +1341,7 @@
   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
   __ Ldr(x10, MemOperand(x11));
 
-  __ Push(x13, xzr, x12, x10);
+  __ Push(x13, x12, xzr, x10);
   // Set up fp.
   __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
 
@@ -1544,8 +1561,11 @@
   __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
                          &slow_case);
 
-  // Ensure that {function} has an instance prototype.
+  // Go to the runtime if the function is not a constructor.
   __ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+  __ Tbz(scratch, Map::kIsConstructor, &slow_case);
+
+  // Ensure that {function} has an instance prototype.
   __ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
 
   // Get the "prototype" (or initial map) of the {function}.
@@ -1612,27 +1632,8 @@
   // Slow-case: Call the %InstanceOf runtime function.
   __ bind(&slow_case);
   __ Push(object, function);
-  __ TailCallRuntime(Runtime::kInstanceOf);
-}
-
-
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is in lr.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-
-  // Check that the key is an array index, that is Uint32.
-  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
-  // Everything is fine, call runtime.
-  __ Push(receiver, key);
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ Bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -2856,10 +2857,17 @@
 
   // Handle more complex cases in runtime.
   __ Bind(&runtime);
-  __ Push(lhs, rhs);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(lhs, rhs);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ LoadRoot(x1, Heap::kTrueValueRootIndex);
+    __ Sub(x0, x0, x1);
+    __ Ret();
   } else {
+    __ Push(lhs, rhs);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3227,27 +3235,28 @@
   __ Bind(&not_smi);
 
   Label not_heap_number;
-  __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
-  __ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
-  // x0: object
-  // x1: instance type
-  __ Cmp(x1, HEAP_NUMBER_TYPE);
+  __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
+  // x0: receiver
+  // x1: receiver instance type
   __ B(ne, &not_heap_number);
   __ Ret();
   __ Bind(&not_heap_number);
 
-  Label not_string, slow_string;
-  __ Cmp(x1, FIRST_NONSTRING_TYPE);
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in x0.
+  __ AssertNotNumber(x0);
+
+  Label not_string;
+  __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
+  // x0: receiver
+  // x1: receiver instance type
   __ B(hs, &not_string);
-  // Check if string has a cached array index.
-  __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
-  __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
-  __ B(ne, &slow_string);
-  __ IndexFromHash(x2, x0);
-  __ Ret();
-  __ Bind(&slow_string);
-  __ Push(x0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
   __ Bind(&not_string);
 
   Label not_oddball;
@@ -3261,22 +3270,23 @@
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in x0.
+  __ AssertString(x0);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes one argument in x0.
-  Label not_smi;
-  __ JumpIfNotSmi(x0, &not_smi);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Tst(x0, x0);
-  __ Csel(x0, x0, Operand(0), ge);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
+  __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
+  __ B(ne, &runtime);
+  __ IndexFromHash(x2, x0);
   __ Ret();
-  __ Bind(&not_smi);
 
+  __ Bind(&runtime);
   __ Push(x0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in x0.
   Label is_number;
@@ -3449,43 +3459,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x1    : left
-  //  -- x0    : right
-  //  -- lr    : return address
-  // -----------------------------------
-  __ AssertString(x1);
-  __ AssertString(x0);
-
-  Label not_same;
-  __ Cmp(x0, x1);
-  __ B(ne, &not_same);
-  __ Mov(x0, Smi::FromInt(EQUAL));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
-                      x4);
-  __ Ret();
-
-  __ Bind(&not_same);
-
-  // Check that both objects are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfEitherIsNotSequentialOneByteStrings(x1, x0, x12, x13, &runtime);
-
-  // Compare flat one-byte strings natively.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
-                      x4);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, x1, x0, x12, x13, x14,
-                                                  x15);
-
-  // Call the runtime.
-  // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
-  __ Bind(&runtime);
-  __ Push(x1, x0);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- x1    : left
@@ -3682,7 +3655,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ Ldr(x1, MemOperand(fp, parameter_count_offset));
   if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ Add(x1, x1, 1);
@@ -4972,7 +4945,7 @@
     __ Bind(&loop);
     __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
     __ Bind(&loop_entry);
-    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
     __ Cmp(x3, x1);
     __ B(ne, &loop);
   }
@@ -4980,8 +4953,8 @@
   // Check if we have rest parameters (only possible if we have an
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
-  __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kContextOffset));
+  __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
+  __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ B(ne, &no_rest_parameters);
 
@@ -5137,8 +5110,9 @@
   Label runtime;
   Label adaptor_frame, try_allocate;
   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(caller_ctx, MemOperand(caller_fp,
-                                StandardFrameConstants::kContextOffset));
+  __ Ldr(
+      caller_ctx,
+      MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ B(eq, &adaptor_frame);
 
@@ -5401,7 +5375,7 @@
     __ Bind(&loop);
     __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
     __ Bind(&loop_entry);
-    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
     __ Cmp(x3, x1);
     __ B(ne, &loop);
   }
@@ -5409,7 +5383,7 @@
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(x4, MemOperand(x3, StandardFrameConstants::kContextOffset));
+  __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ B(eq, &arguments_adaptor);
   {
@@ -5804,16 +5778,12 @@
   __ B(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- x0                  : callee
   //  -- x4                  : call_data
   //  -- x2                  : holder
   //  -- x1                  : api_function_address
-  //  -- x3                  : number of arguments if argc is a register
   //  -- cp                  : context
   //  --
   //  -- sp[0]               : last argument
@@ -5839,17 +5809,15 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || x3.is(argc.reg()));
-
   // FunctionCallbackArguments: context, callee and call data.
   __ Push(context, callee, call_data);
 
-  if (!is_lazy) {
+  if (!is_lazy()) {
     // Load context from callee
     __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   }
 
-  if (!call_data_undefined) {
+  if (!call_data_undefined()) {
     __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
   }
   Register isolate_reg = x5;
@@ -5878,26 +5846,13 @@
   // x0 = FunctionCallbackInfo&
   // Arguments is after the return address.
   __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
-  if (argc.is_immediate()) {
-    // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
-    __ Add(x10, args,
-           Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
-    __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc and
-    // FunctionCallbackInfo::is_construct_call = 0
-    __ Mov(x10, argc.immediate());
-    __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
-  } else {
-    // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
-    __ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
-    __ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
-    __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc and
-    // FunctionCallbackInfo::is_construct_call
-    __ Add(x10, argc.reg(), FCA::kArgsLength + 1);
-    __ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
-    __ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
-  }
+  // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+  __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+  __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc and
+  // FunctionCallbackInfo::is_construct_call = 0
+  __ Mov(x10, argc());
+  __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5907,7 +5862,7 @@
       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument
   int return_value_offset = 0;
-  if (return_first_arg) {
+  if (is_store()) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5917,10 +5872,8 @@
   MemOperand is_construct_call_operand =
       MemOperand(masm->StackPointer(), 4 * kPointerSize);
   MemOperand* stack_space_operand = &is_construct_call_operand;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_operand = NULL;
-  }
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_operand = NULL;
 
   const int spill_offset = 1 + kApiStackSpace;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@@ -5929,23 +5882,6 @@
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- sp[0]                         : name
diff --git a/src/arm64/cpu-arm64.cc b/src/arm64/cpu-arm64.cc
index 37bb4a2..712dbbd 100644
--- a/src/arm64/cpu-arm64.cc
+++ b/src/arm64/cpu-arm64.cc
@@ -19,7 +19,7 @@
     cache_type_register_ = 0;
 #else
     // Copy the content of the cache type register to a core register.
-    __asm__ __volatile__("mrs %[ctr], ctr_el0"  // NOLINT
+    __asm__ __volatile__("mrs %x[ctr], ctr_el0"  // NOLINT
                          : [ctr] "=r"(cache_type_register_));
 #endif
   }
diff --git a/src/arm64/deoptimizer-arm64.cc b/src/arm64/deoptimizer-arm64.cc
index 3aa1e4d..fe2a269 100644
--- a/src/arm64/deoptimizer-arm64.cc
+++ b/src/arm64/deoptimizer-arm64.cc
@@ -65,12 +65,6 @@
 }
 
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  // There is no dynamic alignment padding on ARM64 in the input frame.
-  return false;
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   ApiFunction function(descriptor->deoptimization_handler());
@@ -132,12 +126,17 @@
   // address for lazy deoptimization.
   __ Mov(code_object, lr);
   // Compute the fp-to-sp delta, and correct one word for bailout id.
-  __ Add(fp_to_sp, masm()->StackPointer(),
+  __ Add(fp_to_sp, __ StackPointer(),
          kSavedRegistersAreaSize + (1 * kPointerSize));
   __ Sub(fp_to_sp, fp, fp_to_sp);
 
   // Allocate a new deoptimizer object.
+  __ Mov(x0, 0);
+  Label context_check;
+  __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(x1, &context_check);
   __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
   __ Mov(x1, type());
   // Following arguments are already loaded:
   //  - x2: bailout id
@@ -212,6 +211,9 @@
   }
   __ Pop(x4);  // Restore deoptimizer object (class Deoptimizer).
 
+  __ Ldr(__ StackPointer(),
+         MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
diff --git a/src/arm64/frames-arm64.h b/src/arm64/frames-arm64.h
index 7835144..f1e45f5 100644
--- a/src/arm64/frames-arm64.h
+++ b/src/arm64/frames-arm64.h
@@ -34,16 +34,11 @@
       -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize            =  2 * kPointerSize;
-
-  static const int kCallerSPDisplacement =  2 * kPointerSize;
-  static const int kCallerPCOffset       =  1 * kPointerSize;
-  static const int kCallerFPOffset       =  0 * kPointerSize;   // <- fp
-  static const int kSPOffset             = -1 * kPointerSize;
-  static const int kCodeOffset           = -2 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
   static const int kLastExitFrameField   = kCodeOffset;
 
   static const int kConstantPoolOffset   = 0;  // Not used
@@ -59,7 +54,7 @@
   // the arguments.
   static const int kLastParameterOffset = 2 * kPointerSize;
 
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 };
 
 
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index c6ae37e..f307aeb 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -114,37 +114,8 @@
 }
 
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x0: value
-  Register registers[] = {x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return x0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return x0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return x0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x0: value
-  Register registers[] = {x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -294,6 +265,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -336,6 +314,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (arg count)
+  Register registers[] = {x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -346,22 +330,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x0: value to compare
-  Register registers[] = {x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x0: value
-  Register registers[] = {x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // x1: left operand
@@ -444,25 +412,7 @@
                                    &default_descriptor);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  static PlatformInterfaceDescriptor default_descriptor =
-      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
-  Register registers[] = {
-      x0,  // callee
-      x4,  // call_data
-      x2,  // holder
-      x1,  // api_function_address
-      x3,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers,
-                                   &default_descriptor);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 953c3fd..12ddd81 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1355,6 +1355,14 @@
   }
 }
 
+void MacroAssembler::AssertCspAligned() {
+  if (emit_debug_code() && use_real_aborts()) {
+    // TODO(titzer): use a real assert for alignment check?
+    UseScratchRegisterScope scope(this);
+    Register temp = scope.AcquireX();
+    ldr(temp, MemOperand(csp));
+  }
+}
 
 void MacroAssembler::AssertFPCRState(Register fpcr) {
   if (emit_debug_code()) {
@@ -1548,24 +1556,38 @@
                                                      Register scratch1,
                                                      Register scratch2,
                                                      Label* no_memento_found) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
 
-  Add(scratch1, receiver,
-      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
-  Cmp(scratch1, new_space_start);
-  B(lt, no_memento_found);
-
-  Mov(scratch2, new_space_allocation_top);
-  Ldr(scratch2, MemOperand(scratch2));
-  Cmp(scratch1, scratch2);
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver, no_memento_found);
+  Add(scratch1, receiver, kMementoEndOffset);
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  Eor(scratch2, scratch1, new_space_allocation_top);
+  Tst(scratch2, ~Page::kPageAlignmentMask);
+  B(eq, &top_check);
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  Eor(scratch2, scratch1, receiver);
+  Tst(scratch2, ~Page::kPageAlignmentMask);
+  B(ne, no_memento_found);
+  // Continue with the actual map check.
+  jmp(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  Cmp(scratch1, new_space_allocation_top);
   B(gt, no_memento_found);
-
-  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
-  Cmp(scratch1,
-      Operand(isolate()->factory()->allocation_memento_map()));
+  // Memento map check.
+  bind(&map_check);
+  Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
+  Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
 }
 
 
@@ -1690,6 +1712,18 @@
   }
 }
 
+void MacroAssembler::AssertNotNumber(Register value) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    Tst(value, kSmiTagMask);
+    Check(ne, kOperandIsANumber);
+    Label done;
+    JumpIfNotHeapNumber(value, &done);
+    Abort(kOperandIsANumber);
+    Bind(&done);
+  }
+}
+
 void MacroAssembler::AssertNumber(Register value) {
   if (emit_debug_code()) {
     Label done;
@@ -2330,6 +2364,66 @@
   B(ne, not_unique_name);
 }
 
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+                                        Register caller_args_count_reg,
+                                        Register scratch0, Register scratch1) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+#endif
+
+  // Calculate the end of the destination area where we will put the arguments
+  // after we drop the current frame. We add kPointerSize to count the receiver
+  // argument, which is not included in the formal parameter count.
+  Register dst_reg = scratch0;
+  __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
+  __ add(dst_reg, dst_reg,
+         Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = caller_args_count_reg;
+  // Calculate the end of source area. +kPointerSize is for the receiver.
+  if (callee_args_count.is_reg()) {
+    add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+    add(src_reg, src_reg, Operand(kPointerSize));
+  } else {
+    add(src_reg, jssp,
+        Operand((callee_args_count.immediate() + 1) * kPointerSize));
+  }
+
+  if (FLAG_debug_code) {
+    __ Cmp(src_reg, dst_reg);
+    __ Check(lo, kStackAccessBelowStackPointer);
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy the callee arguments to the caller frame, going backwards to
+  // avoid corrupting them (the source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch1;
+  Label loop, entry;
+  __ B(&entry);
+  __ bind(&loop);
+  __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+  __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+  __ bind(&entry);
+  __ Cmp(jssp, src_reg);
+  __ B(ne, &loop);
+
+  // Leave current frame.
+  __ Mov(jssp, dst_reg);
+  __ SetStackPointer(jssp);
+  __ AssertStackConsistency();
+}
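
A rough C-level model of the copy loop above, assuming a downward-growing stack viewed as an array of word slots; the register assignments and frame constants of the real code are not modeled.

#include <cstddef>

// src and dst both start one past the last word to move and are
// pre-decremented, exactly like the Ldr/Str loop above. Copying from the
// highest slot downwards is what makes the overlapping move safe.
void** CollapseFrameForTailCall(void** stack_top,          // current jssp
                                size_t callee_arg_slots,   // args + receiver
                                void** caller_args_end) {  // end of dropped area
  void** src = stack_top + callee_arg_slots;
  void** dst = caller_args_end;
  while (src != stack_top) {
    *--dst = *--src;
  }
  return dst;  // the new stack top, as in Mov(jssp, dst_reg)
}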
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -2651,18 +2745,17 @@
   Bind(&done);
 }
 
-
-void MacroAssembler::StubPrologue() {
+void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
   UseScratchRegisterScope temps(this);
+  frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
   Register temp = temps.AcquireX();
-  __ Mov(temp, Smi::FromInt(StackFrame::STUB));
-  // Compiled stubs don't age, and so they don't need the predictable code
-  // ageing sequence.
-  __ Push(lr, fp, cp, temp);
-  __ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+  Mov(temp, Smi::FromInt(type));
+  Push(lr, fp);
+  Mov(fp, StackPointer());
+  Claim(frame_slots);
+  str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
 }
 
-
 void MacroAssembler::Prologue(bool code_pre_aging) {
   if (code_pre_aging) {
     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
@@ -2694,18 +2787,26 @@
   Register type_reg = temps.AcquireX();
   Register code_reg = temps.AcquireX();
 
-  Push(lr, fp, cp);
-  Mov(type_reg, Smi::FromInt(type));
-  Mov(code_reg, Operand(CodeObject()));
-  Push(type_reg, code_reg);
-  // jssp[4] : lr
-  // jssp[3] : fp
-  // jssp[2] : cp
-  // jssp[1] : type
-  // jssp[0] : code object
-
-  // Adjust FP to point to saved FP.
-  Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+  if (type == StackFrame::INTERNAL) {
+    Mov(type_reg, Smi::FromInt(type));
+    Push(lr, fp);
+    Push(type_reg);
+    Mov(code_reg, Operand(CodeObject()));
+    Push(code_reg);
+    Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
+    // jssp[3] : lr
+    // jssp[2] : fp
+    // jssp[1] : type
+    // jssp[0] : [code object]
+  } else {
+    Mov(type_reg, Smi::FromInt(type));
+    Push(lr, fp);
+    Push(type_reg);
+    Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
+    // jssp[2] : lr
+    // jssp[1] : fp
+    // jssp[0] : type
+  }
 }
 
 
@@ -2746,20 +2847,23 @@
   DCHECK(jssp.Is(StackPointer()));
 
   // Set up the new stack frame.
-  Mov(scratch, Operand(CodeObject()));
   Push(lr, fp);
   Mov(fp, StackPointer());
-  Push(xzr, scratch);
+  Mov(scratch, Smi::FromInt(StackFrame::EXIT));
+  Push(scratch);
+  Push(xzr);
+  Mov(scratch, Operand(CodeObject()));
+  Push(scratch);
   //          fp[8]: CallerPC (lr)
   //    fp -> fp[0]: CallerFP (old fp)
-  //          fp[-8]: Space reserved for SPOffset.
-  //  jssp -> fp[-16]: CodeObject()
-  STATIC_ASSERT((2 * kPointerSize) ==
-                ExitFrameConstants::kCallerSPDisplacement);
+  //          fp[-8]: STUB marker
+  //          fp[-16]: Space reserved for SPOffset.
+  //  jssp -> fp[-24]: CodeObject()
+  STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
   STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
   STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
-  STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
-  STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+  STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
+  STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
 
   // Save the frame pointer and context pointer in the top frame.
   Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
@@ -2769,8 +2873,7 @@
                                          isolate())));
   Str(cp, MemOperand(scratch));
 
-  STATIC_ASSERT((-2 * kPointerSize) ==
-                ExitFrameConstants::kLastExitFrameField);
+  STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
   if (save_doubles) {
     ExitFramePreserveFPRegs();
   }
@@ -2781,9 +2884,10 @@
   Claim(extra_space + 1, kXRegSize);
   //         fp[8]: CallerPC (lr)
   //   fp -> fp[0]: CallerFP (old fp)
-  //         fp[-8]: Space reserved for SPOffset.
-  //         fp[-16]: CodeObject()
-  //         fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+  //         fp[-8]: STUB marker
+  //         fp[-16]: Space reserved for SPOffset.
+  //         fp[-24]: CodeObject()
+  //         fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
   //         jssp[8]: Extra space reserved for caller (if extra_space != 0).
   // jssp -> jssp[0]: Space reserved for the return address.
 
@@ -2793,9 +2897,10 @@
 
   //         fp[8]: CallerPC (lr)
   //   fp -> fp[0]: CallerFP (old fp)
-  //         fp[-8]: Space reserved for SPOffset.
-  //         fp[-16]: CodeObject()
-  //         fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+  //         fp[-8]: STUB marker
+  //         fp[-16]: Space reserved for SPOffset.
+  //         fp[-24]: CodeObject()
+  //         fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
   //         csp[8]: Memory reserved for the caller if extra_space != 0.
   //                 Alignment padding, if necessary.
   //  csp -> csp[0]: Space reserved for the return address.
@@ -3678,8 +3783,19 @@
   DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
   Label same_contexts;
 
-  // Load current lexical context from the stack frame.
-  Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  Mov(scratch2, fp);
+  bind(&load_context);
+  Ldr(scratch1,
+      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  JumpIfNotSmi(scratch1, &has_context);
+  Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
+  B(&load_context);
+  bind(&has_context);
+
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
   Cmp(scratch1, 0);
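
A sketch of the frame walk being emitted here, assuming V8's usual Smi tagging (low bit clear); the struct is illustrative and does not reproduce the real frame layout.

#include <cstdint>

// Typed (e.g. STUB) frames store a Smi frame-type marker in the slot where a
// standard JS frame stores its context, so walking the caller-FP chain until
// that slot holds a heap object finds the active lexical context.
struct Frame {
  Frame* caller_fp;                 // CommonFrameConstants::kCallerFPOffset
  uintptr_t context_or_frame_type;  // kContextOrFrameTypeOffset
};

inline bool IsSmi(uintptr_t word) { return (word & 1) == 0; }

uintptr_t FindLexicalContext(const Frame* fp) {
  while (IsSmi(fp->context_or_frame_type)) {
    fp = fp->caller_fp;  // skip past the typed frame
  }
  return fp->context_or_frame_type;  // a tagged Context pointer
}
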
@@ -3916,13 +4032,12 @@
   Str(scratch1, MemOperand(scratch2));
   // Call stub on end of buffer.
   // Check for end of buffer.
-  DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
-         (1 << (14 + kPointerSizeLog2)));
+  Tst(scratch1, StoreBuffer::kStoreBufferMask);
   if (and_then == kFallThroughAtEnd) {
-    Tbz(scratch1, (14 + kPointerSizeLog2), &done);
+    B(ne, &done);
   } else {
     DCHECK(and_then == kReturnAtEnd);
-    Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
+    B(eq, &store_buffer_overflow);
     Ret();
   }
 
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index ff41c4f..4b6b3c0 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -788,6 +788,9 @@
   // If emit_debug_code() is false, this emits no code.
   void AssertStackConsistency();
 
+  // Emits a runtime assert that the CSP is aligned.
+  void AssertCspAligned();
+
   // Preserve the callee-saved registers (as defined by AAPCS64).
   //
   // Higher-numbered registers are pushed before lower-numbered registers, and
@@ -895,6 +898,7 @@
   // This is required for compatibility with architecture independent code.
   // Remove if not needed.
   inline void Move(Register dst, Register src) { Mov(dst, src); }
+  inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
   inline void Move(Register dst, Smi* src) { Mov(dst, src); }
 
   void LoadInstanceDescriptors(Register map,
@@ -986,6 +990,7 @@
 
   // Abort execution if argument is not a number (heap number or smi).
   void AssertNumber(Register value);
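+  // Abort execution if argument is a number (heap number or smi).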
+  void AssertNotNumber(Register value);
 
   void JumpIfHeapNumber(Register object, Label* on_heap_number,
                         SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
@@ -1165,6 +1170,15 @@
                       RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                       TypeFeedbackId ast_id = TypeFeedbackId::None());
 
+  // Removes the current frame and its arguments from the stack, preserving
+  // the arguments and the return address pushed onto the stack for the next
+  // call. Neither |callee_args_count| nor |caller_args_count_reg| includes
+  // the receiver. |callee_args_count| is not modified; |caller_args_count_reg|
+  // is trashed.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1);
+
   // Registers used through the invocation chain are hard-coded.
   // We force passing the parameters to ensure the contracts are correctly
   // honoured by the caller.
@@ -1621,7 +1635,7 @@
   void ExitFrameRestoreFPRegs();
 
   // Generates function and stub prologue code.
-  void StubPrologue();
+  void StubPrologue(StackFrame::Type type, int frame_slots);
   void Prologue(bool code_pre_aging);
 
   // Enter exit frame. Exit frames are used when calling C code from generated
diff --git a/src/assembler.cc b/src/assembler.cc
index 5c8c2ce..a912bb6 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -60,7 +60,8 @@
 #include "src/register-configuration.h"
 #include "src/runtime/runtime.h"
 #include "src/simulator.h"  // For flushing instruction cache.
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/serializer-common.h"
+#include "src/wasm/wasm-external-refs.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "src/ia32/assembler-ia32-inl.h"  // NOLINT
@@ -76,6 +77,8 @@
 #include "src/mips/assembler-mips-inl.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/mips64/assembler-mips64-inl.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390-inl.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/assembler-x87-inl.h"  // NOLINT
 #else
@@ -98,6 +101,8 @@
 #include "src/regexp/mips/regexp-macro-assembler-mips.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/regexp/mips64/regexp-macro-assembler-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/regexp/x87/regexp-macro-assembler-x87.h"  // NOLINT
 #else  // Unknown architecture.
@@ -833,10 +838,14 @@
       return "debug break slot at return";
     case DEBUG_BREAK_SLOT_AT_CALL:
       return "debug break slot at call";
+    case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
+      return "debug break slot at tail call";
     case CODE_AGE_SEQUENCE:
       return "code age sequence";
     case GENERATOR_CONTINUATION:
       return "generator continuation";
+    case WASM_MEMORY_REFERENCE:
+      return "wasm memory reference";
     case NUMBER_OF_MODES:
     case PC_JUMP:
       UNREACHABLE();
@@ -929,7 +938,9 @@
     case DEBUG_BREAK_SLOT_AT_POSITION:
     case DEBUG_BREAK_SLOT_AT_RETURN:
     case DEBUG_BREAK_SLOT_AT_CALL:
+    case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
     case GENERATOR_CONTINUATION:
+    case WASM_MEMORY_REFERENCE:
     case NONE32:
     case NONE64:
       break;
@@ -1147,66 +1158,199 @@
       Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
 }
 
-static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
-
-ExternalReference ExternalReference::f32_trunc_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_trunc_wrapper)));
-}
-
-static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
-
-ExternalReference ExternalReference::f32_floor_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_floor_wrapper)));
-}
-
-static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
-
-ExternalReference ExternalReference::f32_ceil_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_ceil_wrapper)));
-}
-
-static void f32_nearest_int_wrapper(float* param) {
-  *param = nearbyintf(*param);
-}
-
-ExternalReference ExternalReference::f32_nearest_int_wrapper_function(
-    Isolate* isolate) {
+ExternalReference ExternalReference::wasm_f32_trunc(Isolate* isolate) {
   return ExternalReference(
-      Redirect(isolate, FUNCTION_ADDR(f32_nearest_int_wrapper)));
+      Redirect(isolate, FUNCTION_ADDR(wasm::f32_trunc_wrapper)));
 }
-
-static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
-
-ExternalReference ExternalReference::f64_trunc_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_trunc_wrapper)));
-}
-
-static void f64_floor_wrapper(double* param) { *param = floor(*param); }
-
-ExternalReference ExternalReference::f64_floor_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_floor_wrapper)));
-}
-
-static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
-
-ExternalReference ExternalReference::f64_ceil_wrapper_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_ceil_wrapper)));
-}
-
-static void f64_nearest_int_wrapper(double* param) {
-  *param = nearbyint(*param);
-}
-
-ExternalReference ExternalReference::f64_nearest_int_wrapper_function(
-    Isolate* isolate) {
+ExternalReference ExternalReference::wasm_f32_floor(Isolate* isolate) {
   return ExternalReference(
-      Redirect(isolate, FUNCTION_ADDR(f64_nearest_int_wrapper)));
+      Redirect(isolate, FUNCTION_ADDR(wasm::f32_floor_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_ceil(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::f32_ceil_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_nearest_int(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::f32_nearest_int_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_trunc(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::f64_trunc_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_floor(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::f64_floor_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_ceil(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::f64_ceil_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_nearest_int(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::f64_nearest_int_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_to_float32(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float32_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_to_float32(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float32_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_to_float64(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_to_float64(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float32_to_int64(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_int64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float32_to_uint64(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_uint64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float64_to_int64(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_int64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float64_to_uint64(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_uint64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_div(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::int64_div_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_mod(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::int64_mod_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_div(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::uint64_div_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_mod(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
+}
+
+static void f64_acos_wrapper(double* param) { *param = std::acos(*param); }
+
+ExternalReference ExternalReference::f64_acos_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
+}
+
+static void f64_asin_wrapper(double* param) { *param = std::asin(*param); }
+
+ExternalReference ExternalReference::f64_asin_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
+}
+
+static void f64_atan_wrapper(double* param) { *param = std::atan(*param); }
+
+ExternalReference ExternalReference::f64_atan_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan_wrapper)));
+}
+
+static void f64_cos_wrapper(double* param) { *param = std::cos(*param); }
+
+ExternalReference ExternalReference::f64_cos_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_cos_wrapper)));
+}
+
+static void f64_sin_wrapper(double* param) { *param = std::sin(*param); }
+
+ExternalReference ExternalReference::f64_sin_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_sin_wrapper)));
+}
+
+static void f64_tan_wrapper(double* param) { *param = std::tan(*param); }
+
+ExternalReference ExternalReference::f64_tan_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_tan_wrapper)));
+}
+
+static void f64_exp_wrapper(double* param) { *param = std::exp(*param); }
+
+ExternalReference ExternalReference::f64_exp_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_exp_wrapper)));
+}
+
+static void f64_log_wrapper(double* param) { *param = std::log(*param); }
+
+ExternalReference ExternalReference::f64_log_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_log_wrapper)));
+}
+
+static void f64_pow_wrapper(double* param0, double* param1) {
+  *param0 = power_double_double(*param0, *param1);
+}
+
+ExternalReference ExternalReference::f64_pow_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_pow_wrapper)));
+}
+
+static void f64_atan2_wrapper(double* param0, double* param1) {
+  double x = *param0;
+  double y = *param1;
+  // TODO(bradnelson): Find a good place to put this to share
+  // with the same code in src/runtime/runtime-math.cc
+  static const double kPiDividedBy4 = 0.78539816339744830962;
+  if (std::isinf(x) && std::isinf(y)) {
+    // Make sure that the result in case of two infinite arguments
+    // is a multiple of Pi / 4. The sign of the result is determined
+    // by the first argument (x) and the sign of the second argument
+    // determines the multiplier: one or three.
+    int multiplier = (x < 0) ? -1 : 1;
+    if (y < 0) multiplier *= 3;
+    *param0 = multiplier * kPiDividedBy4;
+  } else {
+    *param0 = std::atan2(x, y);
+  }
+}
+
+ExternalReference ExternalReference::f64_atan2_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan2_wrapper)));
+}
+
+static void f64_mod_wrapper(double* param0, double* param1) {
+  *param0 = modulo(*param0, *param1);
+}
+
+ExternalReference ExternalReference::f64_mod_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
 }
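
The special case in f64_atan2_wrapper fixes the result for two infinite arguments; the following standalone example (not V8 code) shows the four values it produces.

#include <cmath>
#include <cstdio>
#include <limits>

// Same logic as f64_atan2_wrapper above, reshaped to take arguments by value.
double Atan2WithInfFixup(double x, double y) {
  static const double kPiDividedBy4 = 0.78539816339744830962;
  if (std::isinf(x) && std::isinf(y)) {
    int multiplier = (x < 0) ? -1 : 1;
    if (y < 0) multiplier *= 3;
    return multiplier * kPiDividedBy4;
  }
  return std::atan2(x, y);
}

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  // Prints pi/4, 3*pi/4, -pi/4, -3*pi/4.
  std::printf("%f %f %f %f\n", Atan2WithInfFixup(inf, inf),
              Atan2WithInfFixup(inf, -inf), Atan2WithInfFixup(-inf, inf),
              Atan2WithInfFixup(-inf, -inf));
  return 0;
}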
 
 ExternalReference ExternalReference::log_enter_external_function(
@@ -1262,12 +1406,6 @@
   return ExternalReference(isolate->regexp_stack()->limit_address());
 }
 
-
-ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->NewSpaceStart());
-}
-
-
 ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
   return ExternalReference(isolate->heap()->store_buffer_top_address());
 }
@@ -1404,6 +1542,8 @@
   function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
 #elif V8_TARGET_ARCH_MIPS64
   function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_S390
+  function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
 #elif V8_TARGET_ARCH_X87
   function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
 #else
@@ -1489,6 +1629,10 @@
   return ExternalReference(&CpuFeatures::supported_);
 }
 
+ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
+    Isolate* isolate) {
+  return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
+}
 
 ExternalReference ExternalReference::debug_is_active_address(
     Isolate* isolate) {
@@ -1559,34 +1703,12 @@
 
 
 double power_double_double(double x, double y) {
-#if (defined(__MINGW64_VERSION_MAJOR) &&                              \
-     (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
-    defined(V8_OS_AIX)
-  // MinGW64 and AIX have a custom implementation for pow.  This handles certain
-  // special cases that are different.
-  if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
-    double f;
-    double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
-    /* retain sign if odd integer exponent */
-    return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
-               ? copysign(result, x)
-               : result;
-  }
-
-  if (x == 2.0) {
-    int y_int = static_cast<int>(y);
-    if (y == y_int) {
-      return std::ldexp(1.0, y_int);
-    }
-  }
-#endif
-
   // The checks for special cases can be dropped in ia32 because it has already
   // been done in generated code before bailing out here.
   if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
     return std::numeric_limits<double>::quiet_NaN();
   }
-  return std::pow(x, y);
+  return Pow(x, y);
 }
 
 
@@ -1648,8 +1770,7 @@
   return os;
 }
 
-
-void PositionsRecorder::RecordPosition(int pos) {
+void AssemblerPositionsRecorder::RecordPosition(int pos) {
   DCHECK(pos != RelocInfo::kNoPosition);
   DCHECK(pos >= 0);
   state_.current_position = pos;
@@ -1659,8 +1780,7 @@
                                                  pos));
 }
 
-
-void PositionsRecorder::RecordStatementPosition(int pos) {
+void AssemblerPositionsRecorder::RecordStatementPosition(int pos) {
   DCHECK(pos != RelocInfo::kNoPosition);
   DCHECK(pos >= 0);
   state_.current_statement_position = pos;
@@ -1671,8 +1791,7 @@
                      pos));
 }
 
-
-bool PositionsRecorder::WriteRecordedPositions() {
+bool AssemblerPositionsRecorder::WriteRecordedPositions() {
   bool written = false;
 
   // Write the statement position if it is different from what was written last
diff --git a/src/assembler.h b/src/assembler.h
index 7bd9ee6..192d16b 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -38,6 +38,7 @@
 #include "src/allocation.h"
 #include "src/builtins.h"
 #include "src/isolate.h"
+#include "src/log.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -384,6 +385,8 @@
     DEBUGGER_STATEMENT,  // Code target for the debugger statement.
     EMBEDDED_OBJECT,
     CELL,
+    // To relocate pointers into the wasm memory embedded in wasm code
+    WASM_MEMORY_REFERENCE,
 
     // Everything after runtime_entry (inclusive) is not GC'ed.
     RUNTIME_ENTRY,
@@ -395,6 +398,7 @@
     DEBUG_BREAK_SLOT_AT_POSITION,
     DEBUG_BREAK_SLOT_AT_RETURN,
     DEBUG_BREAK_SLOT_AT_CALL,
+    DEBUG_BREAK_SLOT_AT_TAIL_CALL,
 
     EXTERNAL_REFERENCE,  // The address of an external C++ function.
     INTERNAL_REFERENCE,  // An address inside the same function.
@@ -426,7 +430,8 @@
     FIRST_REAL_RELOC_MODE = CODE_TARGET,
     LAST_REAL_RELOC_MODE = VENEER_POOL,
     LAST_CODE_ENUM = DEBUGGER_STATEMENT,
-    LAST_GCED_ENUM = CELL,
+    LAST_GCED_ENUM = WASM_MEMORY_REFERENCE,
+    FIRST_SHAREABLE_RELOC_MODE = CELL,
   };
 
   STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
@@ -487,7 +492,7 @@
   }
   static inline bool IsDebugBreakSlot(Mode mode) {
     return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
-           IsDebugBreakSlotAtCall(mode);
+           IsDebugBreakSlotAtCall(mode) || IsDebugBreakSlotAtTailCall(mode);
   }
   static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
     return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@@ -498,6 +503,9 @@
   static inline bool IsDebugBreakSlotAtCall(Mode mode) {
     return mode == DEBUG_BREAK_SLOT_AT_CALL;
   }
+  static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
+    return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
+  }
   static inline bool IsDebuggerStatement(Mode mode) {
     return mode == DEBUGGER_STATEMENT;
   }
@@ -510,6 +518,9 @@
   static inline bool IsGeneratorContinuation(Mode mode) {
     return mode == GENERATOR_CONTINUATION;
   }
+  static inline bool IsWasmMemoryReference(Mode mode) {
+    return mode == WASM_MEMORY_REFERENCE;
+  }
   static inline int ModeMask(Mode mode) { return 1 << mode; }
 
   // Accessors
@@ -570,6 +581,10 @@
                                 ICacheFlushMode icache_flush_mode =
                                     FLUSH_ICACHE_IF_NEEDED));
 
+  INLINE(Address wasm_memory_reference());
+  INLINE(void update_wasm_memory_reference(
+      Address old_base, Address new_base, size_t old_size, size_t new_size,
+      ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH));
   // Returns the address of the constant pool entry where the target address
   // is held.  This should only be called if IsInConstantPool returns true.
   INLINE(Address constant_pool_entry_address());
@@ -913,14 +928,38 @@
   static ExternalReference new_deoptimizer_function(Isolate* isolate);
   static ExternalReference compute_output_frames_function(Isolate* isolate);
 
-  static ExternalReference f32_trunc_wrapper_function(Isolate* isolate);
-  static ExternalReference f32_floor_wrapper_function(Isolate* isolate);
-  static ExternalReference f32_ceil_wrapper_function(Isolate* isolate);
-  static ExternalReference f32_nearest_int_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_trunc_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_floor_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_ceil_wrapper_function(Isolate* isolate);
-  static ExternalReference f64_nearest_int_wrapper_function(Isolate* isolate);
+  static ExternalReference wasm_f32_trunc(Isolate* isolate);
+  static ExternalReference wasm_f32_floor(Isolate* isolate);
+  static ExternalReference wasm_f32_ceil(Isolate* isolate);
+  static ExternalReference wasm_f32_nearest_int(Isolate* isolate);
+  static ExternalReference wasm_f64_trunc(Isolate* isolate);
+  static ExternalReference wasm_f64_floor(Isolate* isolate);
+  static ExternalReference wasm_f64_ceil(Isolate* isolate);
+  static ExternalReference wasm_f64_nearest_int(Isolate* isolate);
+  static ExternalReference wasm_int64_to_float32(Isolate* isolate);
+  static ExternalReference wasm_uint64_to_float32(Isolate* isolate);
+  static ExternalReference wasm_int64_to_float64(Isolate* isolate);
+  static ExternalReference wasm_uint64_to_float64(Isolate* isolate);
+  static ExternalReference wasm_float32_to_int64(Isolate* isolate);
+  static ExternalReference wasm_float32_to_uint64(Isolate* isolate);
+  static ExternalReference wasm_float64_to_int64(Isolate* isolate);
+  static ExternalReference wasm_float64_to_uint64(Isolate* isolate);
+  static ExternalReference wasm_int64_div(Isolate* isolate);
+  static ExternalReference wasm_int64_mod(Isolate* isolate);
+  static ExternalReference wasm_uint64_div(Isolate* isolate);
+  static ExternalReference wasm_uint64_mod(Isolate* isolate);
+
+  static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_atan_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_cos_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_sin_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_tan_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_exp_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_log_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_atan2_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_pow_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
 
   // Log support.
   static ExternalReference log_enter_external_function(Isolate* isolate);
@@ -952,9 +991,6 @@
   static ExternalReference address_of_regexp_stack_memory_size(
       Isolate* isolate);
 
-  // Static variable Heap::NewSpaceStart()
-  static ExternalReference new_space_start(Isolate* isolate);
-
   // Write barrier.
   static ExternalReference store_buffer_top(Isolate* isolate);
 
@@ -994,6 +1030,9 @@
 
   static ExternalReference cpu_features();
 
+  static ExternalReference is_tail_call_elimination_enabled_address(
+      Isolate* isolate);
+
   static ExternalReference debug_is_active_address(Isolate* isolate);
   static ExternalReference debug_after_break_target_address(Isolate* isolate);
 
@@ -1085,23 +1124,11 @@
   int written_statement_position;
 };
 
-
-class PositionsRecorder BASE_EMBEDDED {
+class AssemblerPositionsRecorder : public PositionsRecorder {
  public:
-  explicit PositionsRecorder(Assembler* assembler)
-      : assembler_(assembler) {
-    jit_handler_data_ = NULL;
-  }
+  explicit AssemblerPositionsRecorder(Assembler* assembler)
+      : assembler_(assembler) {}
 
-  void AttachJITHandlerData(void* user_data) {
-    jit_handler_data_ = user_data;
-  }
-
-  void* DetachJITHandlerData() {
-    void* old_data = jit_handler_data_;
-    jit_handler_data_ = NULL;
-    return old_data;
-  }
   // Set current position to pos.
   void RecordPosition(int pos);
 
@@ -1121,11 +1148,7 @@
   Assembler* assembler_;
   PositionState state_;
 
-  // Currently jit_handler_data_ is used to store JITHandler-specific data
-  // over the lifetime of a PositionsRecorder
-  void* jit_handler_data_;
-
-  DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
+  DISALLOW_COPY_AND_ASSIGN(AssemblerPositionsRecorder);
 };
 
 
diff --git a/src/ast/ast-numbering.cc b/src/ast/ast-numbering.cc
index 272f9bd..f54333f 100644
--- a/src/ast/ast-numbering.cc
+++ b/src/ast/ast-numbering.cc
@@ -138,7 +138,6 @@
 
 void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kDoExpression);
   node->set_base_id(ReserveIdRange(DoExpression::num_ids()));
   Visit(node->block());
   Visit(node->result());
@@ -267,10 +266,6 @@
 void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
   IncrementNodeCount();
   ReserveFeedbackSlots(node);
-  if (node->is_jsruntime()) {
-    // Don't try to optimize JS runtime calls because we bailout on them.
-    DisableOptimization(kCallToAJavaScriptRuntimeFunction);
-  }
   node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
   VisitArguments(node->arguments());
 }
@@ -504,9 +499,6 @@
 
 void AstNumberingVisitor::VisitCall(Call* node) {
   IncrementNodeCount();
-  if (node->tail_call_mode() == TailCallMode::kAllow) {
-    DisableOptimization(kTailCall);
-  }
   ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Call::num_ids()));
   Visit(node->expression());
@@ -571,12 +563,6 @@
 
 bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   Scope* scope = node->scope();
-
-  if (scope->HasIllegalRedeclaration()) {
-    Visit(scope->GetIllegalRedeclaration());
-    DisableOptimization(kFunctionWithIllegalRedeclaration);
-    return Finish(node);
-  }
   if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
   if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
   if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
diff --git a/src/ast/ast-value-factory.h b/src/ast/ast-value-factory.h
index 85e8277..8b3f0ed 100644
--- a/src/ast/ast-value-factory.h
+++ b/src/ast/ast-value-factory.h
@@ -271,7 +271,6 @@
   F(throw, "throw")                             \
   F(undefined, "undefined")                     \
   F(use_asm, "use asm")                         \
-  F(use_strong, "use strong")                   \
   F(use_strict, "use strict")                   \
   F(value, "value")
 
diff --git a/src/ast/ast.cc b/src/ast/ast.cc
index 9b2c638..e8b6269 100644
--- a/src/ast/ast.cc
+++ b/src/ast/ast.cc
@@ -36,17 +36,11 @@
 
 #ifdef DEBUG
 
-void AstNode::Print() { Print(Isolate::Current()); }
-
-
 void AstNode::Print(Isolate* isolate) {
   AstPrinter::PrintOut(isolate, this);
 }
 
 
-void AstNode::PrettyPrint() { PrettyPrint(Isolate::Current()); }
-
-
 void AstNode::PrettyPrint(Isolate* isolate) {
   PrettyPrinter::PrintOut(isolate, this);
 }
@@ -68,8 +62,11 @@
   return IsLiteral() && AsLiteral()->value()->IsNull();
 }
 
+bool Expression::IsUndefinedLiteral() const {
+  if (IsLiteral() && AsLiteral()->value()->IsUndefined()) {
+    return true;
+  }
 
-bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
   const VariableProxy* var_proxy = AsVariableProxy();
   if (var_proxy == NULL) return false;
   Variable* var = var_proxy->var();
@@ -154,15 +151,11 @@
   }
 }
 
-
-void ForEachStatement::AssignFeedbackVectorSlots(
-    Isolate* isolate, FeedbackVectorSpec* spec,
-    FeedbackVectorSlotCache* cache) {
-  // TODO(adamk): for-of statements do not make use of this feedback slot.
-  // The each_slot_ should be specific to ForInStatement, and this work moved
-  // there.
-  if (IsForOfStatement()) return;
+void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
+                                               FeedbackVectorSpec* spec,
+                                               FeedbackVectorSlotCache* cache) {
   AssignVectorSlots(each(), spec, &each_slot_);
+  for_in_feedback_slot_ = spec->AddGeneralSlot();
 }
 
 
@@ -475,18 +468,15 @@
     // much larger than the number of elements, creating an object
     // literal with fast elements will be a waste of space.
     uint32_t element_index = 0;
-    if (key->IsString()
-        && Handle<String>::cast(key)->AsArrayIndex(&element_index)
-        && element_index > max_element_index) {
-      max_element_index = element_index;
+    if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+      max_element_index = Max(element_index, max_element_index);
       elements++;
-    } else if (key->IsSmi()) {
-      int key_value = Smi::cast(*key)->value();
-      if (key_value > 0
-          && static_cast<uint32_t>(key_value) > max_element_index) {
-        max_element_index = key_value;
-      }
+      key = isolate->factory()->NewNumberFromUint(element_index);
+    } else if (key->ToArrayIndex(&element_index)) {
+      max_element_index = Max(element_index, max_element_index);
       elements++;
+    } else if (key->IsNumber()) {
+      key = isolate->factory()->NumberToString(key);
     }
 
     // Add name, value pair to the fixed array.
@@ -513,7 +503,7 @@
   // Allocate a fixed array to hold all the object literals.
   Handle<JSArray> array = isolate->factory()->NewJSArray(
       FAST_HOLEY_SMI_ELEMENTS, constants_length, constants_length,
-      Strength::WEAK, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
 
   // Fill in the literals.
   bool is_simple = true;
@@ -678,24 +668,21 @@
 static bool MatchLiteralCompareUndefined(Expression* left,
                                          Token::Value op,
                                          Expression* right,
-                                         Expression** expr,
-                                         Isolate* isolate) {
+                                         Expression** expr) {
   if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
     *expr = right;
     return true;
   }
-  if (left->IsUndefinedLiteral(isolate) && Token::IsEqualityOp(op)) {
+  if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) {
     *expr = right;
     return true;
   }
   return false;
 }
 
-
-bool CompareOperation::IsLiteralCompareUndefined(
-    Expression** expr, Isolate* isolate) {
-  return MatchLiteralCompareUndefined(left_, op_, right_, expr, isolate) ||
-      MatchLiteralCompareUndefined(right_, op_, left_, expr, isolate);
+bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
+  return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
+         MatchLiteralCompareUndefined(right_, op_, left_, expr);
 }
 
 
diff --git a/src/ast/ast.h b/src/ast/ast.h
index dcb440d..52bac8e 100644
--- a/src/ast/ast.h
+++ b/src/ast/ast.h
@@ -198,9 +198,7 @@
 
 #ifdef DEBUG
   void PrettyPrint(Isolate* isolate);
-  void PrettyPrint();
   void Print(Isolate* isolate);
-  void Print();
 #endif  // DEBUG
 
   // Type testing & conversion functions overridden by concrete subclasses.
@@ -332,8 +330,9 @@
   // True iff the expression is the null literal.
   bool IsNullLiteral() const;
 
-  // True if we can prove that the expression is the undefined literal.
-  bool IsUndefinedLiteral(Isolate* isolate) const;
+  // True if we can prove that the expression is the undefined literal. Note
+  // that this also checks for loads of the global "undefined" variable.
+  bool IsUndefinedLiteral() const;
 
   // True iff the expression is a valid target for an assignment.
   bool IsValidReferenceExpressionOrThis() const;
@@ -792,10 +791,6 @@
   void set_each(Expression* e) { each_ = e; }
   void set_subject(Expression* e) { subject_ = e; }
 
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache) override;
-  FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
-
   static const char* VisitModeString(VisitMode mode) {
     return mode == ITERATE ? "for-of" : "for-in";
   }
@@ -807,7 +802,6 @@
  private:
   Expression* each_;
   Expression* subject_;
-  FeedbackVectorSlot each_slot_;
 };
 
 
@@ -821,11 +815,8 @@
 
   // Type feedback information.
   void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache) override {
-    ForEachStatement::AssignFeedbackVectorSlots(isolate, spec, cache);
-    for_in_feedback_slot_ = spec->AddGeneralSlot();
-  }
-
+                                 FeedbackVectorSlotCache* cache) override;
+  FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
   FeedbackVectorSlot ForInFeedbackSlot() {
     DCHECK(!for_in_feedback_slot_.IsInvalid());
     return for_in_feedback_slot_;
@@ -854,6 +845,7 @@
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   ForInType for_in_type_;
+  FeedbackVectorSlot each_slot_;
   FeedbackVectorSlot for_in_feedback_slot_;
 };
 
@@ -1191,18 +1183,33 @@
   Block* catch_block() const { return catch_block_; }
   void set_catch_block(Block* b) { catch_block_ = b; }
 
+  // The clear_pending_message flag indicates whether or not to clear the
+  // isolate's pending exception message before executing the catch_block.  In
+  // the normal use case, this flag is always on because the message object
+  // is not needed anymore when entering the catch block and should not be kept
+  // alive.
+  // The use case where the flag is off is when the catch block is guaranteed to
+  // rethrow the caught exception (using %ReThrow), which reuses the pending
+  // message instead of generating a new one.
+  // (When the catch block doesn't rethrow but is guaranteed to perform an
+  // ordinary throw, not clearing the old message is safe but not very useful.)
+  bool clear_pending_message() { return clear_pending_message_; }
+
  protected:
   TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
-                    Variable* variable, Block* catch_block, int pos)
+                    Variable* variable, Block* catch_block,
+                    bool clear_pending_message, int pos)
       : TryStatement(zone, try_block, pos),
         scope_(scope),
         variable_(variable),
-        catch_block_(catch_block) {}
+        catch_block_(catch_block),
+        clear_pending_message_(clear_pending_message) {}
 
  private:
   Scope* scope_;
   Variable* variable_;
   Block* catch_block_;
+  bool clear_pending_message_;
 };
 
 
@@ -1339,14 +1346,11 @@
     return depth_;
   }
 
-  bool is_strong() const { return is_strong_; }
-
  protected:
-  MaterializedLiteral(Zone* zone, int literal_index, bool is_strong, int pos)
+  MaterializedLiteral(Zone* zone, int literal_index, int pos)
       : Expression(zone, pos),
         literal_index_(literal_index),
         is_simple_(false),
-        is_strong_(is_strong),
         depth_(0) {}
 
   // A materialized literal is simple if the values consist of only
@@ -1375,7 +1379,6 @@
  private:
   int literal_index_;
   bool is_simple_;
-  bool is_strong_;
   int depth_;
 
   friend class AstLiteralReindexer;
@@ -1463,7 +1466,6 @@
   ZoneList<Property*>* properties() const { return properties_; }
   bool fast_elements() const { return fast_elements_; }
   bool may_store_doubles() const { return may_store_doubles_; }
-  bool has_function() const { return has_function_; }
   bool has_elements() const { return has_elements_; }
   bool has_shallow_properties() const {
     return depth() == 1 && !has_elements() && !may_store_doubles();
@@ -1483,26 +1485,20 @@
   // Assemble bitfield of flags for the CreateObjectLiteral helper.
   int ComputeFlags(bool disable_mementos = false) const {
     int flags = fast_elements() ? kFastElements : kNoFlags;
-    flags |= has_function() ? kHasFunction : kNoFlags;
     if (has_shallow_properties()) {
       flags |= kShallowProperties;
     }
     if (disable_mementos) {
       flags |= kDisableMementos;
     }
-    if (is_strong()) {
-      flags |= kIsStrong;
-    }
     return flags;
   }
 
   enum Flags {
     kNoFlags = 0,
     kFastElements = 1,
-    kHasFunction = 1 << 1,
-    kShallowProperties = 1 << 2,
-    kDisableMementos = 1 << 3,
-    kIsStrong = 1 << 4
+    kShallowProperties = 1 << 1,
+    kDisableMementos = 1 << 2
   };
 
   struct Accessors: public ZoneObject {
@@ -1534,15 +1530,13 @@
 
  protected:
   ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
-                int boilerplate_properties, bool has_function, bool is_strong,
-                int pos)
-      : MaterializedLiteral(zone, literal_index, is_strong, pos),
+                int boilerplate_properties, int pos)
+      : MaterializedLiteral(zone, literal_index, pos),
         properties_(properties),
         boilerplate_properties_(boilerplate_properties),
         fast_elements_(false),
         has_elements_(false),
-        may_store_doubles_(false),
-        has_function_(has_function) {}
+        may_store_doubles_(false) {}
   static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
 
  private:
@@ -1553,7 +1547,6 @@
   bool fast_elements_;
   bool has_elements_;
   bool may_store_doubles_;
-  bool has_function_;
   FeedbackVectorSlot slot_;
 };
 
@@ -1589,8 +1582,8 @@
 
  protected:
   RegExpLiteral(Zone* zone, const AstRawString* pattern, int flags,
-                int literal_index, bool is_strong, int pos)
-      : MaterializedLiteral(zone, literal_index, is_strong, pos),
+                int literal_index, int pos)
+      : MaterializedLiteral(zone, literal_index, pos),
         pattern_(pattern),
         flags_(flags) {
     set_depth(1);
@@ -1635,9 +1628,6 @@
     if (disable_mementos) {
       flags |= kDisableMementos;
     }
-    if (is_strong()) {
-      flags |= kIsStrong;
-    }
     return flags;
   }
 
@@ -1657,8 +1647,7 @@
   enum Flags {
     kNoFlags = 0,
     kShallowElements = 1,
-    kDisableMementos = 1 << 1,
-    kIsStrong = 1 << 2
+    kDisableMementos = 1 << 1
   };
 
   void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
@@ -1667,9 +1656,8 @@
 
  protected:
   ArrayLiteral(Zone* zone, ZoneList<Expression*>* values,
-               int first_spread_index, int literal_index, bool is_strong,
-               int pos)
-      : MaterializedLiteral(zone, literal_index, is_strong, pos),
+               int first_spread_index, int literal_index, int pos)
+      : MaterializedLiteral(zone, literal_index, pos),
         values_(values),
         first_spread_index_(first_spread_index) {}
   static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
@@ -2313,7 +2301,7 @@
 
   // Match special cases.
   bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
-  bool IsLiteralCompareUndefined(Expression** expr, Isolate* isolate);
+  bool IsLiteralCompareUndefined(Expression** expr);
   bool IsLiteralCompareNull(Expression** expr);
 
  protected:
@@ -2529,37 +2517,29 @@
   Expression* expr_;
 };
 
-
+// Our Yield is different from the JS yield in that it "returns" its argument as
+// is, without wrapping it in an iterator result object.  Such wrapping, if
+// desired, must be done beforehand (see the parser).
 class Yield final : public Expression {
  public:
   DECLARE_NODE_TYPE(Yield)
 
-  enum Kind {
-    kInitial,  // The initial yield that returns the unboxed generator object.
-    kSuspend,  // A normal yield: { value: EXPRESSION, done: false }
-    kDelegating,  // A yield*.
-    kFinal        // A return: { value: EXPRESSION, done: true }
-  };
-
   Expression* generator_object() const { return generator_object_; }
   Expression* expression() const { return expression_; }
-  Kind yield_kind() const { return yield_kind_; }
 
   void set_generator_object(Expression* e) { generator_object_ = e; }
   void set_expression(Expression* e) { expression_ = e; }
 
  protected:
   Yield(Zone* zone, Expression* generator_object, Expression* expression,
-        Kind yield_kind, int pos)
+        int pos)
       : Expression(zone, pos),
         generator_object_(generator_object),
-        expression_(expression),
-        yield_kind_(yield_kind) {}
+        expression_(expression) {}
 
  private:
   Expression* generator_object_;
   Expression* expression_;
-  Kind yield_kind_;
 };
 
 
@@ -3169,8 +3149,17 @@
   TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope,
                                           Variable* variable,
                                           Block* catch_block, int pos) {
-    return new (local_zone_) TryCatchStatement(local_zone_, try_block, scope,
-                                               variable, catch_block, pos);
+    return new (local_zone_) TryCatchStatement(
+        local_zone_, try_block, scope, variable, catch_block, true, pos);
+  }
+
+  TryCatchStatement* NewTryCatchStatementForReThrow(Block* try_block,
+                                                    Scope* scope,
+                                                    Variable* variable,
+                                                    Block* catch_block,
+                                                    int pos) {
+    return new (local_zone_) TryCatchStatement(
+        local_zone_, try_block, scope, variable, catch_block, false, pos);
   }
 
   TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
@@ -3243,12 +3232,9 @@
       ZoneList<ObjectLiteral::Property*>* properties,
       int literal_index,
       int boilerplate_properties,
-      bool has_function,
-      bool is_strong,
       int pos) {
-    return new (local_zone_)
-        ObjectLiteral(local_zone_, properties, literal_index,
-                      boilerplate_properties, has_function, is_strong, pos);
+    return new (local_zone_) ObjectLiteral(
+        local_zone_, properties, literal_index, boilerplate_properties, pos);
   }
 
   ObjectLiteral::Property* NewObjectLiteralProperty(
@@ -3267,24 +3253,23 @@
   }
 
   RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
-                                  int literal_index, bool is_strong, int pos) {
-    return new (local_zone_) RegExpLiteral(local_zone_, pattern, flags,
-                                           literal_index, is_strong, pos);
+                                  int literal_index, int pos) {
+    return new (local_zone_)
+        RegExpLiteral(local_zone_, pattern, flags, literal_index, pos);
   }
 
   ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
                                 int literal_index,
-                                bool is_strong,
                                 int pos) {
     return new (local_zone_)
-        ArrayLiteral(local_zone_, values, -1, literal_index, is_strong, pos);
+        ArrayLiteral(local_zone_, values, -1, literal_index, pos);
   }
 
   ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
                                 int first_spread_index, int literal_index,
-                                bool is_strong, int pos) {
+                                int pos) {
     return new (local_zone_) ArrayLiteral(
-        local_zone_, values, first_spread_index, literal_index, is_strong, pos);
+        local_zone_, values, first_spread_index, literal_index, pos);
   }
 
   VariableProxy* NewVariableProxy(Variable* var,
@@ -3399,11 +3384,10 @@
 
   Yield* NewYield(Expression *generator_object,
                   Expression* expression,
-                  Yield::Kind yield_kind,
                   int pos) {
     if (!expression) expression = NewUndefinedLiteral(pos);
     return new (local_zone_)
-        Yield(local_zone_, generator_object, expression, yield_kind, pos);
+        Yield(local_zone_, generator_object, expression, pos);
   }
 
   Throw* NewThrow(Expression* exception, int pos) {
diff --git a/src/ast/prettyprinter.cc b/src/ast/prettyprinter.cc
index 0e9986a..2a79049 100644
--- a/src/ast/prettyprinter.cc
+++ b/src/ast/prettyprinter.cc
@@ -471,7 +471,7 @@
                           const char* node_name, FeedbackVectorSlot slot) {
   int pos = SNPrintF(*buf, "%s", node_name);
   if (!slot.IsInvalid()) {
-    pos = SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
+    pos += SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
   }
   return pos;
 }
@@ -1563,6 +1563,7 @@
     Variable* var = node->var();
     switch (var->location()) {
       case VariableLocation::UNALLOCATED:
+        SNPrintF(buf + pos, " unallocated");
         break;
       case VariableLocation::PARAMETER:
         SNPrintF(buf + pos, " parameter[%d]", var->index());
@@ -1593,9 +1594,7 @@
 
 
 void AstPrinter::VisitYield(Yield* node) {
-  EmbeddedVector<char, 128> buf;
-  SNPrintF(buf, "YIELD (kind %d)", node->yield_kind());
-  IndentedScope indent(this, buf.start(), node->position());
+  IndentedScope indent(this, "YIELD", node->position());
   Visit(node->expression());
 }
 
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index 7c87ce3..5d4b809 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -100,7 +100,6 @@
               function_kind);
   // The outermost scope must be a script scope.
   DCHECK(scope_type == SCRIPT_SCOPE || outer_scope != NULL);
-  DCHECK(!HasIllegalRedeclaration());
 }
 
 Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
@@ -169,9 +168,7 @@
   function_ = nullptr;
   arguments_ = nullptr;
   this_function_ = nullptr;
-  illegal_redecl_ = nullptr;
   scope_inside_with_ = false;
-  scope_contains_with_ = false;
   scope_calls_eval_ = false;
   scope_uses_arguments_ = false;
   scope_uses_super_property_ = false;
@@ -210,15 +207,14 @@
   // Reconstruct the outer scope chain from a closure's context chain.
   Scope* current_scope = NULL;
   Scope* innermost_scope = NULL;
-  bool contains_with = false;
   while (!context->IsNativeContext()) {
-    if (context->IsWithContext()) {
+    if (context->IsWithContext() || context->IsDebugEvaluateContext()) {
+      // For scope analysis, debug-evaluate is equivalent to a with scope.
       Scope* with_scope = new (zone)
           Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>::null(),
                 script_scope->ast_value_factory_);
       current_scope = with_scope;
       // All the inner scopes are inside a with.
-      contains_with = true;
       for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
         s->scope_inside_with_ = true;
       }
@@ -252,13 +248,7 @@
           script_scope->ast_value_factory_->GetString(Handle<String>(name)),
           script_scope->ast_value_factory_);
     }
-    if (contains_with) current_scope->RecordWithStatement();
     if (innermost_scope == NULL) innermost_scope = current_scope;
-
-    // Forget about a with when we move to a context for a different function.
-    if (context->previous()->closure() != context->closure()) {
-      contains_with = false;
-    }
     context = context->previous();
   }
 
@@ -392,7 +382,6 @@
   if (uses_arguments()) other->RecordArgumentsUsage();
   if (uses_super_property()) other->RecordSuperPropertyUsage();
   if (calls_eval()) other->RecordEvalCall();
-  if (scope_contains_with_) other->RecordWithStatement();
 }
 
 
@@ -583,21 +572,6 @@
 }
 
 
-void Scope::SetIllegalRedeclaration(Expression* expression) {
-  // Record only the first illegal redeclaration.
-  if (!HasIllegalRedeclaration()) {
-    illegal_redecl_ = expression;
-  }
-  DCHECK(HasIllegalRedeclaration());
-}
-
-
-Expression* Scope::GetIllegalRedeclaration() {
-  DCHECK(HasIllegalRedeclaration());
-  return illegal_redecl_;
-}
-
-
 Declaration* Scope::CheckConflictingVarDeclarations() {
   int length = decls_.length();
   for (int i = 0; i < length; i++) {
@@ -817,25 +791,7 @@
   return scope_info_;
 }
 
-
-void Scope::GetNestedScopeChain(Isolate* isolate,
-                                List<Handle<ScopeInfo> >* chain, int position) {
-  if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo(isolate)));
-
-  for (int i = 0; i < inner_scopes_.length(); i++) {
-    Scope* scope = inner_scopes_[i];
-    int beg_pos = scope->start_position();
-    int end_pos = scope->end_position();
-    DCHECK(beg_pos >= 0 && end_pos >= 0);
-    if (beg_pos <= position && position < end_pos) {
-      scope->GetNestedScopeChain(isolate, chain, position);
-      return;
-    }
-  }
-}
-
-
-void Scope::CollectNonLocals(HashMap* non_locals) {
+Handle<StringSet> Scope::CollectNonLocals(Handle<StringSet> non_locals) {
   // Collect non-local variables referenced in the scope.
   // TODO(yangguo): store non-local variables explicitly if we can no longer
   //                rely on unresolved_ to find them.
@@ -843,13 +799,12 @@
     VariableProxy* proxy = unresolved_[i];
     if (proxy->is_resolved() && proxy->var()->IsStackAllocated()) continue;
     Handle<String> name = proxy->name();
-    void* key = reinterpret_cast<void*>(name.location());
-    HashMap::Entry* entry = non_locals->LookupOrInsert(key, name->Hash());
-    entry->value = key;
+    non_locals = StringSet::Add(non_locals, name);
   }
   for (int i = 0; i < inner_scopes_.length(); i++) {
-    inner_scopes_[i]->CollectNonLocals(non_locals);
+    non_locals = inner_scopes_[i]->CollectNonLocals(non_locals);
   }
+  return non_locals;
 }
 
 
@@ -999,7 +954,6 @@
     Indent(n1, "// strict mode scope\n");
   }
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
-  if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
   if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
   if (scope_uses_super_property_)
@@ -1271,8 +1225,8 @@
   // visible name.
   if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
       (var->has_forced_context_allocation() || scope_calls_eval_ ||
-       inner_scope_calls_eval_ || scope_contains_with_ || is_catch_scope() ||
-       is_block_scope() || is_module_scope() || is_script_scope())) {
+       inner_scope_calls_eval_ || is_catch_scope() || is_block_scope() ||
+       is_module_scope() || is_script_scope())) {
     var->set_is_used();
     if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned();
   }
@@ -1295,10 +1249,8 @@
   if (var->mode() == TEMPORARY) return false;
   if (is_catch_scope() || is_module_scope()) return true;
   if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
-  return var->has_forced_context_allocation() ||
-      scope_calls_eval_ ||
-      inner_scope_calls_eval_ ||
-      scope_contains_with_;
+  return var->has_forced_context_allocation() || scope_calls_eval_ ||
+         inner_scope_calls_eval_;
 }
 
 
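The CollectNonLocals() change above switches from filling a raw HashMap in place to threading a Handle<StringSet> through the recursion and returning the (possibly reallocated) handle. A minimal sketch of the same accumulate-and-return pattern, using plain standard-library types rather than V8's handles (ScopeNode and its fields are hypothetical):

#include <set>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for v8::internal::Scope; illustrates only the
// accumulate-and-return shape of the new CollectNonLocals().
struct ScopeNode {
  std::vector<std::string> unresolved;   // names referenced but not bound here
  std::vector<ScopeNode*> inner_scopes;  // nested scopes

  std::set<std::string> CollectNonLocals(std::set<std::string> non_locals) const {
    for (const std::string& name : unresolved) non_locals.insert(name);
    for (const ScopeNode* inner : inner_scopes) {
      // The set is passed in and handed back, mirroring how the real code
      // reassigns the Handle<StringSet> after StringSet::Add may reallocate.
      non_locals = inner->CollectNonLocals(std::move(non_locals));
    }
    return non_locals;
  }
};
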
diff --git a/src/ast/scopes.h b/src/ast/scopes.h
index 76f761d..dae70c0 100644
--- a/src/ast/scopes.h
+++ b/src/ast/scopes.h
@@ -224,20 +224,7 @@
   // ---------------------------------------------------------------------------
   // Illegal redeclaration support.
 
-  // Set an expression node that will be executed when the scope is
-  // entered. We only keep track of one illegal redeclaration node per
-  // scope - the first one - so if you try to set it multiple times
-  // the additional requests will be silently ignored.
-  void SetIllegalRedeclaration(Expression* expression);
-
-  // Retrieve the illegal redeclaration expression. Do not call if the
-  // scope doesn't have an illegal redeclaration node.
-  Expression* GetIllegalRedeclaration();
-
-  // Check if the scope has (at least) one illegal redeclaration.
-  bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
-
-  // For harmony block scoping mode: Check if the scope has conflicting var
+  // Check if the scope has conflicting var
   // declarations, i.e. a var declaration that has been hoisted from a nested
   // scope over a let binding of the same name.
   Declaration* CheckConflictingVarDeclarations();
@@ -245,9 +232,6 @@
   // ---------------------------------------------------------------------------
   // Scope-specific info.
 
-  // Inform the scope that the corresponding code contains a with statement.
-  void RecordWithStatement() { scope_contains_with_ = true; }
-
   // Inform the scope that the corresponding code contains an eval call.
   void RecordEvalCall() { scope_calls_eval_ = true; }
 
@@ -556,14 +540,7 @@
 
   Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
 
-  // Get the chain of nested scopes within this scope for the source statement
-  // position. The scopes will be added to the list from the outermost scope to
-  // the innermost scope. Only nested block, catch or with scopes are tracked
-  // and will be returned, but no inner function scopes.
-  void GetNestedScopeChain(Isolate* isolate, List<Handle<ScopeInfo> >* chain,
-                           int statement_position);
-
-  void CollectNonLocals(HashMap* non_locals);
+  Handle<StringSet> CollectNonLocals(Handle<StringSet> non_locals);
 
   // ---------------------------------------------------------------------------
   // Strict mode support.
@@ -646,15 +623,10 @@
   // Map of function names to lists of functions defined in sloppy blocks
   SloppyBlockFunctionMap sloppy_block_function_map_;
 
-  // Illegal redeclaration.
-  Expression* illegal_redecl_;
-
   // Scope-specific information computed during parsing.
   //
   // This scope is inside a 'with' of some outer scope.
   bool scope_inside_with_;
-  // This scope contains a 'with' statement.
-  bool scope_contains_with_;
   // This scope or a nested catch scope or with scope contain an 'eval' call. At
   // the 'eval' call site this scope is the declaration scope.
   bool scope_calls_eval_;
diff --git a/src/background-parsing-task.cc b/src/background-parsing-task.cc
index cc80e01..3e0a5dc 100644
--- a/src/background-parsing-task.cc
+++ b/src/background-parsing-task.cc
@@ -21,7 +21,7 @@
 
   // Prepare the data for the internalization phase and compilation phase, which
   // will happen in the main thread after parsing.
-  Zone* zone = new Zone();
+  Zone* zone = new Zone(isolate->allocator());
   ParseInfo* info = new ParseInfo(zone);
   source->zone.Reset(zone);
   source->info.Reset(info);
@@ -32,7 +32,8 @@
   info->set_global();
   info->set_unicode_cache(&source_->unicode_cache);
   info->set_compile_options(options);
-  info->set_allow_lazy_parsing(true);
+  // Parse eagerly with Ignition since we will compile eagerly.
+  info->set_allow_lazy_parsing(!(i::FLAG_ignition && i::FLAG_ignition_eager));
 }
 
 
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
index 272b6a4..92929cf 100644
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -14,7 +14,6 @@
                                                                                \
   V(k32BitValueInRegisterIsNotZeroExtended,                                    \
     "32 bit value in register is not zero-extended")                           \
-  V(kAlignmentMarkerExpected, "Alignment marker expected")                     \
   V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned")         \
   V(kAPICallReturnedInvalidObject, "API call returned invalid object")         \
   V(kArgumentsObjectValueInATestContext,                                       \
@@ -37,8 +36,6 @@
   V(kBailoutWasNotPrepared, "Bailout was not prepared")                        \
   V(kBothRegistersWereSmisInSelectNonSmi,                                      \
     "Both registers were smis in SelectNonSmi")                                \
-  V(kCallToAJavaScriptRuntimeFunction,                                         \
-    "Call to a JavaScript runtime function")                                   \
   V(kClassLiteral, "Class literal")                                            \
   V(kCodeGenerationFailed, "Code generation failed")                           \
   V(kCodeObjectNotProperlyPatched, "Code object not properly patched")         \
@@ -57,7 +54,8 @@
   V(kDestinationOfCopyNotAligned, "Destination of copy not aligned")           \
   V(kDontDeleteCellsCannotContainTheHole,                                      \
     "DontDelete cells can't contain the hole")                                 \
-  V(kDoExpression, "Do expression encountered")                                \
+  V(kDoExpressionUnmodelable,                                                  \
+    "Encountered a do-expression with unmodelable control statements")         \
   V(kDoPushArgumentNotImplementedForDoubleType,                                \
     "DoPushArgument not implemented for double type")                          \
   V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed")            \
@@ -84,11 +82,11 @@
   V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned")            \
   V(kFunctionBeingDebugged, "Function is being debugged")                      \
   V(kFunctionCallsEval, "Function calls eval")                                 \
-  V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration")  \
   V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,                      \
     "The function_data field should be a BytecodeArray on interpreter entry")  \
   V(kGeneratedCodeIsTooLarge, "Generated code is too large")                   \
   V(kGeneratorFailedToResume, "Generator failed to resume")                    \
+  V(kGeneratorResumeMethod, "Generator resume method is being called")         \
   V(kGenerator, "Generator")                                                   \
   V(kGlobalFunctionsMustHaveInitialMap,                                        \
     "Global functions must have initial map")                                  \
@@ -103,6 +101,7 @@
   V(kInputStringTooLong, "Input string too long")                              \
   V(kInteger32ToSmiFieldWritingToNonSmiLocation,                               \
     "Integer32ToSmiField writing to non-smi location")                         \
+  V(kInvalidBytecode, "Invalid bytecode")                                      \
   V(kInvalidCaptureReferenced, "Invalid capture referenced")                   \
   V(kInvalidElementsKindForInternalArrayOrInternalPackedArray,                 \
     "Invalid ElementsKind for InternalArray or InternalPackedArray")           \
@@ -140,6 +139,7 @@
   V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array")              \
   V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
   V(kOffsetOutOfRange, "Offset out of range")                                  \
+  V(kOperandIsANumber, "Operand is a number")                                  \
   V(kOperandIsASmiAndNotABoundFunction,                                        \
     "Operand is a smi and not a bound function")                               \
   V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function")      \
@@ -230,6 +230,8 @@
   V(kUnexpectedNegativeValue, "Unexpected negative value")                     \
   V(kUnexpectedNumberOfPreAllocatedPropertyFields,                             \
     "Unexpected number of pre-allocated property fields")                      \
+  V(kUnexpectedFunctionIDForInvokeIntrinsic,                                   \
+    "Unexpected runtime function id for the InvokeIntrinsic bytecode")         \
   V(kUnexpectedFPCRMode, "Unexpected FPCR mode.")                              \
   V(kUnexpectedSmi, "Unexpected smi value")                                    \
   V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen")   \
@@ -249,7 +251,7 @@
   V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare")      \
   V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")         \
   V(kUnsupportedPhiUseOfConstVariable,                                         \
-    "Unsupported phi use of const variable")                                   \
+    "Unsupported phi use of const or let variable")                            \
   V(kUnexpectedReturnFromBytecodeHandler,                                      \
     "Unexpectedly returned from a bytecode handler")                           \
   V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw")          \
@@ -262,6 +264,8 @@
   V(kWrongFunctionContext, "Wrong context passed to function")                 \
   V(kWrongAddressOrValuePassedToRecordWrite,                                   \
     "Wrong address or value passed to RecordWrite")                            \
+  V(kWrongArgumentCountForInvokeIntrinsic,                                     \
+    "Wrong number of arguments for intrinsic")                                 \
   V(kShouldNotDirectlyEnterOsrFunction,                                        \
     "Should not directly enter OSR-compiled function")                         \
   V(kYield, "Yield")
diff --git a/src/base/accounting-allocator.cc b/src/base/accounting-allocator.cc
new file mode 100644
index 0000000..2269c60
--- /dev/null
+++ b/src/base/accounting-allocator.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/accounting-allocator.h"
+
+#include <cstdlib>
+
+#if V8_LIBC_BIONIC
+#include <malloc.h>  // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+void* AccountingAllocator::Allocate(size_t bytes) {
+  void* memory = malloc(bytes);
+  if (memory) NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
+  return memory;
+}
+
+void AccountingAllocator::Free(void* memory, size_t bytes) {
+  free(memory);
+  NoBarrier_AtomicIncrement(&current_memory_usage_,
+                            -static_cast<AtomicWord>(bytes));
+}
+
+size_t AccountingAllocator::GetCurrentMemoryUsage() const {
+  return NoBarrier_Load(&current_memory_usage_);
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/accounting-allocator.h b/src/base/accounting-allocator.h
new file mode 100644
index 0000000..ce67f37
--- /dev/null
+++ b/src/base/accounting-allocator.h
@@ -0,0 +1,34 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_ACCOUNTING_ALLOCATOR_H_
+#define V8_BASE_ACCOUNTING_ALLOCATOR_H_
+
+#include "src/base/atomicops.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+class AccountingAllocator final {
+ public:
+  AccountingAllocator() = default;
+  ~AccountingAllocator() = default;
+
+  // Returns nullptr on failed allocation.
+  void* Allocate(size_t bytes);
+  void Free(void* memory, size_t bytes);
+
+  size_t GetCurrentMemoryUsage() const;
+
+ private:
+  AtomicWord current_memory_usage_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
+};
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_ACCOUNTING_ALLOCATOR_H_
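
The new AccountingAllocator pairs every Allocate(bytes) with a Free(memory, bytes) of the same byte count and keeps an atomically updated running total. A short usage sketch, assuming only the header added above (an illustration, not a test from the V8 tree):

#include "src/base/accounting-allocator.h"

// Illustrative only: shows the intended Allocate/Free pairing and its effect
// on GetCurrentMemoryUsage().
void AccountingAllocatorExample() {
  v8::base::AccountingAllocator allocator;
  void* block = allocator.Allocate(1024);  // usage grows by 1024 on success
  if (block != nullptr) {
    size_t in_use = allocator.GetCurrentMemoryUsage();  // >= 1024 here
    allocator.Free(block, 1024);  // caller must pass back the same byte count
    (void)in_use;
  }
}

Zones now receive such an allocator (see the Zone(isolate->allocator()) call in background-parsing-task.cc above), so zone memory is included in the isolate's accounted usage.
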
diff --git a/src/base/atomicops_internals_arm_gcc.h b/src/base/atomicops_internals_arm_gcc.h
index 6c8b27e..8d049e0 100644
--- a/src/base/atomicops_internals_arm_gcc.h
+++ b/src/base/atomicops_internals_arm_gcc.h
@@ -44,14 +44,15 @@
 //
 
 inline void MemoryBarrier() {
-#if defined(__linux__) || defined(__ANDROID__)
+#if defined(__ANDROID__)
   // Note: This is a function call, which is also an implicit compiler barrier.
   typedef void (*KernelMemoryBarrierFunc)();
   ((KernelMemoryBarrierFunc)0xffff0fa0)();
 #elif defined(__QNXNTO__)
   __cpu_membarrier();
 #else
-#error MemoryBarrier() is not implemented on this platform.
+  // Fall back to the GCC built-in function.
+  __sync_synchronize();
 #endif
 }
 
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
index 777f379..12a3881 100644
--- a/src/base/cpu.cc
+++ b/src/base/cpu.cc
@@ -468,7 +468,12 @@
     char* end;
     architecture_ = strtol(architecture, &end, 10);
     if (end == architecture) {
-      architecture_ = 0;
+      // Kernels older than 3.18 report "CPU architecture: AArch64" on ARMv8.
+      if (strcmp(architecture, "AArch64") == 0) {
+        architecture_ = 8;
+      } else {
+        architecture_ = 0;
+      }
     }
     delete[] architecture;
 
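The cpu.cc hunk first parses the "CPU architecture" field numerically and only falls back to a string comparison when strtol consumed nothing. A standalone sketch of that parse-then-fallback logic (ParseCpuArchitecture is a hypothetical helper, not a function in the tree):

#include <cstdlib>
#include <cstring>

// Returns 8 for the literal "AArch64" (reported by kernels older than 3.18 on
// ARMv8), the parsed value when the field is numeric, and 0 otherwise.
int ParseCpuArchitecture(const char* architecture) {
  char* end = nullptr;
  long value = std::strtol(architecture, &end, 10);
  if (end == architecture) {  // nothing was consumed, so the field is not numeric
    return (std::strcmp(architecture, "AArch64") == 0) ? 8 : 0;
  }
  return static_cast<int>(value);
}
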
diff --git a/src/base/logging.cc b/src/base/logging.cc
index a2688c9..ebab129 100644
--- a/src/base/logging.cc
+++ b/src/base/logging.cc
@@ -115,3 +115,14 @@
   fflush(stderr);
   v8::base::OS::Abort();
 }
+
+extern "C" void V8_RuntimeError(const char* file, int line,
+                                const char* message) {
+  fflush(stdout);
+  fflush(stderr);
+  v8::base::OS::PrintError("\n\n#\n# Runtime error in %s, line %d\n# ", file,
+                           line);
+  v8::base::OS::PrintError("\n# %s\n", message);
+  v8::base::DumpBacktrace();
+  fflush(stderr);
+}
diff --git a/src/base/logging.h b/src/base/logging.h
index e4e3f49..15322f6 100644
--- a/src/base/logging.h
+++ b/src/base/logging.h
@@ -14,6 +14,8 @@
 extern "C" V8_NORETURN void V8_Fatal(const char* file, int line,
                                      const char* format, ...);
 
+extern "C" void V8_RuntimeError(const char* file, int line,
+                                const char* message);
 
 // The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
 // development, but they should not be relied on in the final product.
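
Unlike V8_Fatal, V8_RuntimeError is not marked V8_NORETURN: it prints the source location and message, dumps a backtrace, and then returns to the caller. A hedged usage sketch (the RUNTIME_ERROR macro below is hypothetical; the diff only adds the function):

#include "src/base/logging.h"

// Hypothetical convenience wrapper; not part of this change.
#define RUNTIME_ERROR(message) V8_RuntimeError(__FILE__, __LINE__, (message))

void ReportInvalidBytecode() {
  // Emits "# Runtime error in <file>, line <n>" plus the message and a
  // backtrace on stderr; execution then continues (no abort).
  RUNTIME_ERROR("Invalid bytecode");
}
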
diff --git a/src/base/macros.h b/src/base/macros.h
index 10cab4b..3f09b2b 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -278,6 +278,17 @@
 #if V8_OS_MACOSX
 #undef V8PRIxPTR
 #define V8PRIxPTR "lx"
+#undef V8PRIuPTR
+#define V8PRIuPTR "lu"
+#endif
+
+// GCC on S390 31-bit expands 'size_t' to 'long unsigned int'
+// instead of 'int', resulting in compilation errors with %d.
+// The printf format specifier needs to be %zd instead.
+#if V8_HOST_ARCH_S390 && !V8_HOST_ARCH_64_BIT
+#define V8_SIZET_PREFIX "z"
+#else
+#define V8_SIZET_PREFIX ""
 #endif
 
 // The following macro works on both 32 and 64-bit platforms.
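
V8_SIZET_PREFIX is meant to be spliced into printf-style format strings so that a length that is a plain int on most targets but a long unsigned int on S390 31-bit still matches its conversion specifier. A minimal sketch under that assumption (PrintZoneSize is hypothetical; V8_HOST_ARCH_S390 comes from V8's build configuration headers):

#include <cstddef>
#include <cstdio>

#include "src/base/macros.h"

void PrintZoneSize(size_t bytes) {
#if V8_HOST_ARCH_S390 && !V8_HOST_ARCH_64_BIT
  // Prefix expands to "z", so the size_t argument matches "%zu" directly.
  std::printf("zone size: %" V8_SIZET_PREFIX "u\n", bytes);
#else
  // Prefix is empty, so narrow explicitly to match the plain "%u".
  std::printf("zone size: %" V8_SIZET_PREFIX "u\n",
              static_cast<unsigned>(bytes));
#endif
}
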
diff --git a/src/base/platform/platform-linux.cc b/src/base/platform/platform-linux.cc
index a4b742a..1323a0d 100644
--- a/src/base/platform/platform-linux.cc
+++ b/src/base/platform/platform-linux.cc
@@ -72,14 +72,14 @@
 #define GCC_VERSION (__GNUC__ * 10000                                          \
                      + __GNUC_MINOR__ * 100                                    \
                      + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
+#if GCC_VERSION >= 40600 && !defined(__clang__)
 #if defined(__ARM_PCS_VFP)
   return true;
 #else
   return false;
 #endif
 
-#elif GCC_VERSION < 40500
+#elif GCC_VERSION < 40500 && !defined(__clang__)
   return false;
 
 #else
@@ -89,7 +89,7 @@
       !defined(__VFP_FP__)
   return false;
 #else
-#error "Your version of GCC does not report the FP ABI compiled for."          \
+#error "Your version of compiler does not report the FP ABI compiled for."     \
        "Please report it on this issue"                                        \
        "http://code.google.com/p/v8/issues/detail?id=2140"
 
diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc
index 046dbb6..bb340ab 100644
--- a/src/base/platform/platform-posix.cc
+++ b/src/base/platform/platform-posix.cc
@@ -81,6 +81,8 @@
   return 8;
 #elif V8_TARGET_ARCH_MIPS
   return 8;
+#elif V8_TARGET_ARCH_S390
+  return 8;
 #else
   // Otherwise we just assume 16 byte alignment, i.e.:
   // - With gcc 4.4 the tree vectorization optimizer can generate code
@@ -185,6 +187,15 @@
   // Little-endian Linux: 48 bits of virtual addressing.
   raw_addr &= V8_UINT64_C(0x3ffffffff000);
 #endif
+#elif V8_TARGET_ARCH_S390X
+  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
+  // of virtual addressing.  Truncate to 40 bits to give the kernel a chance
+  // to fulfill the request.
+  raw_addr &= V8_UINT64_C(0xfffffff000);
+#elif V8_TARGET_ARCH_S390
+  // 31 bits of virtual addressing.  Truncate to 29 bits to give the kernel a
+  // chance to fulfill the request.
+  raw_addr &= 0x1ffff000;
 #else
   raw_addr &= 0x3ffff000;
 
@@ -252,6 +263,9 @@
 #endif  // V8_OS_NACL
 #elif V8_HOST_ARCH_X64
   asm("int $3");
+#elif V8_HOST_ARCH_S390
+  // Software breakpoint instruction is 0x0001
+  asm volatile(".word 0x0001");
 #else
 #error Unsupported host architecture.
 #endif
@@ -415,9 +429,10 @@
   return (remove(path) == 0);
 }
 
+char OS::DirectorySeparator() { return '/'; }
 
 bool OS::isDirectorySeparator(const char ch) {
-  return ch == '/';
+  return ch == DirectorySeparator();
 }
 
 
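The randomized-hint hunks for S390 keep the low 12 bits zero (page alignment) and clear everything above the usable virtual-address range so the kernel can still honor the hint. A self-contained sketch of the 64-bit masking arithmetic (the helper name is hypothetical; the constant is the one used above):

#include <cstdint>

// Truncate a random mmap hint to 40 bits while staying page-aligned:
// only bits [12, 40) may remain set.
uint64_t MaskRandomHintForS390X(uint64_t raw_addr) {
  return raw_addr & UINT64_C(0xfffffff000);
}
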
diff --git a/src/base/platform/platform-win32.cc b/src/base/platform/platform-win32.cc
index 6afa6f9..0076a35 100644
--- a/src/base/platform/platform-win32.cc
+++ b/src/base/platform/platform-win32.cc
@@ -574,6 +574,7 @@
   return (DeleteFileA(path) != 0);
 }
 
+char OS::DirectorySeparator() { return '\\'; }
 
 bool OS::isDirectorySeparator(const char ch) {
   return ch == '/' || ch == '\\';
diff --git a/src/base/platform/platform.h b/src/base/platform/platform.h
index 89d6225..5b2dbc9 100644
--- a/src/base/platform/platform.h
+++ b/src/base/platform/platform.h
@@ -142,6 +142,7 @@
   static FILE* FOpen(const char* path, const char* mode);
   static bool Remove(const char* path);
 
+  static char DirectorySeparator();
   static bool isDirectorySeparator(const char ch);
 
   // Opens a temporary file, the file is auto removed on close.
@@ -290,6 +291,10 @@
   // by address().
   VirtualMemory(size_t size, size_t alignment);
 
+  // Construct a VirtualMemory object that wraps an already mapped address
+  // range of the given size.
+  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
+
   // Releases the reserved memory, if any, controlled by this VirtualMemory
   // object.
   ~VirtualMemory();
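
The new two-argument VirtualMemory constructor adopts an address range that was mapped elsewhere, so it can be queried and released through the same object as a normal reservation. A usage sketch under POSIX assumptions (the mmap setup is illustrative; V8 itself constructs these objects differently):

#include <sys/mman.h>

#include <cstddef>

#include "src/base/platform/platform.h"

// Illustrative only: wrap an externally created mapping in a VirtualMemory
// object via the new (address, size) constructor.
void AdoptExistingMapping() {
  const size_t size = 1 << 20;  // 1 MB
  void* region = mmap(nullptr, size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (region == MAP_FAILED) return;

  // The object now reports this range via address()/size(); its destructor
  // releases the reservation when it goes out of scope.
  v8::base::VirtualMemory reservation(region, size);
}
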
diff --git a/src/base/platform/semaphore.cc b/src/base/platform/semaphore.cc
index 9e7b59a..284474e 100644
--- a/src/base/platform/semaphore.cc
+++ b/src/base/platform/semaphore.cc
@@ -94,8 +94,7 @@
 
 void Semaphore::Signal() {
   int result = sem_post(&native_handle_);
-  DCHECK_EQ(0, result);
-  USE(result);
+  CHECK_EQ(0, result);
 }
 
 
diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc
index e847d54..6d5e538 100644
--- a/src/base/platform/time.cc
+++ b/src/base/platform/time.cc
@@ -520,14 +520,6 @@
   return high_res_tick_clock.Pointer()->IsHighResolution();
 }
 
-
-// static
-TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); }
-
-
-// static
-bool TimeTicks::KernelTimestampAvailable() { return false; }
-
 #else  // V8_OS_WIN
 
 TimeTicks TimeTicks::Now() {
@@ -566,82 +558,6 @@
   return true;
 }
 
-
-#if V8_OS_LINUX
-
-class KernelTimestampClock {
- public:
-  KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) {
-    clock_fd_ = open(kTraceClockDevice, O_RDONLY);
-    if (clock_fd_ == -1) {
-      return;
-    }
-    clock_id_ = get_clockid(clock_fd_);
-  }
-
-  virtual ~KernelTimestampClock() {
-    if (clock_fd_ != -1) {
-      close(clock_fd_);
-    }
-  }
-
-  int64_t Now() {
-    if (clock_id_ == kClockInvalid) {
-      return 0;
-    }
-
-    struct timespec ts;
-
-    clock_gettime(clock_id_, &ts);
-    return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
-  }
-
-  bool Available() { return clock_id_ != kClockInvalid; }
-
- private:
-  static const clockid_t kClockInvalid = -1;
-  static const char kTraceClockDevice[];
-  static const uint64_t kNsecPerSec = 1000000000;
-
-  int clock_fd_;
-  clockid_t clock_id_;
-
-  static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); }
-};
-
-
-// Timestamp module name
-const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock";
-
-#else
-
-class KernelTimestampClock {
- public:
-  KernelTimestampClock() {}
-
-  int64_t Now() { return 0; }
-  bool Available() { return false; }
-};
-
-#endif  // V8_OS_LINUX
-
-static LazyStaticInstance<KernelTimestampClock,
-                          DefaultConstructTrait<KernelTimestampClock>,
-                          ThreadSafeInitOnceTrait>::type kernel_tick_clock =
-    LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-// static
-TimeTicks TimeTicks::KernelTimestampNow() {
-  return TimeTicks(kernel_tick_clock.Pointer()->Now());
-}
-
-
-// static
-bool TimeTicks::KernelTimestampAvailable() {
-  return kernel_tick_clock.Pointer()->Available();
-}
-
 #endif  // V8_OS_WIN
 
 }  // namespace base
diff --git a/src/base/platform/time.h b/src/base/platform/time.h
index 29300e5..c8140ef 100644
--- a/src/base/platform/time.h
+++ b/src/base/platform/time.h
@@ -318,13 +318,6 @@
   // Returns true if the high-resolution clock is working on this system.
   static bool IsHighResolutionClockWorking();
 
-  // Returns Linux kernel timestamp for generating profiler events. This method
-  // returns null TimeTicks if the kernel cannot provide the timestamps (e.g.,
-  // on non-Linux OS or if the kernel module for timestamps is not loaded).
-
-  static TimeTicks KernelTimestampNow();
-  static bool KernelTimestampAvailable();
-
   // Returns true if this object has not been initialized.
   bool IsNull() const { return ticks_ == 0; }
 
diff --git a/src/base/win32-headers.h b/src/base/win32-headers.h
index 2d94abd..20ec8e0 100644
--- a/src/base/win32-headers.h
+++ b/src/base/win32-headers.h
@@ -76,6 +76,8 @@
 #undef CreateSemaphore
 #undef Yield
 #undef RotateRight32
+#undef RotateLeft32
 #undef RotateRight64
+#undef RotateLeft64
 
 #endif  // V8_BASE_WIN32_HEADERS_H_
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 252c51c..f67065d 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -157,8 +157,8 @@
   Handle<JSFunction> GetThrowTypeErrorIntrinsic(Builtins::Name builtin_name);
 
   void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
-  void CreateStrongModeFunctionMaps(Handle<JSFunction> empty);
   void CreateIteratorMaps();
+  void CreateJSProxyMaps();
 
   // Make the "arguments" and "caller" properties throw a TypeError on access.
   void AddRestrictedFunctionProperties(Handle<Map> map);
@@ -218,7 +218,6 @@
   void InstallBuiltinFunctionIds();
   void InstallExperimentalBuiltinFunctionIds();
   void InitializeNormalizedMapCaches();
-  void InstallJSProxyMaps();
 
   enum ExtensionTraversalState {
     UNVISITED, VISITED, INSTALLED
@@ -284,13 +283,10 @@
 
   Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
                                       Handle<JSFunction> empty_function);
-  Handle<Map> CreateStrongFunctionMap(Handle<JSFunction> empty_function,
-                                      bool is_constructor);
 
 
   void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
                                            FunctionMode function_mode);
-  void SetStrongFunctionInstanceDescriptor(Handle<Map> map);
 
   static bool CallUtilsFunction(Isolate* isolate, const char* name);
 
@@ -547,12 +543,6 @@
     native_context()->set_initial_array_prototype(*object_function_prototype);
     Accessors::FunctionSetPrototype(object_fun, object_function_prototype)
         .Assert();
-
-    // Allocate initial strong object map.
-    Handle<Map> strong_object_map =
-        Map::Copy(Handle<Map>(object_fun->initial_map()), "EmptyStrongObject");
-    strong_object_map->set_is_strong();
-    native_context()->set_js_object_strong_map(*strong_object_map);
   }
 
   // Allocate the empty function as the prototype for function - ES6 19.2.3
@@ -637,29 +627,6 @@
 }
 
 
-void Genesis::SetStrongFunctionInstanceDescriptor(Handle<Map> map) {
-  Map::EnsureDescriptorSlack(map, 2);
-
-  PropertyAttributes ro_attribs =
-      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
-  Handle<AccessorInfo> length =
-      Accessors::FunctionLengthInfo(isolate(), ro_attribs);
-  {  // Add length.
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
-                                 length, ro_attribs);
-    map->AppendDescriptor(&d);
-  }
-  Handle<AccessorInfo> name =
-      Accessors::FunctionNameInfo(isolate(), ro_attribs);
-  {  // Add name.
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
-                                 ro_attribs);
-    map->AppendDescriptor(&d);
-  }
-}
-
-
 // Creates the %ThrowTypeError% function.
 Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
     Builtins::Name builtin_name) {
@@ -722,19 +689,6 @@
 }
 
 
-Handle<Map> Genesis::CreateStrongFunctionMap(
-    Handle<JSFunction> empty_function, bool is_constructor) {
-  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
-  SetStrongFunctionInstanceDescriptor(map);
-  map->set_is_constructor(is_constructor);
-  Map::SetPrototype(map, empty_function);
-  map->set_is_callable();
-  map->set_is_extensible(is_constructor);
-  map->set_is_strong();
-  return map;
-}
-
-
 void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
   // Allocate map for the prototype-less strict mode instances.
   Handle<Map> strict_function_without_prototype_map =
@@ -756,16 +710,6 @@
 }
 
 
-void Genesis::CreateStrongModeFunctionMaps(Handle<JSFunction> empty) {
-  // Allocate map for strong mode instances, which never have prototypes.
-  Handle<Map> strong_function_map = CreateStrongFunctionMap(empty, false);
-  native_context()->set_strong_function_map(*strong_function_map);
-  // Constructors do, though.
-  Handle<Map> strong_constructor_map = CreateStrongFunctionMap(empty, true);
-  native_context()->set_strong_constructor_map(*strong_constructor_map);
-}
-
-
 void Genesis::CreateIteratorMaps() {
   // Create iterator-related meta-objects.
   Handle<JSObject> iterator_prototype =
@@ -803,15 +747,6 @@
   native_context()->set_strict_generator_function_map(
       *strict_generator_function_map);
 
-  Handle<Map> strong_function_map(native_context()->strong_function_map());
-  Handle<Map> strong_generator_function_map =
-      Map::Copy(strong_function_map, "StrongGeneratorFunction");
-  strong_generator_function_map->set_is_constructor(false);
-  Map::SetPrototype(strong_generator_function_map,
-                    generator_function_prototype);
-  native_context()->set_strong_generator_function_map(
-      *strong_generator_function_map);
-
   Handle<JSFunction> object_function(native_context()->object_function());
   Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
   Map::SetPrototype(generator_object_prototype_map, generator_object_prototype);
@@ -819,6 +754,30 @@
       *generator_object_prototype_map);
 }
 
+void Genesis::CreateJSProxyMaps() {
+  // Allocate the different maps for all Proxy types.
+  // Next to the default proxy, we need maps indicating callable and
+  // constructable proxies.
+  Handle<Map> proxy_function_map =
+      Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
+  proxy_function_map->set_is_constructor(true);
+  native_context()->set_proxy_function_map(*proxy_function_map);
+
+  Handle<Map> proxy_map =
+      factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
+  proxy_map->set_dictionary_map(true);
+  native_context()->set_proxy_map(*proxy_map);
+
+  Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
+  proxy_callable_map->set_is_callable();
+  native_context()->set_proxy_callable_map(*proxy_callable_map);
+  proxy_callable_map->SetConstructor(native_context()->function_function());
+
+  Handle<Map> proxy_constructor_map =
+      Map::Copy(proxy_callable_map, "constructor Proxy");
+  proxy_constructor_map->set_is_constructor(true);
+  native_context()->set_proxy_constructor_map(*proxy_constructor_map);
+}
 
 static void ReplaceAccessors(Handle<Map> map,
                              Handle<String> name,
@@ -942,7 +901,7 @@
 #ifdef DEBUG
     LookupIterator it(prototype, factory()->constructor_string(),
                       LookupIterator::OWN_SKIP_INTERCEPTOR);
-    Handle<Object> value = JSReceiver::GetProperty(&it).ToHandleChecked();
+    Handle<Object> value = Object::GetProperty(&it).ToHandleChecked();
     DCHECK(it.IsFound());
     DCHECK_EQ(*isolate()->object_function(), *value);
 #endif
@@ -1121,6 +1080,9 @@
                           Builtins::kObjectPreventExtensions, 1, false);
     SimpleInstallFunction(object_function, "seal", Builtins::kObjectSeal, 1,
                           false);
+
+    SimpleInstallFunction(isolate->initial_object_prototype(), "hasOwnProperty",
+                          Builtins::kObjectHasOwnProperty, 1, true);
   }
 
   Handle<JSObject> global(native_context()->global_object());
@@ -1171,7 +1133,6 @@
 
     sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
     strict_function_map_writable_prototype_->SetConstructor(*function_fun);
-    native_context()->strong_function_map()->SetConstructor(*function_fun);
   }
 
   {  // --- A r r a y ---
@@ -1180,7 +1141,7 @@
                         isolate->initial_object_prototype(),
                         Builtins::kArrayCode);
     array_function->shared()->DontAdaptArguments();
-    array_function->shared()->set_function_data(Smi::FromInt(kArrayCode));
+    array_function->shared()->set_builtin_function_id(kArrayCode);
 
     // This seems a bit hackish, but we need to make sure Array.length
     // is 1.
@@ -1214,11 +1175,6 @@
     Handle<Code> code = array_constructor_stub.GetCode();
     array_function->shared()->set_construct_stub(*code);
 
-    Handle<Map> initial_strong_map =
-        Map::Copy(initial_map, "SetInstancePrototype");
-    initial_strong_map->set_is_strong();
-    CacheInitialJSArrayMaps(native_context(), initial_strong_map);
-
     Handle<JSFunction> is_arraylike = SimpleInstallFunction(
         array_function, isolate->factory()->InternalizeUtf8String("isArray"),
         Builtins::kArrayIsArray, 1, true);
@@ -1292,6 +1248,10 @@
                                    attribs);
       string_map->AppendDescriptor(&d);
     }
+
+    // Install the String.fromCharCode function.
+    SimpleInstallFunction(string_fun, "fromCharCode",
+                          Builtins::kStringFromCharCode, 1, false);
   }
 
   {
@@ -1303,7 +1263,7 @@
                         prototype, Builtins::kSymbolConstructor);
     symbol_fun->shared()->set_construct_stub(
         *isolate->builtins()->SymbolConstructor_ConstructStub());
-    symbol_fun->shared()->set_length(1);
+    symbol_fun->shared()->set_length(0);
     symbol_fun->shared()->DontAdaptArguments();
     native_context()->set_symbol_function(*symbol_fun);
 
@@ -1560,8 +1520,23 @@
     Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
     DCHECK(math->IsJSObject());
     JSObject::AddProperty(global, name, math, DONT_ENUM);
+    SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
+    SimpleInstallFunction(math, "asin", Builtins::kMathAsin, 1, true);
+    SimpleInstallFunction(math, "atan", Builtins::kMathAtan, 1, true);
+    SimpleInstallFunction(math, "ceil", Builtins::kMathCeil, 1, true);
+    SimpleInstallFunction(math, "clz32", Builtins::kMathClz32, 1, true);
+    Handle<JSFunction> math_floor =
+        SimpleInstallFunction(math, "floor", Builtins::kMathFloor, 1, true);
+    native_context()->set_math_floor(*math_floor);
+    SimpleInstallFunction(math, "fround", Builtins::kMathFround, 1, true);
+    SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
     SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
     SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
+    SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
+    Handle<JSFunction> math_sqrt =
+        SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
+    native_context()->set_math_sqrt(*math_sqrt);
+    SimpleInstallFunction(math, "trunc", Builtins::kMathTrunc, 1, true);
   }
 
   {  // -- A r r a y B u f f e r
@@ -1649,6 +1624,74 @@
                                      Context::JS_WEAK_SET_FUN_INDEX);
   }
 
+  {  // -- P r o x y
+    CreateJSProxyMaps();
+
+    Handle<String> name = factory->Proxy_string();
+    Handle<Code> code(isolate->builtins()->ProxyConstructor());
+
+    Handle<JSFunction> proxy_function =
+        factory->NewFunction(isolate->proxy_function_map(),
+                             factory->Proxy_string(), MaybeHandle<Code>(code));
+
+    JSFunction::SetInitialMap(
+        proxy_function, Handle<Map>(native_context()->proxy_map(), isolate),
+        factory->null_value());
+
+    proxy_function->shared()->set_construct_stub(
+        *isolate->builtins()->ProxyConstructor_ConstructStub());
+    proxy_function->shared()->set_internal_formal_parameter_count(2);
+    proxy_function->shared()->set_length(2);
+
+    native_context()->set_proxy_function(*proxy_function);
+    InstallFunction(global, name, proxy_function, factory->Object_string());
+  }
+
+  {  // -- R e f l e c t
+    Handle<String> reflect_string = factory->InternalizeUtf8String("Reflect");
+    Handle<JSObject> reflect =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
+
+    Handle<JSFunction> define_property =
+        SimpleInstallFunction(reflect, factory->defineProperty_string(),
+                              Builtins::kReflectDefineProperty, 3, true);
+    native_context()->set_reflect_define_property(*define_property);
+
+    Handle<JSFunction> delete_property =
+        SimpleInstallFunction(reflect, factory->deleteProperty_string(),
+                              Builtins::kReflectDeleteProperty, 2, true);
+    native_context()->set_reflect_delete_property(*delete_property);
+
+    Handle<JSFunction> apply = SimpleInstallFunction(
+        reflect, factory->apply_string(), Builtins::kReflectApply, 3, false);
+    native_context()->set_reflect_apply(*apply);
+
+    Handle<JSFunction> construct =
+        SimpleInstallFunction(reflect, factory->construct_string(),
+                              Builtins::kReflectConstruct, 2, false);
+    native_context()->set_reflect_construct(*construct);
+
+    SimpleInstallFunction(reflect, factory->get_string(), Builtins::kReflectGet,
+                          2, false);
+    SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
+                          Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
+    SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
+                          Builtins::kReflectGetPrototypeOf, 1, true);
+    SimpleInstallFunction(reflect, factory->has_string(), Builtins::kReflectHas,
+                          2, true);
+    SimpleInstallFunction(reflect, factory->isExtensible_string(),
+                          Builtins::kReflectIsExtensible, 1, true);
+    SimpleInstallFunction(reflect, factory->ownKeys_string(),
+                          Builtins::kReflectOwnKeys, 1, true);
+    SimpleInstallFunction(reflect, factory->preventExtensions_string(),
+                          Builtins::kReflectPreventExtensions, 1, true);
+    SimpleInstallFunction(reflect, factory->set_string(), Builtins::kReflectSet,
+                          3, false);
+    SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
+                          Builtins::kReflectSetPrototypeOf, 2, true);
+  }
+
   {  // --- B o u n d F u n c t i o n
     Handle<Map> map =
         factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize);
@@ -1924,10 +1967,11 @@
 
   Handle<String> script_name =
       isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
-  Handle<SharedFunctionInfo> function_info = Compiler::CompileScript(
-      source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
-      context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
-      false);
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::GetSharedFunctionInfoForScript(
+          source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
+          context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
+          false);
   if (function_info.is_null()) return false;
 
   DCHECK(context->IsNativeContext());
@@ -1981,7 +2025,7 @@
   if (!cache->Lookup(name, &function_info)) {
     Handle<String> script_name =
         factory->NewStringFromUtf8(name).ToHandleChecked();
-    function_info = Compiler::CompileScript(
+    function_info = Compiler::GetSharedFunctionInfoForScript(
         source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
         context, extension, NULL, ScriptCompiler::kNoCompileOptions,
         EXTENSION_CODE, false);
@@ -2021,7 +2065,7 @@
   Handle<String> property_string = factory->InternalizeUtf8String(property);
   DCHECK(!property_string.is_null());
   Handle<JSObject> object = Handle<JSObject>::cast(
-      Object::GetProperty(global, property_string).ToHandleChecked());
+      JSReceiver::GetProperty(global, property_string).ToHandleChecked());
   if (strcmp("prototype", inner) == 0) {
     Handle<JSFunction> function = Handle<JSFunction>::cast(object);
     return Handle<JSObject>(JSObject::cast(function->prototype()));
@@ -2029,7 +2073,7 @@
   Handle<String> inner_string = factory->InternalizeUtf8String(inner);
   DCHECK(!inner_string.is_null());
   Handle<Object> value =
-      Object::GetProperty(object, inner_string).ToHandleChecked();
+      JSReceiver::GetProperty(object, inner_string).ToHandleChecked();
   return Handle<JSObject>::cast(value);
 }
 
@@ -2129,8 +2173,6 @@
         *generator_function_function);
     native_context->strict_generator_function_map()->SetConstructor(
         *generator_function_function);
-    native_context->strong_generator_function_map()->SetConstructor(
-        *generator_function_function);
   }
 
   {  // -- S e t I t e r a t o r
@@ -2315,7 +2357,6 @@
                           isolate->factory()->ToBoolean(FLAG), NONE); \
   }
 
-  INITIALIZE_FLAG(FLAG_harmony_tostring)
   INITIALIZE_FLAG(FLAG_harmony_species)
 
 #undef INITIALIZE_FLAG
@@ -2325,18 +2366,14 @@
 #define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
   void Genesis::InitializeGlobal_##id() {}
 
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_bind)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_assignment)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_exec)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
@@ -2344,6 +2381,9 @@
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_exponentiation_operator)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
 
 void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
                          const char* name, Handle<Symbol> value) {
@@ -2359,13 +2399,6 @@
 }
 
 
-void Genesis::InitializeGlobal_harmony_tostring() {
-  if (!FLAG_harmony_tostring) return;
-  InstallPublicSymbol(factory(), native_context(), "toStringTag",
-                      factory()->to_string_tag_symbol());
-}
-
-
 void Genesis::InitializeGlobal_harmony_regexp_subclass() {
   if (!FLAG_harmony_regexp_subclass) return;
   InstallPublicSymbol(factory(), native_context(), "match",
@@ -2379,66 +2412,6 @@
 }
 
 
-void Genesis::InitializeGlobal_harmony_reflect() {
-  Factory* factory = isolate()->factory();
-
-  // We currently use some of the Reflect functions internally, even when
-  // the --harmony-reflect flag is not given.
-
-  Handle<JSFunction> define_property =
-      SimpleCreateFunction(isolate(), factory->defineProperty_string(),
-                           Builtins::kReflectDefineProperty, 3, true);
-  native_context()->set_reflect_define_property(*define_property);
-
-  Handle<JSFunction> delete_property =
-      SimpleCreateFunction(isolate(), factory->deleteProperty_string(),
-                           Builtins::kReflectDeleteProperty, 2, true);
-  native_context()->set_reflect_delete_property(*delete_property);
-
-  Handle<JSFunction> apply = SimpleCreateFunction(
-      isolate(), factory->apply_string(), Builtins::kReflectApply, 3, false);
-  native_context()->set_reflect_apply(*apply);
-
-  Handle<JSFunction> construct =
-      SimpleCreateFunction(isolate(), factory->construct_string(),
-                           Builtins::kReflectConstruct, 2, false);
-  native_context()->set_reflect_construct(*construct);
-
-  if (!FLAG_harmony_reflect) return;
-
-  Handle<JSGlobalObject> global(JSGlobalObject::cast(
-      native_context()->global_object()));
-  Handle<String> reflect_string = factory->NewStringFromStaticChars("Reflect");
-  Handle<JSObject> reflect =
-      factory->NewJSObject(isolate()->object_function(), TENURED);
-  JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
-
-  InstallFunction(reflect, define_property, factory->defineProperty_string());
-  InstallFunction(reflect, delete_property, factory->deleteProperty_string());
-  InstallFunction(reflect, apply, factory->apply_string());
-  InstallFunction(reflect, construct, factory->construct_string());
-
-  SimpleInstallFunction(reflect, factory->get_string(),
-                        Builtins::kReflectGet, 2, false);
-  SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
-                        Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
-  SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
-                        Builtins::kReflectGetPrototypeOf, 1, true);
-  SimpleInstallFunction(reflect, factory->has_string(),
-                        Builtins::kReflectHas, 2, true);
-  SimpleInstallFunction(reflect, factory->isExtensible_string(),
-                        Builtins::kReflectIsExtensible, 1, true);
-  SimpleInstallFunction(reflect, factory->ownKeys_string(),
-                        Builtins::kReflectOwnKeys, 1, true);
-  SimpleInstallFunction(reflect, factory->preventExtensions_string(),
-                        Builtins::kReflectPreventExtensions, 1, true);
-  SimpleInstallFunction(reflect, factory->set_string(),
-                        Builtins::kReflectSet, 3, false);
-  SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
-                        Builtins::kReflectSetPrototypeOf, 2, true);
-}
-
-
 void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
   if (!FLAG_harmony_sharedarraybuffer) return;
 
@@ -2509,64 +2482,27 @@
                         Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
 }
 
-void Genesis::InstallJSProxyMaps() {
-  // Allocate the different maps for all Proxy types.
-  // Next to the default proxy, we need maps indicating callable and
-  // constructable proxies.
+void Genesis::InitializeGlobal_harmony_array_prototype_values() {
+  if (!FLAG_harmony_array_prototype_values) return;
+  Handle<JSFunction> array_constructor(native_context()->array_function());
+  Handle<JSObject> array_prototype(
+      JSObject::cast(array_constructor->instance_prototype()));
+  Handle<Object> values_iterator =
+      JSObject::GetProperty(array_prototype, factory()->iterator_symbol())
+          .ToHandleChecked();
+  DCHECK(values_iterator->IsJSFunction());
+  JSObject::AddProperty(array_prototype, factory()->values_string(),
+                        values_iterator, DONT_ENUM);
 
-  Handle<Map> proxy_function_map =
-      Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
-  proxy_function_map->set_is_constructor(true);
-  native_context()->set_proxy_function_map(*proxy_function_map);
-
-  Handle<Map> proxy_map =
-      factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
-  proxy_map->set_dictionary_map(true);
-  native_context()->set_proxy_map(*proxy_map);
-
-  Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
-  proxy_callable_map->set_is_callable();
-  native_context()->set_proxy_callable_map(*proxy_callable_map);
-  proxy_callable_map->SetConstructor(native_context()->function_function());
-
-  Handle<Map> proxy_constructor_map =
-      Map::Copy(proxy_callable_map, "constructor Proxy");
-  proxy_constructor_map->set_is_constructor(true);
-  native_context()->set_proxy_constructor_map(*proxy_constructor_map);
+  Handle<Object> unscopables =
+      JSObject::GetProperty(array_prototype, factory()->unscopables_symbol())
+          .ToHandleChecked();
+  DCHECK(unscopables->IsJSObject());
+  JSObject::AddProperty(Handle<JSObject>::cast(unscopables),
+                        factory()->values_string(), factory()->true_value(),
+                        NONE);
 }
 
-
-void Genesis::InitializeGlobal_harmony_proxies() {
-  if (!FLAG_harmony_proxies) return;
-  Handle<JSGlobalObject> global(
-      JSGlobalObject::cast(native_context()->global_object()));
-  Isolate* isolate = global->GetIsolate();
-  Factory* factory = isolate->factory();
-
-  InstallJSProxyMaps();
-
-  // Create the Proxy object.
-  Handle<String> name = factory->Proxy_string();
-  Handle<Code> code(isolate->builtins()->ProxyConstructor());
-
-  Handle<JSFunction> proxy_function =
-      factory->NewFunction(isolate->proxy_function_map(),
-                           factory->Proxy_string(), MaybeHandle<Code>(code));
-
-  JSFunction::SetInitialMap(proxy_function,
-                            Handle<Map>(native_context()->proxy_map(), isolate),
-                            factory->null_value());
-
-  proxy_function->shared()->set_construct_stub(
-      *isolate->builtins()->ProxyConstructor_ConstructStub());
-  proxy_function->shared()->set_internal_formal_parameter_count(2);
-  proxy_function->shared()->set_length(2);
-
-  native_context()->set_proxy_function(*proxy_function);
-  InstallFunction(global, name, proxy_function, factory->Object_string());
-}
-
-
 Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
                                                const char* name) {
   // Setup the {prototype} with the given {name} for @@toStringTag.
@@ -2708,9 +2644,8 @@
 
   if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
 
-  auto template_instantiations_cache =
-      ObjectHashTable::New(isolate(), ApiNatives::kInitialFunctionCacheSize,
-                           USE_CUSTOM_MINIMUM_CAPACITY);
+  auto template_instantiations_cache = UnseededNumberDictionary::New(
+      isolate(), ApiNatives::kInitialFunctionCacheSize);
   native_context()->set_template_instantiations_cache(
       *template_instantiations_cache);
 
@@ -2777,7 +2712,7 @@
   {
     Handle<String> key = factory()->Promise_string();
     Handle<JSFunction> function = Handle<JSFunction>::cast(
-        Object::GetProperty(handle(native_context()->global_object()), key)
+        JSReceiver::GetProperty(handle(native_context()->global_object()), key)
             .ToHandleChecked());
     JSFunction::EnsureHasInitialMap(function);
     function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
@@ -2789,6 +2724,37 @@
 
   InstallBuiltinFunctionIds();
 
+  // Also install builtin function ids on some generator object methods. These
+  // three methods use the three resume operations (Runtime_GeneratorNext,
+  // Runtime_GeneratorReturn, Runtime_GeneratorThrow), respectively. Those
+  // operations are not supported by Crankshaft, TurboFan, or Ignition.
+  {
+    Handle<JSObject> generator_object_prototype(JSObject::cast(
+        native_context()->generator_object_prototype_map()->prototype()));
+
+    {  // GeneratorObject.prototype.next
+      Handle<String> key = factory()->next_string();
+      Handle<JSFunction> function = Handle<JSFunction>::cast(
+          JSReceiver::GetProperty(generator_object_prototype, key)
+              .ToHandleChecked());
+      function->shared()->set_builtin_function_id(kGeneratorObjectNext);
+    }
+    {  // GeneratorObject.prototype.return
+      Handle<String> key = factory()->NewStringFromAsciiChecked("return");
+      Handle<JSFunction> function = Handle<JSFunction>::cast(
+          JSReceiver::GetProperty(generator_object_prototype, key)
+              .ToHandleChecked());
+      function->shared()->set_builtin_function_id(kGeneratorObjectReturn);
+    }
+    {  // GeneratorObject.prototype.throw
+      Handle<String> key = factory()->throw_string();
+      Handle<JSFunction> function = Handle<JSFunction>::cast(
+          JSReceiver::GetProperty(generator_object_prototype, key)
+              .ToHandleChecked());
+      function->shared()->set_builtin_function_id(kGeneratorObjectThrow);
+    }
+  }
+
   // Create a map for accessor property descriptors (a variant of JSObject
   // that predefines four properties get, set, configurable and enumerable).
   {
@@ -2969,11 +2935,6 @@
 
 
 bool Genesis::InstallExperimentalNatives() {
-  static const char* harmony_proxies_natives[] = {"native proxy.js", nullptr};
-  static const char* harmony_modules_natives[] = {nullptr};
-  static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
-                                                  nullptr};
-  static const char* harmony_tostring_natives[] = {nullptr};
   static const char* harmony_iterator_close_natives[] = {nullptr};
   static const char* harmony_sloppy_natives[] = {nullptr};
   static const char* harmony_sloppy_function_natives[] = {nullptr};
@@ -2983,11 +2944,6 @@
   static const char* harmony_tailcalls_natives[] = {nullptr};
   static const char* harmony_unicode_regexps_natives[] = {
       "native harmony-unicode-regexps.js", nullptr};
-  static const char* harmony_default_parameters_natives[] = {nullptr};
-  static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
-                                                  nullptr};
-  static const char* harmony_destructuring_bind_natives[] = {nullptr};
-  static const char* harmony_destructuring_assignment_natives[] = {nullptr};
   static const char* harmony_object_observe_natives[] = {
       "native harmony-object-observe.js", nullptr};
   static const char* harmony_sharedarraybuffer_natives[] = {
@@ -2995,9 +2951,12 @@
   static const char* harmony_simd_natives[] = {"native harmony-simd.js",
                                                nullptr};
   static const char* harmony_do_expressions_natives[] = {nullptr};
+  static const char* harmony_regexp_exec_natives[] = {
+      "native harmony-regexp-exec.js", nullptr};
   static const char* harmony_regexp_subclass_natives[] = {nullptr};
   static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
   static const char* harmony_instanceof_natives[] = {nullptr};
+  static const char* harmony_restrictive_declarations_natives[] = {nullptr};
   static const char* harmony_regexp_property_natives[] = {nullptr};
   static const char* harmony_function_name_natives[] = {nullptr};
   static const char* harmony_function_sent_natives[] = {nullptr};
@@ -3006,6 +2965,10 @@
   static const char* harmony_object_values_entries_natives[] = {nullptr};
   static const char* harmony_object_own_property_descriptors_natives[] = {
       nullptr};
+  static const char* harmony_array_prototype_values_natives[] = {nullptr};
+  static const char* harmony_exponentiation_operator_natives[] = {nullptr};
+  static const char* harmony_string_padding_natives[] = {
+      "native harmony-string-padding.js", nullptr};
 
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3075,9 +3038,9 @@
                                      BuiltinFunctionId id) {
   Isolate* isolate = holder->GetIsolate();
   Handle<Object> function_object =
-      Object::GetProperty(isolate, holder, function_name).ToHandleChecked();
+      JSReceiver::GetProperty(isolate, holder, function_name).ToHandleChecked();
   Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
-  function->shared()->set_function_data(Smi::FromInt(id));
+  function->shared()->set_builtin_function_id(id);
 }
 
 
@@ -3596,7 +3559,6 @@
     CreateRoots();
     Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
     CreateStrictModeFunctionMaps(empty_function);
-    CreateStrongModeFunctionMaps(empty_function);
     CreateIteratorMaps();
     Handle<JSGlobalObject> global_object =
         CreateNewGlobals(global_proxy_template, global_proxy);
diff --git a/src/builtins.cc b/src/builtins.cc
index 23c41f7..9c3ff59 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -5,10 +5,12 @@
 #include "src/builtins.h"
 
 #include "src/api.h"
+#include "src/api-arguments.h"
 #include "src/api-natives.h"
-#include "src/arguments.h"
 #include "src/base/once.h"
 #include "src/bootstrapper.h"
+#include "src/code-factory.h"
+#include "src/compiler/code-stub-assembler.h"
 #include "src/dateparser-inl.h"
 #include "src/elements.h"
 #include "src/frames-inl.h"
@@ -142,11 +144,18 @@
                                                      Isolate* isolate);        \
   MUST_USE_RESULT static Object* Builtin_##name(                               \
       int args_length, Object** args_object, Isolate* isolate) {               \
+    Object* value;                                                             \
     isolate->counters()->runtime_calls()->Increment();                         \
-    RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();       \
-    RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name);              \
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                      \
+                 "V8.Builtin_" #name);                                         \
     name##ArgumentsType args(args_length, args_object);                        \
-    Object* value = Builtin_Impl_##name(args, isolate);                        \
+    if (FLAG_runtime_call_stats) {                                             \
+      RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();     \
+      RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name);            \
+      value = Builtin_Impl_##name(args, isolate);                              \
+    } else {                                                                   \
+      value = Builtin_Impl_##name(args, isolate);                              \
+    }                                                                          \
     return value;                                                              \
   }                                                                            \
                                                                                \
@@ -207,58 +216,38 @@
   return *out <= object->elements()->length();
 }
 
-
-inline bool PrototypeHasNoElements(PrototypeIterator* iter) {
+inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
   DisallowHeapAllocation no_gc;
-  for (; !iter->IsAtEnd(); iter->Advance()) {
-    if (iter->GetCurrent()->IsJSProxy()) return false;
-    JSObject* current = iter->GetCurrent<JSObject>();
-    if (current->IsAccessCheckNeeded()) return false;
-    if (current->HasIndexedInterceptor()) return false;
-    if (current->HasStringWrapperElements()) return false;
-    if (current->elements()->length() != 0) return false;
+  HeapObject* prototype = HeapObject::cast(object->map()->prototype());
+  HeapObject* null = isolate->heap()->null_value();
+  HeapObject* empty = isolate->heap()->empty_fixed_array();
+  while (prototype != null) {
+    Map* map = prototype->map();
+    if (map->instance_type() <= LAST_CUSTOM_ELEMENTS_RECEIVER) return false;
+    if (JSObject::cast(prototype)->elements() != empty) return false;
+    prototype = HeapObject::cast(map->prototype());
   }
   return true;
 }
 
 inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
                                               JSArray* receiver) {
-  DisallowHeapAllocation no_gc;
-  // If the array prototype chain is intact (and free of elements), and if the
-  // receiver's prototype is the array prototype, then we are done.
-  Object* prototype = receiver->map()->prototype();
-  if (prototype->IsJSArray() &&
-      isolate->is_initial_array_prototype(JSArray::cast(prototype)) &&
-      isolate->IsFastArrayConstructorPrototypeChainIntact()) {
-    return true;
-  }
-
-  // Slow case.
-  PrototypeIterator iter(isolate, receiver);
-  return PrototypeHasNoElements(&iter);
+  return PrototypeHasNoElements(isolate, receiver);
 }
 
 inline bool HasSimpleElements(JSObject* current) {
-  if (current->IsAccessCheckNeeded()) return false;
-  if (current->HasIndexedInterceptor()) return false;
-  if (current->HasStringWrapperElements()) return false;
-  if (current->GetElementsAccessor()->HasAccessors(current)) return false;
-  return true;
+  return current->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
+         !current->GetElementsAccessor()->HasAccessors(current);
 }
 
 inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
-                                          JSReceiver* receiver) {
+                                          JSObject* receiver) {
   // Check that we have no accessors on the receiver's elements.
-  JSObject* object = JSObject::cast(receiver);
-  if (!HasSimpleElements(object)) return false;
-  // Check that ther are not elements on the prototype.
-  DisallowHeapAllocation no_gc;
-  PrototypeIterator iter(isolate, receiver);
-  return PrototypeHasNoElements(&iter);
+  if (!HasSimpleElements(receiver)) return false;
+  return PrototypeHasNoElements(isolate, receiver);
 }
 
 inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
-  // Check that ther are not elements on the prototype.
   DisallowHeapAllocation no_gc;
   PrototypeIterator iter(isolate, receiver,
                          PrototypeIterator::START_AT_RECEIVER);
@@ -270,65 +259,39 @@
   return true;
 }
 
-// Returns empty handle if not applicable.
+// Returns |false| if not applicable.
 MUST_USE_RESULT
-inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
-    Isolate* isolate, Handle<Object> receiver, Arguments* args,
-    int first_added_arg) {
-  // We explicitly add a HandleScope to avoid creating several copies of the
-  // same handle which would otherwise cause issue when left-trimming later-on.
-  HandleScope scope(isolate);
-  if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
+inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
+                                                  Handle<Object> receiver,
+                                                  Arguments* args,
+                                                  int first_added_arg) {
+  if (!receiver->IsJSArray()) return false;
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  ElementsKind origin_kind = array->GetElementsKind();
+  if (IsDictionaryElementsKind(origin_kind)) return false;
+  if (array->map()->is_observed()) return false;
+  if (!array->map()->is_extensible()) return false;
+  if (args == nullptr) return true;
+
   // If there may be elements accessors in the prototype chain, the fast path
   // cannot be used if there are arguments to add to the array.
-  Heap* heap = isolate->heap();
-  if (args != NULL && !IsJSArrayFastElementMovingAllowed(isolate, *array)) {
-    return MaybeHandle<FixedArrayBase>();
-  }
-  if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
-  if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>();
-  Handle<FixedArrayBase> elms(array->elements(), isolate);
-  Map* map = elms->map();
-  if (map == heap->fixed_array_map()) {
-    if (args == NULL || array->HasFastObjectElements()) {
-      return scope.CloseAndEscape(elms);
-    }
-  } else if (map == heap->fixed_cow_array_map()) {
-    elms = JSObject::EnsureWritableFastElements(array);
-    if (args == NULL || array->HasFastObjectElements()) {
-      return scope.CloseAndEscape(elms);
-    }
-  } else if (map == heap->fixed_double_array_map()) {
-    if (args == NULL) {
-      return scope.CloseAndEscape(elms);
-    }
-  } else {
-    return MaybeHandle<FixedArrayBase>();
-  }
+  if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
 
   // Adding elements to the array prototype would break code that makes sure
   // it has no elements. Handle that elsewhere.
-  if (isolate->IsAnyInitialArrayPrototype(array)) {
-    return MaybeHandle<FixedArrayBase>();
-  }
+  if (isolate->IsAnyInitialArrayPrototype(array)) return false;
 
   // Need to ensure that the arguments passed in args can be contained in
   // the array.
   int args_length = args->length();
-  if (first_added_arg >= args_length) {
-    return scope.CloseAndEscape(elms);
-  }
+  if (first_added_arg >= args_length) return true;
 
-  ElementsKind origin_kind = array->map()->elements_kind();
-  DCHECK(!IsFastObjectElementsKind(origin_kind));
+  if (IsFastObjectElementsKind(origin_kind)) return true;
   ElementsKind target_kind = origin_kind;
   {
     DisallowHeapAllocation no_gc;
-    int arg_count = args_length - first_added_arg;
-    Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
-    for (int i = 0; i < arg_count; i++) {
-      Object* arg = arguments[i];
+    for (int i = first_added_arg; i < args_length; i++) {
+      Object* arg = (*args)[i];
       if (arg->IsHeapObject()) {
         if (arg->IsHeapNumber()) {
           target_kind = FAST_DOUBLE_ELEMENTS;
@@ -340,10 +303,12 @@
     }
   }
   if (target_kind != origin_kind) {
+    // Use a short-lived HandleScope to avoid creating several copies of the
+    // elements handle which would cause issues when left-trimming later-on.
+    HandleScope scope(isolate);
     JSObject::TransitionElementsKind(array, target_kind);
-    elms = handle(array->elements(), isolate);
   }
-  return scope.CloseAndEscape(elms);
+  return true;
 }
 
 
@@ -379,41 +344,235 @@
 
 BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
 
+void Builtins::Generate_ObjectHasOwnProperty(
+    compiler::CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::CodeStubAssembler::Variable Variable;
 
-BUILTIN(ArrayPush) {
+  Node* object = assembler->Parameter(0);
+  Node* key = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  Label call_runtime(assembler), return_true(assembler),
+      return_false(assembler);
+
+  // Smi receivers do not have own properties.
+  Label if_objectisnotsmi(assembler);
+  assembler->Branch(assembler->WordIsSmi(object), &return_false,
+                    &if_objectisnotsmi);
+  assembler->Bind(&if_objectisnotsmi);
+
+  Node* map = assembler->LoadMap(object);
+  Node* instance_type = assembler->LoadMapInstanceType(map);
+
+  Variable var_index(assembler, MachineRepresentation::kWord32);
+
+  Label if_keyissmi(assembler), if_keyisnotsmi(assembler),
+      keyisindex(assembler);
+  assembler->Branch(assembler->WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
+  assembler->Bind(&if_keyissmi);
+  {
+    // Negative smi keys are named properties. Handle in the runtime.
+    Label if_keyispositive(assembler);
+    assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositive,
+                      &call_runtime);
+    assembler->Bind(&if_keyispositive);
+
+    var_index.Bind(assembler->SmiUntag(key));
+    assembler->Goto(&keyisindex);
+  }
+
+  assembler->Bind(&if_keyisnotsmi);
+
+  Node* key_instance_type = assembler->LoadInstanceType(key);
+  Label if_iskeyunique(assembler), if_iskeynotsymbol(assembler);
+  assembler->Branch(
+      assembler->Word32Equal(key_instance_type,
+                             assembler->Int32Constant(SYMBOL_TYPE)),
+      &if_iskeyunique, &if_iskeynotsymbol);
+  assembler->Bind(&if_iskeynotsymbol);
+  {
+    Label if_iskeyinternalized(assembler);
+    Node* bits = assembler->WordAnd(
+        key_instance_type,
+        assembler->Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
+    assembler->Branch(
+        assembler->Word32Equal(
+            bits, assembler->Int32Constant(kStringTag | kInternalizedTag)),
+        &if_iskeyinternalized, &call_runtime);
+    assembler->Bind(&if_iskeyinternalized);
+
+    // Check whether the key is an array index passed in as a string. Handle
+    // it uniformly with smi keys if so.
+    // TODO(verwaest): Also support non-internalized strings.
+    Node* hash = assembler->LoadNameHash(key);
+    Node* bit = assembler->Word32And(
+        hash, assembler->Int32Constant(internal::Name::kIsNotArrayIndexMask));
+    Label if_isarrayindex(assembler);
+    assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
+                      &if_isarrayindex, &if_iskeyunique);
+    assembler->Bind(&if_isarrayindex);
+    var_index.Bind(
+        assembler->BitFieldDecode<internal::Name::ArrayIndexValueBits>(hash));
+    assembler->Goto(&keyisindex);
+  }
+  assembler->Bind(&if_iskeyunique);
+
+  {
+    Label if_objectissimple(assembler);
+    assembler->Branch(assembler->Int32LessThanOrEqual(
+                          instance_type,
+                          assembler->Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+                      &call_runtime, &if_objectissimple);
+    assembler->Bind(&if_objectissimple);
+  }
+
+  // TODO(verwaest): Perform a dictionary lookup on slow-mode receivers.
+  Node* bit_field3 = assembler->LoadMapBitField3(map);
+  Node* bit = assembler->BitFieldDecode<Map::DictionaryMap>(bit_field3);
+  Label if_isfastmap(assembler);
+  assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
+                    &if_isfastmap, &call_runtime);
+  assembler->Bind(&if_isfastmap);
+  Node* nof =
+      assembler->BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+  // Bail out to the runtime for large numbers of own descriptors. The stub only
+  // does linear search, which becomes too expensive in that case.
+  {
+    static const int32_t kMaxLinear = 256;
+    Label above_max(assembler), below_max(assembler);
+    assembler->Branch(assembler->Int32LessThanOrEqual(
+                          nof, assembler->Int32Constant(kMaxLinear)),
+                      &below_max, &call_runtime);
+    assembler->Bind(&below_max);
+  }
+  Node* descriptors = assembler->LoadMapDescriptors(map);
+
+  Variable var_descriptor(assembler, MachineRepresentation::kWord32);
+  Label loop(assembler, &var_descriptor);
+  var_descriptor.Bind(assembler->Int32Constant(0));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* index = var_descriptor.value();
+    Node* offset = assembler->Int32Constant(DescriptorArray::ToKeyIndex(0));
+    Node* factor = assembler->Int32Constant(DescriptorArray::kDescriptorSize);
+    Label if_notdone(assembler);
+    assembler->Branch(assembler->Word32Equal(index, nof), &return_false,
+                      &if_notdone);
+    assembler->Bind(&if_notdone);
+    {
+      Node* array_index =
+          assembler->Int32Add(offset, assembler->Int32Mul(index, factor));
+      Node* current =
+          assembler->LoadFixedArrayElementInt32Index(descriptors, array_index);
+      Label if_unequal(assembler);
+      assembler->Branch(assembler->WordEqual(current, key), &return_true,
+                        &if_unequal);
+      assembler->Bind(&if_unequal);
+
+      var_descriptor.Bind(
+          assembler->Int32Add(index, assembler->Int32Constant(1)));
+      assembler->Goto(&loop);
+    }
+  }
+
+  assembler->Bind(&keyisindex);
+  {
+    Label if_objectissimple(assembler);
+    assembler->Branch(assembler->Int32LessThanOrEqual(
+                          instance_type, assembler->Int32Constant(
+                                             LAST_CUSTOM_ELEMENTS_RECEIVER)),
+                      &call_runtime, &if_objectissimple);
+    assembler->Bind(&if_objectissimple);
+  }
+
+  Node* index = var_index.value();
+  Node* bit_field2 = assembler->LoadMapBitField2(map);
+  Node* elements_kind =
+      assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+
+  // TODO(verwaest): Support other elements kinds as well.
+  Label if_isobjectorsmi(assembler);
+  assembler->Branch(
+      assembler->Int32LessThanOrEqual(
+          elements_kind, assembler->Int32Constant(FAST_HOLEY_ELEMENTS)),
+      &if_isobjectorsmi, &call_runtime);
+  assembler->Bind(&if_isobjectorsmi);
+  {
+    Node* elements = assembler->LoadElements(object);
+    Node* length = assembler->LoadFixedArrayBaseLength(elements);
+
+    Label if_iskeyinrange(assembler);
+    assembler->Branch(
+        assembler->Int32LessThan(index, assembler->SmiToWord32(length)),
+        &if_iskeyinrange, &return_false);
+
+    assembler->Bind(&if_iskeyinrange);
+    Node* element = assembler->LoadFixedArrayElementInt32Index(elements, index);
+    Node* the_hole = assembler->LoadRoot(Heap::kTheHoleValueRootIndex);
+    assembler->Branch(assembler->WordEqual(element, the_hole), &return_false,
+                      &return_true);
+  }
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+
+  assembler->Bind(&call_runtime);
+  assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
+                                           context, object, key));
+}
+
+namespace {
+
+Object* DoArrayPush(Isolate* isolate,
+                    BuiltinArguments<BuiltinExtraArguments::kNone> args) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
-  MaybeHandle<FixedArrayBase> maybe_elms_obj =
-      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
-  Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
     return CallJsIntrinsic(isolate, isolate->array_push(), args);
   }
   // Fast Elements Path
-  int push_size = args.length() - 1;
+  int to_add = args.length() - 1;
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
   int len = Smi::cast(array->length())->value();
-  if (push_size == 0) {
-    return Smi::FromInt(len);
-  }
-  if (push_size > 0 &&
-      JSArray::WouldChangeReadOnlyLength(array, len + push_size)) {
+  if (to_add == 0) return Smi::FromInt(len);
+
+  // Currently fixed arrays cannot grow too big, so we should never hit this.
+  DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+
+  if (JSArray::HasReadOnlyLength(array)) {
     return CallJsIntrinsic(isolate, isolate->array_push(), args);
   }
-  DCHECK(!array->map()->is_observed());
+
   ElementsAccessor* accessor = array->GetElementsAccessor();
-  int new_length = accessor->Push(array, elms_obj, &args, push_size);
+  int new_length = accessor->Push(array, &args, to_add);
   return Smi::FromInt(new_length);
 }
 
+}  // namespace
+
+BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
+
+// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
+// tailcall to the builtin directly.
+RUNTIME_FUNCTION(Runtime_ArrayPush) {
+  DCHECK_EQ(2, args.length());
+  Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
+  // Rewrap the arguments as builtins arguments.
+  BuiltinArguments<BuiltinExtraArguments::kNone> caller_args(
+      incoming->length() + 1, incoming->arguments() + 1);
+  return DoArrayPush(isolate, caller_args);
+}
 
 BUILTIN(ArrayPop) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
-  MaybeHandle<FixedArrayBase> maybe_elms_obj =
-      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
-  Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0)) {
     return CallJsIntrinsic(isolate, isolate->array_pop(), args);
   }
 
@@ -430,12 +589,12 @@
   Handle<Object> result;
   if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
     // Fast Elements Path
-    result = array->GetElementsAccessor()->Pop(array, elms_obj);
+    result = array->GetElementsAccessor()->Pop(array);
   } else {
     // Use Slow Lookup otherwise
     uint32_t new_length = len - 1;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, Object::GetElement(isolate, array, new_length));
+        isolate, result, JSReceiver::GetElement(isolate, array, new_length));
     JSArray::SetLength(array, new_length);
   }
   return *result;
@@ -446,10 +605,7 @@
   HandleScope scope(isolate);
   Heap* heap = isolate->heap();
   Handle<Object> receiver = args.receiver();
-  MaybeHandle<FixedArrayBase> maybe_elms_obj =
-      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
-  Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0) ||
       !IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
     return CallJsIntrinsic(isolate, isolate->array_shift(), args);
   }
@@ -463,7 +619,7 @@
     return CallJsIntrinsic(isolate, isolate->array_shift(), args);
   }
 
-  Handle<Object> first = array->GetElementsAccessor()->Shift(array, elms_obj);
+  Handle<Object> first = array->GetElementsAccessor()->Shift(array);
   return *first;
 }
 
@@ -471,28 +627,23 @@
 BUILTIN(ArrayUnshift) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
-  MaybeHandle<FixedArrayBase> maybe_elms_obj =
-      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
-  Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
     return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
   DCHECK(!array->map()->is_observed());
   int to_add = args.length() - 1;
-  if (to_add == 0) {
-    return array->length();
-  }
-  // Currently fixed arrays cannot grow too big, so
-  // we should never hit this case.
-  DCHECK(to_add <= (Smi::kMaxValue - Smi::cast(array->length())->value()));
+  if (to_add == 0) return array->length();
 
-  if (to_add > 0 && JSArray::HasReadOnlyLength(array)) {
+  // Currently fixed arrays cannot grow too big, so we should never hit this.
+  DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+
+  if (JSArray::HasReadOnlyLength(array)) {
     return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
   }
 
   ElementsAccessor* accessor = array->GetElementsAccessor();
-  int new_length = accessor->Unshift(array, elms_obj, &args, to_add);
+  int new_length = accessor->Unshift(array, &args, to_add);
   return Smi::FromInt(new_length);
 }
 
@@ -500,41 +651,34 @@
 BUILTIN(ArraySlice) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
-  Handle<JSObject> object;
-  Handle<FixedArrayBase> elms_obj;
   int len = -1;
   int relative_start = 0;
   int relative_end = 0;
-  bool is_sloppy_arguments = false;
 
   if (receiver->IsJSArray()) {
     DisallowHeapAllocation no_gc;
     JSArray* array = JSArray::cast(*receiver);
-    if (!array->HasFastElements() ||
-        !IsJSArrayFastElementMovingAllowed(isolate, array) ||
-        !isolate->IsArraySpeciesLookupChainIntact() ||
-        // If this is a subclass of Array, then call out to JS
-        !array->map()->new_target_is_base()) {
+    if (V8_UNLIKELY(!array->HasFastElements() ||
+                    !IsJSArrayFastElementMovingAllowed(isolate, array) ||
+                    !isolate->IsArraySpeciesLookupChainIntact() ||
+                    // If this is a subclass of Array, then call out to JS
+                    !array->HasArrayPrototype(isolate))) {
       AllowHeapAllocation allow_allocation;
       return CallJsIntrinsic(isolate, isolate->array_slice(), args);
     }
     len = Smi::cast(array->length())->value();
-    object = Handle<JSObject>::cast(receiver);
-    elms_obj = handle(array->elements(), isolate);
   } else if (receiver->IsJSObject() &&
              GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
                                       &len)) {
+    DCHECK_EQ(FAST_ELEMENTS, JSObject::cast(*receiver)->GetElementsKind());
     // Array.prototype.slice(arguments, ...) is quite a common idiom
     // (notably more than 50% of invocations in Web apps).
     // Treat it in C++ as well.
-    is_sloppy_arguments = true;
-    object = Handle<JSObject>::cast(receiver);
-    elms_obj = handle(object->elements(), isolate);
   } else {
     AllowHeapAllocation allow_allocation;
     return CallJsIntrinsic(isolate, isolate->array_slice(), args);
   }
-  DCHECK(len >= 0);
+  DCHECK_LE(0, len);
   int argument_count = args.length() - 1;
   // Note carefully chosen defaults---if argument is missing,
   // it's undefined which gets converted to 0 for relative_start
@@ -567,36 +711,21 @@
   uint32_t actual_end =
       (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
 
-  if (actual_end <= actual_start) {
-    Handle<JSArray> result_array = isolate->factory()->NewJSArray(
-        GetPackedElementsKind(object->GetElementsKind()), 0, 0);
-    return *result_array;
-  }
-
+  Handle<JSObject> object = Handle<JSObject>::cast(receiver);
   ElementsAccessor* accessor = object->GetElementsAccessor();
-  if (is_sloppy_arguments &&
-      !accessor->IsPacked(object, elms_obj, actual_start, actual_end)) {
-    // Don't deal with arguments with holes in C++
-    AllowHeapAllocation allow_allocation;
-    return CallJsIntrinsic(isolate, isolate->array_slice(), args);
-  }
-  Handle<JSArray> result_array =
-      accessor->Slice(object, elms_obj, actual_start, actual_end);
-  return *result_array;
+  return *accessor->Slice(object, actual_start, actual_end);
 }
 
 
 BUILTIN(ArraySplice) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
-  MaybeHandle<FixedArrayBase> maybe_elms_obj =
-      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
-  Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
-      // If this is a subclass of Array, then call out to JS
-      !JSArray::cast(*receiver)->map()->new_target_is_base() ||
-      // If anything with @@species has been messed with, call out to JS
-      !isolate->IsArraySpeciesLookupChainIntact()) {
+  if (V8_UNLIKELY(
+          !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3) ||
+          // If this is a subclass of Array, then call out to JS.
+          !Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
+          // If anything with @@species has been messed with, call out to JS.
+          !isolate->IsArraySpeciesLookupChainIntact())) {
     return CallJsIntrinsic(isolate, isolate->array_splice(), args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -645,7 +774,7 @@
   }
   ElementsAccessor* accessor = array->GetElementsAccessor();
   Handle<JSArray> result_array = accessor->Splice(
-      array, elms_obj, actual_start, actual_delete_count, &args, add_count);
+      array, actual_start, actual_delete_count, &args, add_count);
   return *result_array;
 }
 
@@ -680,17 +809,9 @@
 
   ~ArrayConcatVisitor() { clear_storage(); }
 
-  bool visit(uint32_t i, Handle<Object> elm) {
+  MUST_USE_RESULT bool visit(uint32_t i, Handle<Object> elm) {
     uint32_t index = index_offset_ + i;
 
-    if (!is_fixed_array()) {
-      Handle<Object> element_value;
-      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-          isolate_, element_value,
-          Object::SetElement(isolate_, storage_, index, elm, STRICT), false);
-      return true;
-    }
-
     if (i >= JSObject::kMaxElementCount - index_offset_) {
       set_exceeds_array_limit(true);
       // Exception hasn't been thrown at this point. Return true to
@@ -699,6 +820,14 @@
       return true;
     }
 
+    if (!is_fixed_array()) {
+      LookupIterator it(isolate_, storage_, index, LookupIterator::OWN);
+      MAYBE_RETURN(
+          JSReceiver::CreateDataProperty(&it, elm, Object::THROW_ON_ERROR),
+          false);
+      return true;
+    }
+
     if (fast_elements()) {
       if (index < static_cast<uint32_t>(storage_fixed_array()->length())) {
         storage_fixed_array()->set(index, *elm);
@@ -778,28 +907,26 @@
     Handle<SeededNumberDictionary> slow_storage(
         SeededNumberDictionary::New(isolate_, current_storage->length()));
     uint32_t current_length = static_cast<uint32_t>(current_storage->length());
-    for (uint32_t i = 0; i < current_length; i++) {
-      HandleScope loop_scope(isolate_);
-      Handle<Object> element(current_storage->get(i), isolate_);
-      if (!element->IsTheHole()) {
-        // The object holding this backing store has just been allocated, so
-        // it cannot yet be used as a prototype.
-        Handle<SeededNumberDictionary> new_storage =
-            SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
-                                                false);
-        if (!new_storage.is_identical_to(slow_storage)) {
-          slow_storage = loop_scope.CloseAndEscape(new_storage);
-        }
-      }
-    }
+    FOR_WITH_HANDLE_SCOPE(
+        isolate_, uint32_t, i = 0, i, i < current_length, i++, {
+          Handle<Object> element(current_storage->get(i), isolate_);
+          if (!element->IsTheHole()) {
+            // The object holding this backing store has just been allocated, so
+            // it cannot yet be used as a prototype.
+            Handle<SeededNumberDictionary> new_storage =
+                SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
+                                                    false);
+            if (!new_storage.is_identical_to(slow_storage)) {
+              slow_storage = loop_scope.CloseAndEscape(new_storage);
+            }
+          }
+        });
     clear_storage();
     set_storage(*slow_storage);
     set_fast_elements(false);
   }
 
-  inline void clear_storage() {
-    GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
-  }
+  inline void clear_storage() { GlobalHandles::Destroy(storage_.location()); }
 
   inline void set_storage(FixedArray* storage) {
     DCHECK(is_fixed_array());
@@ -913,7 +1040,8 @@
     case FAST_ELEMENTS:
     case FAST_HOLEY_SMI_ELEMENTS:
     case FAST_HOLEY_ELEMENTS: {
-      Handle<FixedArray> elements(FixedArray::cast(object->elements()));
+      DisallowHeapAllocation no_gc;
+      FixedArray* elements = FixedArray::cast(object->elements());
       uint32_t length = static_cast<uint32_t>(elements->length());
       if (range < length) length = range;
       for (uint32_t i = 0; i < length; i++) {
@@ -941,20 +1069,23 @@
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      Handle<SeededNumberDictionary> dict(
-          SeededNumberDictionary::cast(object->elements()));
+      DisallowHeapAllocation no_gc;
+      SeededNumberDictionary* dict =
+          SeededNumberDictionary::cast(object->elements());
       uint32_t capacity = dict->Capacity();
-      for (uint32_t j = 0; j < capacity; j++) {
-        HandleScope loop_scope(isolate);
-        Handle<Object> k(dict->KeyAt(j), isolate);
-        if (dict->IsKey(*k)) {
-          DCHECK(k->IsNumber());
-          uint32_t index = static_cast<uint32_t>(k->Number());
-          if (index < range) {
-            indices->Add(index);
-          }
+      Heap* heap = isolate->heap();
+      Object* undefined = heap->undefined_value();
+      Object* the_hole = heap->the_hole_value();
+      FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
+        Object* k = dict->KeyAt(j);
+        if (k == undefined) continue;
+        if (k == the_hole) continue;
+        DCHECK(k->IsNumber());
+        uint32_t index = static_cast<uint32_t>(k->Number());
+        if (index < range) {
+          indices->Add(index);
         }
-      }
+      });
       break;
     }
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
@@ -1022,18 +1153,17 @@
 
 bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
                          uint32_t length, ArrayConcatVisitor* visitor) {
-  for (uint32_t i = 0; i < length; ++i) {
-    HandleScope loop_scope(isolate);
+  FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, ++i, {
     Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
     if (!maybe.IsJust()) return false;
     if (maybe.FromJust()) {
       Handle<Object> element_value;
-      ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_value,
-                                       Object::GetElement(isolate, receiver, i),
-                                       false);
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, element_value, JSReceiver::GetElement(isolate, receiver, i),
+          false);
       if (!visitor->visit(i, element_value)) return false;
     }
-  }
+  });
   visitor->increase_index_offset(length);
   return true;
 }
@@ -1086,9 +1216,8 @@
       // to check the prototype for missing elements.
       Handle<FixedArray> elements(FixedArray::cast(array->elements()));
       int fast_length = static_cast<int>(length);
-      DCHECK_LE(fast_length, elements->length());
-      for (int j = 0; j < fast_length; j++) {
-        HandleScope loop_scope(isolate);
+      DCHECK(fast_length <= elements->length());
+      FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
         Handle<Object> element_value(elements->get(j), isolate);
         if (!element_value->IsTheHole()) {
           if (!visitor->visit(j, element_value)) return false;
@@ -1099,12 +1228,12 @@
             // Call GetElement on array, not its prototype, or getters won't
             // have the correct receiver.
             ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-                isolate, element_value, Object::GetElement(isolate, array, j),
-                false);
+                isolate, element_value,
+                JSReceiver::GetElement(isolate, array, j), false);
             if (!visitor->visit(j, element_value)) return false;
           }
         }
-      }
+      });
       break;
     }
     case FAST_HOLEY_DOUBLE_ELEMENTS:
@@ -1121,8 +1250,7 @@
           FixedDoubleArray::cast(array->elements()));
       int fast_length = static_cast<int>(length);
       DCHECK(fast_length <= elements->length());
-      for (int j = 0; j < fast_length; j++) {
-        HandleScope loop_scope(isolate);
+      FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
         if (!elements->is_the_hole(j)) {
           double double_value = elements->get_scalar(j);
           Handle<Object> element_value =
@@ -1136,12 +1264,12 @@
             // have the correct receiver.
             Handle<Object> element_value;
             ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-                isolate, element_value, Object::GetElement(isolate, array, j),
-                false);
+                isolate, element_value,
+                JSReceiver::GetElement(isolate, array, j), false);
             if (!visitor->visit(j, element_value)) return false;
           }
         }
-      }
+      });
       break;
     }
 
@@ -1152,31 +1280,31 @@
       // than length. This might introduce duplicates in the indices list.
       CollectElementIndices(array, length, &indices);
       indices.Sort(&compareUInt32);
-      int j = 0;
       int n = indices.length();
-      while (j < n) {
-        HandleScope loop_scope(isolate);
+      FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < n, (void)0, {
         uint32_t index = indices[j];
         Handle<Object> element;
         ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-            isolate, element, Object::GetElement(isolate, array, index), false);
+            isolate, element, JSReceiver::GetElement(isolate, array, index),
+            false);
         if (!visitor->visit(index, element)) return false;
         // Skip to next different index (i.e., omit duplicates).
         do {
           j++;
         } while (j < n && indices[j] == index);
-      }
+      });
       break;
     }
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
-      for (uint32_t index = 0; index < length; index++) {
-        HandleScope loop_scope(isolate);
-        Handle<Object> element;
-        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-            isolate, element, Object::GetElement(isolate, array, index), false);
-        if (!visitor->visit(index, element)) return false;
-      }
+      FOR_WITH_HANDLE_SCOPE(
+          isolate, uint32_t, index = 0, index, index < length, index++, {
+            Handle<Object> element;
+            ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+                isolate, element, JSReceiver::GetElement(isolate, array, index),
+                false);
+            if (!visitor->visit(index, element)) return false;
+          });
       break;
     }
     case NO_ELEMENTS:
@@ -1231,8 +1359,7 @@
 
   uint32_t estimate_result_length = 0;
   uint32_t estimate_nof_elements = 0;
-  for (int i = 0; i < argument_count; i++) {
-    HandleScope loop_scope(isolate);
+  FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
     Handle<Object> obj((*args)[i], isolate);
     uint32_t length_estimate;
     uint32_t element_estimate;
@@ -1264,7 +1391,7 @@
     } else {
       estimate_nof_elements += element_estimate;
     }
-  }
+  });
 
   // If estimated number of elements is more than half of length, a
   // fixed array (fast case) is more time and space-efficient than a
@@ -1289,6 +1416,7 @@
           double_storage->set(j, obj->Number());
           j++;
         } else {
+          DisallowHeapAllocation no_gc;
           JSArray* array = JSArray::cast(*obj);
           uint32_t length = static_cast<uint32_t>(array->length()->Number());
           switch (array->GetElementsKind()) {
@@ -1316,10 +1444,11 @@
             }
             case FAST_HOLEY_SMI_ELEMENTS:
             case FAST_SMI_ELEMENTS: {
+              Object* the_hole = isolate->heap()->the_hole_value();
               FixedArray* elements(FixedArray::cast(array->elements()));
               for (uint32_t i = 0; i < length; i++) {
                 Object* element = elements->get(i);
-                if (element->IsTheHole()) {
+                if (element == the_hole) {
                   failure = true;
                   break;
                 }
@@ -1381,7 +1510,7 @@
         return isolate->heap()->exception();
       }
     } else {
-      visitor.visit(0, obj);
+      if (!visitor.visit(0, obj)) return isolate->heap()->exception();
       visitor.increase_index_offset(1);
     }
   }
@@ -1400,6 +1529,12 @@
 
 
 MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
+  // We shouldn't overflow when adding another len.
+  const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+  STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+  STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
+  USE(kHalfOfMaxInt);
+
   int n_arguments = args->length();
   int result_len = 0;
   {
@@ -1409,27 +1544,24 @@
     for (int i = 0; i < n_arguments; i++) {
       Object* arg = (*args)[i];
       if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
-      if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
+      if (!JSObject::cast(arg)->HasFastElements()) {
         return MaybeHandle<JSArray>();
       }
-      // TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
-      if (!JSObject::cast(arg)->HasFastElements()) {
+      if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
         return MaybeHandle<JSArray>();
       }
       Handle<JSArray> array(JSArray::cast(arg), isolate);
       if (HasConcatSpreadableModifier(isolate, array)) {
         return MaybeHandle<JSArray>();
       }
-      int len = Smi::cast(array->length())->value();
-
-      // We shouldn't overflow when adding another len.
-      const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
-      STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
-      USE(kHalfOfMaxInt);
-      result_len += len;
+      // The Array length is guaranteed to be <= kHalfOfMaxInt, thus we won't
+      // overflow.
+      result_len += Smi::cast(array->length())->value();
       DCHECK(result_len >= 0);
       // Throw an Error if we overflow the FixedArray limits
-      if (FixedArray::kMaxLength < result_len) {
+      if (FixedDoubleArray::kMaxLength < result_len ||
+          FixedArray::kMaxLength < result_len) {
+        AllowHeapAllocation allow_gc;
         THROW_NEW_ERROR(isolate,
                         NewRangeError(MessageTemplate::kInvalidArrayLength),
                         JSArray);
@@ -1460,12 +1592,21 @@
 
   Handle<JSArray> result_array;
 
+  // Avoid a real species read to save extra lookups to the array constructor.
+  if (V8_LIKELY(receiver->IsJSArray() &&
+                Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
+                isolate->IsArraySpeciesLookupChainIntact())) {
+    if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+      return *result_array;
+    }
+    if (isolate->has_pending_exception()) return isolate->heap()->exception();
+  }
   // Reading @@species happens before anything else with a side effect, so
   // we can do it here to determine whether to take the fast path.
   Handle<Object> species;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
-  if (*species == isolate->context()->native_context()->array_function()) {
+  if (*species == *isolate->array_function()) {
     if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
       return *result_array;
     }
@@ -1528,15 +1669,16 @@
           prop_value = JSObject::FastPropertyAt(from, representation, index);
         }
       } else {
-        ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, prop_value,
-                                         Object::GetProperty(from, next_key),
-                                         Nothing<bool>());
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, prop_value, JSReceiver::GetProperty(from, next_key),
+            Nothing<bool>());
         stable = from->map() == *map;
       }
     } else {
       // If the map did change, do a slower lookup. We are still guaranteed that
       // the object has a simple shape, and that the key is a name.
-      LookupIterator it(from, next_key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+      LookupIterator it(from, next_key, from,
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
       if (!it.IsFound()) continue;
       DCHECK(it.state() == LookupIterator::DATA ||
              it.state() == LookupIterator::ACCESSOR);
@@ -1544,7 +1686,7 @@
       ASSIGN_RETURN_ON_EXCEPTION_VALUE(
           isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
     }
-    LookupIterator it(to, next_key);
+    LookupIterator it(to, next_key, to);
     bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
     Maybe<bool> result = Object::SetProperty(
         &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
@@ -1840,7 +1982,7 @@
       isolate, keys, JSReceiver::GetKeys(receiver, OWN_ONLY, ALL_PROPERTIES,
                                          CONVERT_TO_STRING));
 
-  Handle<Object> descriptors =
+  Handle<JSObject> descriptors =
       isolate->factory()->NewJSObject(isolate->object_function());
 
   for (int i = 0; i < keys->length(); ++i) {
@@ -1855,7 +1997,7 @@
                                          : undefined;
 
     LookupIterator it = LookupIterator::PropertyOrElement(
-        isolate, descriptors, key, LookupIterator::OWN);
+        isolate, descriptors, key, descriptors, LookupIterator::OWN);
     Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
                                                          Object::DONT_THROW);
     CHECK(success.FromJust());
@@ -1957,6 +2099,233 @@
 }
 
 
+// -----------------------------------------------------------------------------
+// ES6 section 20.2.2 Function Properties of the Math Object
+
+
+// ES6 section 20.2.2.2 Math.acos ( x )
+BUILTIN(MathAcos) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> x = args.at<Object>(1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+  return *isolate->factory()->NewHeapNumber(std::acos(x->Number()));
+}
+
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+BUILTIN(MathAsin) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> x = args.at<Object>(1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+  return *isolate->factory()->NewHeapNumber(std::asin(x->Number()));
+}
+
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+BUILTIN(MathAtan) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> x = args.at<Object>(1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+  return *isolate->factory()->NewHeapNumber(std::atan(x->Number()));
+}
+
+namespace {
+
+void Generate_MathRoundingOperation(
+    compiler::CodeStubAssembler* assembler,
+    compiler::Node* (compiler::CodeStubAssembler::*float64op)(
+        compiler::Node*)) {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(4);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_x(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_x);
+  var_x.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {x} value.
+    Node* x = var_x.value();
+
+    // Check if {x} is a Smi or a HeapObject.
+    Label if_xissmi(assembler), if_xisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+    assembler->Bind(&if_xissmi);
+    {
+      // Nothing to do when {x} is a Smi.
+      assembler->Return(x);
+    }
+
+    assembler->Bind(&if_xisnotsmi);
+    {
+      // Check if {x} is a HeapNumber.
+      Label if_xisheapnumber(assembler),
+          if_xisnotheapnumber(assembler, Label::kDeferred);
+      assembler->Branch(
+          assembler->WordEqual(assembler->LoadMap(x),
+                               assembler->HeapNumberMapConstant()),
+          &if_xisheapnumber, &if_xisnotheapnumber);
+
+      assembler->Bind(&if_xisheapnumber);
+      {
+        Node* x_value = assembler->LoadHeapNumberValue(x);
+        Node* value = (assembler->*float64op)(x_value);
+        Node* result = assembler->ChangeFloat64ToTagged(value);
+        assembler->Return(result);
+      }
+
+      assembler->Bind(&if_xisnotheapnumber);
+      {
+        // Need to convert {x} to a Number first.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_x.Bind(assembler->CallStub(callable, context, x));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+}
+
+}  // namespace
+
+// ES6 section 20.2.2.10 Math.ceil ( x )
+void Builtins::Generate_MathCeil(compiler::CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler,
+                                 &compiler::CodeStubAssembler::Float64Ceil);
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+void Builtins::Generate_MathClz32(compiler::CodeStubAssembler* assembler) {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(4);
+
+  // Shared entry point for the clz32 operation.
+  Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
+  Label do_clz32(assembler);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_x(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_x);
+  var_x.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {x} value.
+    Node* x = var_x.value();
+
+    // Check if {x} is a Smi or a HeapObject.
+    Label if_xissmi(assembler), if_xisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+    assembler->Bind(&if_xissmi);
+    {
+      var_clz32_x.Bind(assembler->SmiToWord32(x));
+      assembler->Goto(&do_clz32);
+    }
+
+    assembler->Bind(&if_xisnotsmi);
+    {
+      // Check if {x} is a HeapNumber.
+      Label if_xisheapnumber(assembler),
+          if_xisnotheapnumber(assembler, Label::kDeferred);
+      assembler->Branch(
+          assembler->WordEqual(assembler->LoadMap(x),
+                               assembler->HeapNumberMapConstant()),
+          &if_xisheapnumber, &if_xisnotheapnumber);
+
+      assembler->Bind(&if_xisheapnumber);
+      {
+        var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
+        assembler->Goto(&do_clz32);
+      }
+
+      assembler->Bind(&if_xisnotheapnumber);
+      {
+        // Need to convert {x} to a Number first.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_x.Bind(assembler->CallStub(callable, context, x));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_clz32);
+  {
+    Node* x_value = var_clz32_x.value();
+    Node* value = assembler->Word32Clz(x_value);
+    Node* result = assembler->ChangeInt32ToTagged(value);
+    assembler->Return(result);
+  }
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+void Builtins::Generate_MathFloor(compiler::CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler,
+                                 &compiler::CodeStubAssembler::Float64Floor);
+}
+
+// ES6 section 20.2.2.17 Math.fround ( x )
+BUILTIN(MathFround) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> x = args.at<Object>(1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+  float x32 = DoubleToFloat32(x->Number());
+  return *isolate->factory()->NewNumber(x32);
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+BUILTIN(MathImul) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> x = args.at<Object>(1);
+  Handle<Object> y = args.at<Object>(2);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, y, Object::ToNumber(y));
+  int product = static_cast<int>(NumberToUint32(*x) * NumberToUint32(*y));
+  return *isolate->factory()->NewNumberFromInt(product);
+}
+
+// ES6 section 20.2.2.28 Math.round ( x )
+void Builtins::Generate_MathRound(compiler::CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler,
+                                 &compiler::CodeStubAssembler::Float64Round);
+}
+
+// ES6 section 20.2.2.32 Math.sqrt ( x )
+void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Sqrt(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.35 Math.trunc ( x )
+void Builtins::Generate_MathTrunc(compiler::CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler,
+                                 &compiler::CodeStubAssembler::Float64Trunc);
+}
+
+// -----------------------------------------------------------------------------
+// ES6 section 26.1 The Reflect Object
+
+
 // ES6 section 26.1.3 Reflect.defineProperty
 BUILTIN(ReflectDefineProperty) {
   HandleScope scope(isolate);
@@ -3659,45 +4028,6 @@
   return *result;
 }
 
-// ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
-BUILTIN(FunctionHasInstance) {
-  HandleScope scope(isolate);
-  Handle<Object> callable = args.receiver();
-  Handle<Object> object = args.atOrUndefined(isolate, 1);
-
-  // {callable} must have a [[Call]] internal method.
-  if (!callable->IsCallable()) {
-    return isolate->heap()->false_value();
-  }
-  // If {object} is not a receiver, return false.
-  if (!object->IsJSReceiver()) {
-    return isolate->heap()->false_value();
-  }
-  // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
-  // and use that instead of {callable}.
-  while (callable->IsJSBoundFunction()) {
-    callable =
-        handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
-               isolate);
-  }
-  DCHECK(callable->IsCallable());
-  // Get the "prototype" of {callable}; raise an error if it's not a receiver.
-  Handle<Object> prototype;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, prototype,
-      Object::GetProperty(callable, isolate->factory()->prototype_string()));
-  if (!prototype->IsJSReceiver()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate,
-        NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
-  }
-  // Return whether or not {prototype} is in the prototype chain of {object}.
-  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-  Maybe<bool> result =
-      JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
-  MAYBE_RETURN(result, isolate->heap()->exception());
-  return isolate->heap()->ToBoolean(result.FromJust());
-}
 
 // ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
 BUILTIN(SymbolConstructor) {
@@ -3728,10 +4058,78 @@
   Handle<Object> object = args.at<Object>(0);
   Handle<String> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, JSObject::ObjectProtoToString(isolate, object));
+      isolate, result, Object::ObjectProtoToString(isolate, object));
   return *result;
 }
 
+// -----------------------------------------------------------------------------
+// ES6 section 21.1 String Objects
+
+namespace {
+
+bool ToUint16(Handle<Object> value, uint16_t* result) {
+  if (value->IsNumber() || Object::ToNumber(value).ToHandle(&value)) {
+    *result = DoubleToUint32(value->Number());
+    return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+// ES6 21.1.2.1 String.fromCharCode ( ...codeUnits )
+BUILTIN(StringFromCharCode) {
+  HandleScope scope(isolate);
+  // Check resulting string length.
+  int index = 0;
+  Handle<String> result;
+  int const length = args.length() - 1;
+  if (length == 0) return isolate->heap()->empty_string();
+  DCHECK_LT(0, length);
+  // Load the first character code.
+  uint16_t code;
+  if (!ToUint16(args.at<Object>(1), &code)) return isolate->heap()->exception();
+  // Assume that the resulting String contains only one byte characters.
+  if (code <= String::kMaxOneByteCharCodeU) {
+    // Check for single one-byte character fast case.
+    if (length == 1) {
+      return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
+    }
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, isolate->factory()->NewRawOneByteString(length));
+    do {
+      Handle<SeqOneByteString>::cast(result)->Set(index, code);
+      if (++index == length) break;
+      if (!ToUint16(args.at<Object>(1 + index), &code)) {
+        return isolate->heap()->exception();
+      }
+    } while (code <= String::kMaxOneByteCharCodeU);
+  }
+  // Check if all characters fit into the one byte range.
+  if (index < length) {
+    // Fallback to two byte string.
+    Handle<String> new_result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, new_result, isolate->factory()->NewRawTwoByteString(length));
+    for (int new_index = 0; new_index < index; ++new_index) {
+      uint16_t new_code =
+          Handle<SeqOneByteString>::cast(result)->Get(new_index);
+      Handle<SeqTwoByteString>::cast(new_result)->Set(new_index, new_code);
+    }
+    while (true) {
+      Handle<SeqTwoByteString>::cast(new_result)->Set(index, code);
+      if (++index == length) break;
+      if (!ToUint16(args.at<Object>(1 + index), &code)) {
+        return isolate->heap()->exception();
+      }
+    }
+    result = new_result;
+  }
+  return *result;
+}
+
+// -----------------------------------------------------------------------------
+// ES6 section 24.1 ArrayBuffer Objects
 
 // ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
 BUILTIN(ArrayBufferConstructor) {
@@ -3845,9 +4243,9 @@
   HandleScope scope(isolate);
   Handle<HeapObject> function = args.target<HeapObject>();
   Handle<JSReceiver> receiver;
-  // TODO(ishell): turn this back to a DCHECK.
-  CHECK(function->IsFunctionTemplateInfo() ||
-        Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
+
+  DCHECK(function->IsFunctionTemplateInfo() ||
+         Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
 
   Handle<FunctionTemplateInfo> fun_data =
       function->IsFunctionTemplateInfo()
@@ -3893,8 +4291,7 @@
 
   Object* raw_call_data = fun_data->call_code();
   if (!raw_call_data->IsUndefined()) {
-    // TODO(ishell): remove this debugging code.
-    CHECK(raw_call_data->IsCallHandlerInfo());
+    DCHECK(raw_call_data->IsCallHandlerInfo());
     CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
     Object* callback_obj = call_data->callback();
     v8::FunctionCallback callback =
@@ -3912,14 +4309,8 @@
                                      args.length() - 1,
                                      is_construct);
 
-    v8::Local<v8::Value> value = custom.Call(callback);
-    Handle<Object> result;
-    if (value.IsEmpty()) {
-      result = isolate->factory()->undefined_value();
-    } else {
-      result = v8::Utils::OpenHandle(*value);
-      result->VerifyApiCallResultType();
-    }
+    Handle<Object> result = custom.Call(callback);
+    if (result.is_null()) result = isolate->factory()->undefined_value();
 
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
     if (!is_construct || result->IsJSObject()) {
@@ -4053,6 +4444,20 @@
                                                 Handle<Object> receiver,
                                                 int argc,
                                                 Handle<Object> args[]) {
+  Isolate* isolate = function->GetIsolate();
+  // Do proper receiver conversion for non-strict mode API functions.
+  if (!receiver->IsJSReceiver()) {
+    DCHECK(function->IsFunctionTemplateInfo() || function->IsJSFunction());
+    if (function->IsFunctionTemplateInfo() ||
+        is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
+      if (receiver->IsUndefined() || receiver->IsNull()) {
+        receiver = handle(isolate->global_proxy(), isolate);
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+                                   Object::ToObject(isolate, receiver), Object);
+      }
+    }
+  }
   // Construct BuiltinArguments object: function, arguments reversed, receiver.
   const int kBufferSize = 32;
   Object* small_argv[kBufferSize];
@@ -4069,7 +4474,6 @@
   argv[0] = *function;
   MaybeHandle<Object> result;
   {
-    auto isolate = function->GetIsolate();
     RelocatableArguments arguments(isolate, argc + 2, &argv[argc + 1]);
     result = HandleApiCallHelper<false>(isolate, arguments);
   }
@@ -4086,8 +4490,6 @@
 MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
     Isolate* isolate, bool is_construct_call,
     BuiltinArguments<BuiltinExtraArguments::kNone> args) {
-  Heap* heap = isolate->heap();
-
   Handle<Object> receiver = args.receiver();
 
   // Get the object called.
@@ -4122,12 +4524,11 @@
                                      &args[0] - 1,
                                      args.length() - 1,
                                      is_construct_call);
-    v8::Local<v8::Value> value = custom.Call(callback);
-    if (value.IsEmpty()) {
-      result = heap->undefined_value();
+    Handle<Object> result_handle = custom.Call(callback);
+    if (result_handle.is_null()) {
+      result = isolate->heap()->undefined_value();
     } else {
-      result = *reinterpret_cast<Object**>(*value);
-      result->VerifyApiCallResultType();
+      result = *result_handle;
     }
   }
   // Check for exceptions and return result.
@@ -4280,12 +4681,14 @@
 
 
 struct BuiltinDesc {
+  Handle<Code> (*builder)(Isolate*, struct BuiltinDesc const*);
   byte* generator;
   byte* c_code;
   const char* s_name;  // name is only used for generating log information.
   int name;
   Code::Flags flags;
   BuiltinExtraArguments extra_args;
+  int argc;
 };
 
 #define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
@@ -4303,8 +4706,60 @@
   friend class Builtins;
 };
 
-static BuiltinFunctionTable builtin_function_table =
-    BUILTIN_FUNCTION_TABLE_INIT;
+namespace {
+
+BuiltinFunctionTable builtin_function_table = BUILTIN_FUNCTION_TABLE_INIT;
+
+Handle<Code> MacroAssemblerBuilder(Isolate* isolate,
+                                   BuiltinDesc const* builtin_desc) {
+// For now we generate builtin adaptor code into a stack-allocated
+// buffer, before copying it into individual code objects. Be careful
+// with alignment; some platforms don't like unaligned code.
+#ifdef DEBUG
+  // We can generate a lot of debug code on Arm64.
+  const size_t buffer_size = 32 * KB;
+#elif V8_TARGET_ARCH_PPC64
+  // 8 KB is insufficient on PPC64 when FLAG_debug_code is on.
+  const size_t buffer_size = 10 * KB;
+#else
+  const size_t buffer_size = 8 * KB;
+#endif
+  union {
+    int force_alignment;
+    byte buffer[buffer_size];  // NOLINT(runtime/arrays)
+  } u;
+
+  MacroAssembler masm(isolate, u.buffer, sizeof(u.buffer),
+                      CodeObjectRequired::kYes);
+  // Generate the code/adaptor.
+  typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
+  Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
+  // We pass all arguments to the generator, but it may not use all of
+  // them.  This works because the first arguments are on top of the
+  // stack.
+  DCHECK(!masm.has_frame());
+  g(&masm, builtin_desc->name, builtin_desc->extra_args);
+  // Move the code into the object heap.
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  Code::Flags flags = builtin_desc->flags;
+  return isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+}
+
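The anonymous union in MacroAssemblerBuilder above exists only to give the stack buffer a guaranteed alignment for code emission. A reduced sketch of the same trick, assuming int alignment is the property the comment is after (AlignedCodeBuffer is a hypothetical name):

    // A union is aligned to the strictest alignment of its members, so the
    // byte array below starts on at least an int-aligned boundary.
    union AlignedCodeBuffer {
      int force_alignment;
      unsigned char buffer[8 * 1024];
    };

    static_assert(alignof(AlignedCodeBuffer) >= alignof(int),
                  "code buffer storage is at least int-aligned");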
+Handle<Code> CodeStubAssemblerBuilder(Isolate* isolate,
+                                      BuiltinDesc const* builtin_desc) {
+  Zone zone(isolate->allocator());
+  compiler::CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
+                                        builtin_desc->flags,
+                                        builtin_desc->s_name);
+  // Generate the code/adaptor.
+  typedef void (*Generator)(compiler::CodeStubAssembler*);
+  Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
+  g(&assembler);
+  return assembler.GenerateCode();
+}
+
+}  // namespace
 
 // Define array of pointers to generators and C builtin functions.
 // We do this in a sort of roundabout way so that we can do the initialization
@@ -4312,47 +4767,70 @@
 // Code::Flags names a non-abstract type.
 void Builtins::InitBuiltinFunctionTable() {
   BuiltinDesc* functions = builtin_function_table.functions_;
-  functions[builtin_count].generator = NULL;
-  functions[builtin_count].c_code = NULL;
-  functions[builtin_count].s_name = NULL;
+  functions[builtin_count].builder = nullptr;
+  functions[builtin_count].generator = nullptr;
+  functions[builtin_count].c_code = nullptr;
+  functions[builtin_count].s_name = nullptr;
   functions[builtin_count].name = builtin_count;
   functions[builtin_count].flags = static_cast<Code::Flags>(0);
   functions[builtin_count].extra_args = BuiltinExtraArguments::kNone;
+  functions[builtin_count].argc = 0;
 
 #define DEF_FUNCTION_PTR_C(aname, aextra_args)                \
+  functions->builder = &MacroAssemblerBuilder;                \
   functions->generator = FUNCTION_ADDR(Generate_Adaptor);     \
   functions->c_code = FUNCTION_ADDR(Builtin_##aname);         \
   functions->s_name = #aname;                                 \
   functions->name = c_##aname;                                \
   functions->flags = Code::ComputeFlags(Code::BUILTIN);       \
   functions->extra_args = BuiltinExtraArguments::aextra_args; \
+  functions->argc = 0;                                        \
   ++functions;
 
 #define DEF_FUNCTION_PTR_A(aname, kind, state, extra)              \
+  functions->builder = &MacroAssemblerBuilder;                     \
   functions->generator = FUNCTION_ADDR(Generate_##aname);          \
   functions->c_code = NULL;                                        \
   functions->s_name = #aname;                                      \
   functions->name = k##aname;                                      \
   functions->flags = Code::ComputeFlags(Code::kind, state, extra); \
   functions->extra_args = BuiltinExtraArguments::kNone;            \
+  functions->argc = 0;                                             \
+  ++functions;
+
+#define DEF_FUNCTION_PTR_T(aname, aargc)                                 \
+  functions->builder = &CodeStubAssemblerBuilder;                        \
+  functions->generator = FUNCTION_ADDR(Generate_##aname);                \
+  functions->c_code = NULL;                                              \
+  functions->s_name = #aname;                                            \
+  functions->name = k##aname;                                            \
+  functions->flags =                                                     \
+      Code::ComputeFlags(Code::BUILTIN, UNINITIALIZED, kNoExtraICState); \
+  functions->extra_args = BuiltinExtraArguments::kNone;                  \
+  functions->argc = aargc;                                               \
   ++functions;
 
 #define DEF_FUNCTION_PTR_H(aname, kind)                     \
+  functions->builder = &MacroAssemblerBuilder;              \
   functions->generator = FUNCTION_ADDR(Generate_##aname);   \
   functions->c_code = NULL;                                 \
   functions->s_name = #aname;                               \
   functions->name = k##aname;                               \
   functions->flags = Code::ComputeHandlerFlags(Code::kind); \
   functions->extra_args = BuiltinExtraArguments::kNone;     \
+  functions->argc = 0;                                      \
   ++functions;
 
   BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
   BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+  BUILTIN_LIST_T(DEF_FUNCTION_PTR_T)
   BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
   BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
 
 #undef DEF_FUNCTION_PTR_C
 #undef DEF_FUNCTION_PTR_A
+#undef DEF_FUNCTION_PTR_H
+#undef DEF_FUNCTION_PTR_T
 }
 
 
@@ -4364,43 +4842,15 @@
 
   const BuiltinDesc* functions = builtin_function_table.functions();
 
-  // For now we generate builtin adaptor code into a stack-allocated
-  // buffer, before copying it into individual code objects. Be careful
-  // with alignment, some platforms don't like unaligned code.
-#ifdef DEBUG
-  // We can generate a lot of debug code on Arm64.
-  const size_t buffer_size = 32*KB;
-#elif V8_TARGET_ARCH_PPC64
-  // 8 KB is insufficient on PPC64 when FLAG_debug_code is on.
-  const size_t buffer_size = 10 * KB;
-#else
-  const size_t buffer_size = 8*KB;
-#endif
-  union { int force_alignment; byte buffer[buffer_size]; } u;
-
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
   for (int i = 0; i < builtin_count; i++) {
     if (create_heap_objects) {
-      MacroAssembler masm(isolate, u.buffer, sizeof u.buffer,
-                          CodeObjectRequired::kYes);
-      // Generate the code/adaptor.
-      typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
-      Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
-      // We pass all arguments to the generator, but it may not use all of
-      // them.  This works because the first arguments are on top of the
-      // stack.
-      DCHECK(!masm.has_frame());
-      g(&masm, functions[i].name, functions[i].extra_args);
-      // Move the code into the object heap.
-      CodeDesc desc;
-      masm.GetCode(&desc);
-      Code::Flags flags = functions[i].flags;
-      Handle<Code> code =
-          isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+      Handle<Code> code = (*functions[i].builder)(isolate, functions + i);
       // Log the event and add the code to the builtins array.
       PROFILE(isolate,
-              CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
+              CodeCreateEvent(Logger::BUILTIN_TAG, AbstractCode::cast(*code),
+                              functions[i].s_name));
       builtins_[i] = *code;
       code->set_builtin_index(i);
 #ifdef ENABLE_DISASSEMBLER
@@ -4470,6 +4920,11 @@
       reinterpret_cast<Code**>(builtin_address(k##name));   \
   return Handle<Code>(code_address);                        \
 }
+#define DEFINE_BUILTIN_ACCESSOR_T(name, argc)                                 \
+  Handle<Code> Builtins::name() {                                             \
+    Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name)); \
+    return Handle<Code>(code_address);                                        \
+  }
 #define DEFINE_BUILTIN_ACCESSOR_H(name, kind)               \
 Handle<Code> Builtins::name() {                             \
   Code** code_address =                                     \
@@ -4478,11 +4933,13 @@
 }
 BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
 BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
+BUILTIN_LIST_T(DEFINE_BUILTIN_ACCESSOR_T)
 BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
 BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
 #undef DEFINE_BUILTIN_ACCESSOR_C
 #undef DEFINE_BUILTIN_ACCESSOR_A
-
+#undef DEFINE_BUILTIN_ACCESSOR_T
+#undef DEFINE_BUILTIN_ACCESSOR_H
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins.h b/src/builtins.h
index 93e6e3d..221d06f 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -11,6 +11,13 @@
 namespace v8 {
 namespace internal {
 
+namespace compiler {
+
+// Forward declarations.
+class CodeStubAssembler;
+
+}  // namespace compiler
+
 // Specifies extra arguments required by a C++ builtin.
 enum class BuiltinExtraArguments : uint8_t {
   kNone = 0u,
@@ -110,12 +117,17 @@
   V(FunctionConstructor, kTargetAndNewTarget)                  \
   V(FunctionPrototypeBind, kNone)                              \
   V(FunctionPrototypeToString, kNone)                          \
-  V(FunctionHasInstance, kNone)                                \
                                                                \
   V(GeneratorFunctionConstructor, kTargetAndNewTarget)         \
                                                                \
   V(GlobalEval, kTarget)                                       \
                                                                \
+  V(MathAcos, kNone)                                           \
+  V(MathAsin, kNone)                                           \
+  V(MathAtan, kNone)                                           \
+  V(MathFround, kNone)                                         \
+  V(MathImul, kNone)                                           \
+                                                               \
   V(ObjectAssign, kNone)                                       \
   V(ObjectCreate, kNone)                                       \
   V(ObjectFreeze, kNone)                                       \
@@ -149,6 +161,8 @@
   V(ReflectSet, kNone)                                         \
   V(ReflectSetPrototypeOf, kNone)                              \
                                                                \
+  V(StringFromCharCode, kNone)                                 \
+                                                               \
   V(SymbolConstructor, kNone)                                  \
   V(SymbolConstructor_ConstructStub, kTarget)                  \
                                                                \
@@ -265,6 +279,7 @@
   V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState)         \
   V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
                                                                                \
+  V(FunctionHasInstance, BUILTIN, UNINITIALIZED, kNoExtraICState)              \
   V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
   V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
                                                                                \
@@ -285,7 +300,6 @@
                                                                                \
   V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
   V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)                   \
-  V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
   V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)                       \
                                                                                \
   V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
@@ -293,6 +307,16 @@
   V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
 
+// Define list of builtins implemented in TurboFan (with JS linkage).
+#define BUILTIN_LIST_T(V) \
+  V(MathCeil, 2)          \
+  V(MathClz32, 2)         \
+  V(MathFloor, 2)         \
+  V(MathRound, 2)         \
+  V(MathSqrt, 2)          \
+  V(MathTrunc, 2)         \
+  V(ObjectHasOwnProperty, 2)
+
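BUILTIN_LIST_T follows the usual X-macro pattern: each V(name, argc) entry is expanded once per consumer, by DEF_FUNCTION_PTR_T above (builtins.cc) and by DEF_ENUM_T and DECLARE_BUILTIN_ACCESSOR_T below. A reduced, self-contained sketch of the enum expansion (only the two Math entries are copied from the list above; the _DEMO names are hypothetical):

    // Each V(name, argc) entry becomes one k##name enumerator.
    #define BUILTIN_LIST_T_DEMO(V) \
      V(MathCeil, 2)               \
      V(MathClz32, 2)

    #define DEF_ENUM_T_DEMO(name, argc) k##name,
    enum Name { BUILTIN_LIST_T_DEMO(DEF_ENUM_T_DEMO) builtin_count };
    // Expands to: enum Name { kMathCeil, kMathClz32, builtin_count };
    #undef DEF_ENUM_T_DEMO
    #undef BUILTIN_LIST_T_DEMO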
 // Define list of builtin handlers implemented in assembly.
 #define BUILTIN_LIST_H(V)                    \
   V(LoadIC_Slow,             LOAD_IC)        \
@@ -331,14 +355,16 @@
   enum Name {
 #define DEF_ENUM_C(name, ignore) k##name,
 #define DEF_ENUM_A(name, kind, state, extra) k##name,
+#define DEF_ENUM_T(name, argc) k##name,
 #define DEF_ENUM_H(name, kind) k##name,
-    BUILTIN_LIST_C(DEF_ENUM_C)
-    BUILTIN_LIST_A(DEF_ENUM_A)
-    BUILTIN_LIST_H(DEF_ENUM_H)
-    BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
+    BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A)
+        BUILTIN_LIST_T(DEF_ENUM_T) BUILTIN_LIST_H(DEF_ENUM_H)
+            BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
 #undef DEF_ENUM_C
 #undef DEF_ENUM_A
-    builtin_count
+#undef DEF_ENUM_T
+#undef DEF_ENUM_H
+                builtin_count
   };
 
   enum CFunctionId {
@@ -351,13 +377,17 @@
 #define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
 #define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
   Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_T(name, argc) Handle<Code> name();
 #define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
   BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
   BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
+  BUILTIN_LIST_T(DECLARE_BUILTIN_ACCESSOR_T)
   BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
   BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
 #undef DECLARE_BUILTIN_ACCESSOR_C
 #undef DECLARE_BUILTIN_ACCESSOR_A
+#undef DECLARE_BUILTIN_ACCESSOR_T
+#undef DECLARE_BUILTIN_ACCESSOR_H
 
   // Convenience wrappers.
   Handle<Code> CallFunction(
@@ -548,6 +578,7 @@
   // ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( )
   static void Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm);
 
+  static void Generate_FunctionHasInstance(MacroAssembler* masm);
   static void Generate_FunctionPrototypeApply(MacroAssembler* masm);
   static void Generate_FunctionPrototypeCall(MacroAssembler* masm);
 
@@ -557,6 +588,12 @@
   static void Generate_InternalArrayCode(MacroAssembler* masm);
   static void Generate_ArrayCode(MacroAssembler* masm);
 
+  // ES6 section 20.2.2.10 Math.ceil ( x )
+  static void Generate_MathCeil(compiler::CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.11 Math.clz32 ( x )
+  static void Generate_MathClz32(compiler::CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.16 Math.floor ( x )
+  static void Generate_MathFloor(compiler::CodeStubAssembler* assembler);
   enum class MathMaxMinKind { kMax, kMin };
   static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
   // ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
@@ -567,16 +604,25 @@
   static void Generate_MathMin(MacroAssembler* masm) {
     Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
   }
+  // ES6 section 20.2.2.28 Math.round ( x )
+  static void Generate_MathRound(compiler::CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.32 Math.sqrt ( x )
+  static void Generate_MathSqrt(compiler::CodeStubAssembler* assembler);
+  // ES6 section 20.2.2.35 Math.trunc ( x )
+  static void Generate_MathTrunc(compiler::CodeStubAssembler* assembler);
 
   // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
   static void Generate_NumberConstructor(MacroAssembler* masm);
   // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
   static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
 
+  // ES6 section 19.1.3.2 Object.prototype.hasOwnProperty
+  static void Generate_ObjectHasOwnProperty(
+      compiler::CodeStubAssembler* assembler);
+
   static void Generate_StringConstructor(MacroAssembler* masm);
   static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
   static void Generate_OnStackReplacement(MacroAssembler* masm);
-  static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
   static void Generate_InterruptCheck(MacroAssembler* masm);
   static void Generate_StackCheck(MacroAssembler* masm);
 
diff --git a/src/code-factory.cc b/src/code-factory.cc
index 9898282..fbfdd5f 100644
--- a/src/code-factory.cc
+++ b/src/code-factory.cc
@@ -119,13 +119,6 @@
 
 
 // static
-Callable CodeFactory::CompareNilIC(Isolate* isolate, NilValue nil_value) {
-  Handle<Code> code = CompareNilICStub::GetUninitialized(isolate, nil_value);
-  return Callable(code, CompareNilDescriptor(isolate));
-}
-
-
-// static
 Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
   BinaryOpICStub stub(isolate, op);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -141,8 +134,8 @@
 
 // static
 Callable CodeFactory::ToBoolean(Isolate* isolate) {
-  Handle<Code> code = ToBooleanStub::GetUninitialized(isolate);
-  return Callable(code, ToBooleanDescriptor(isolate));
+  ToBooleanStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
 
@@ -154,6 +147,18 @@
 
 
 // static
+Callable CodeFactory::NonNumberToNumber(Isolate* isolate) {
+  NonNumberToNumberStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringToNumber(Isolate* isolate) {
+  StringToNumberStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
 Callable CodeFactory::ToString(Isolate* isolate) {
   ToStringStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -168,6 +173,12 @@
 
 
 // static
+Callable CodeFactory::ToInteger(Isolate* isolate) {
+  ToIntegerStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
 Callable CodeFactory::ToLength(Isolate* isolate) {
   ToLengthStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -201,6 +212,83 @@
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
+// static
+Callable CodeFactory::Add(Isolate* isolate) {
+  AddStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::Subtract(Isolate* isolate) {
+  SubtractStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::BitwiseAnd(Isolate* isolate) {
+  BitwiseAndStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::BitwiseOr(Isolate* isolate) {
+  BitwiseOrStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::BitwiseXor(Isolate* isolate) {
+  BitwiseXorStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::LessThan(Isolate* isolate) {
+  LessThanStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::LessThanOrEqual(Isolate* isolate) {
+  LessThanOrEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::GreaterThan(Isolate* isolate) {
+  GreaterThanStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::GreaterThanOrEqual(Isolate* isolate) {
+  GreaterThanOrEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::Equal(Isolate* isolate) {
+  EqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::NotEqual(Isolate* isolate) {
+  NotEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StrictEqual(Isolate* isolate) {
+  StrictEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StrictNotEqual(Isolate* isolate) {
+  StrictNotEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
 
 // static
 Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
@@ -209,13 +297,65 @@
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
+// static
+Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
+  switch (token) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      return StringEqual(isolate);
+    case Token::NE:
+    case Token::NE_STRICT:
+      return StringNotEqual(isolate);
+    case Token::LT:
+      return StringLessThan(isolate);
+    case Token::GT:
+      return StringGreaterThan(isolate);
+    case Token::LTE:
+      return StringLessThanOrEqual(isolate);
+    case Token::GTE:
+      return StringGreaterThanOrEqual(isolate);
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return StringEqual(isolate);
+}
 
 // static
-Callable CodeFactory::StringCompare(Isolate* isolate) {
-  StringCompareStub stub(isolate);
+Callable CodeFactory::StringEqual(Isolate* isolate) {
+  StringEqualStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
+// static
+Callable CodeFactory::StringNotEqual(Isolate* isolate) {
+  StringNotEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringLessThan(Isolate* isolate) {
+  StringLessThanStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
+  StringLessThanOrEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
+  StringGreaterThanStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
+  StringGreaterThanOrEqualStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
 
 // static
 Callable CodeFactory::SubString(Isolate* isolate) {
@@ -225,6 +365,12 @@
 
 
 // static
+Callable CodeFactory::StoreInterceptor(Isolate* isolate) {
+  StoreInterceptorStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
 Callable CodeFactory::Typeof(Isolate* isolate) {
   TypeofStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -310,6 +456,13 @@
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
+#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type)          \
+  Callable CodeFactory::Allocate##Type(Isolate* isolate) {              \
+    Allocate##Type##Stub stub(isolate);                                 \
+    return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor()); \
+  }
+SIMD128_TYPES(SIMD128_ALLOC)
+#undef SIMD128_ALLOC
 
 // static
 Callable CodeFactory::AllocateInNewSpace(Isolate* isolate) {
@@ -326,8 +479,9 @@
 
 
 // static
-Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
-  return Callable(isolate->builtins()->Call(mode),
+Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
+                           TailCallMode tail_call_mode) {
+  return Callable(isolate->builtins()->Call(mode, tail_call_mode),
                   CallTrampolineDescriptor(isolate));
 }
 
diff --git a/src/code-factory.h b/src/code-factory.h
index fb1a165..deb125f 100644
--- a/src/code-factory.h
+++ b/src/code-factory.h
@@ -54,6 +54,8 @@
       Isolate* isolate, LanguageMode mode,
       InlineCacheState initialization_state);
 
+  static Callable StoreInterceptor(Isolate* isolate);
+
   static Callable CompareIC(Isolate* isolate, Token::Value op);
   static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
 
@@ -66,8 +68,11 @@
   static Callable ToBoolean(Isolate* isolate);
 
   static Callable ToNumber(Isolate* isolate);
+  static Callable NonNumberToNumber(Isolate* isolate);
+  static Callable StringToNumber(Isolate* isolate);
   static Callable ToString(Isolate* isolate);
   static Callable ToName(Isolate* isolate);
+  static Callable ToInteger(Isolate* isolate);
   static Callable ToLength(Isolate* isolate);
   static Callable ToObject(Isolate* isolate);
   static Callable NumberToString(Isolate* isolate);
@@ -75,9 +80,29 @@
   static Callable RegExpConstructResult(Isolate* isolate);
   static Callable RegExpExec(Isolate* isolate);
 
+  static Callable Add(Isolate* isolate);
+  static Callable Subtract(Isolate* isolate);
+  static Callable BitwiseAnd(Isolate* isolate);
+  static Callable BitwiseOr(Isolate* isolate);
+  static Callable BitwiseXor(Isolate* isolate);
+  static Callable LessThan(Isolate* isolate);
+  static Callable LessThanOrEqual(Isolate* isolate);
+  static Callable GreaterThan(Isolate* isolate);
+  static Callable GreaterThanOrEqual(Isolate* isolate);
+  static Callable Equal(Isolate* isolate);
+  static Callable NotEqual(Isolate* isolate);
+  static Callable StrictEqual(Isolate* isolate);
+  static Callable StrictNotEqual(Isolate* isolate);
+
   static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
                             PretenureFlag pretenure_flag);
-  static Callable StringCompare(Isolate* isolate);
+  static Callable StringCompare(Isolate* isolate, Token::Value token);
+  static Callable StringEqual(Isolate* isolate);
+  static Callable StringNotEqual(Isolate* isolate);
+  static Callable StringLessThan(Isolate* isolate);
+  static Callable StringLessThanOrEqual(Isolate* isolate);
+  static Callable StringGreaterThan(Isolate* isolate);
+  static Callable StringGreaterThanOrEqual(Isolate* isolate);
   static Callable SubString(Isolate* isolate);
 
   static Callable Typeof(Isolate* isolate);
@@ -96,11 +121,16 @@
 
   static Callable AllocateHeapNumber(Isolate* isolate);
   static Callable AllocateMutableHeapNumber(Isolate* isolate);
+#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
+  static Callable Allocate##Type(Isolate* isolate);
+  SIMD128_TYPES(SIMD128_ALLOC)
+#undef SIMD128_ALLOC
   static Callable AllocateInNewSpace(Isolate* isolate);
 
   static Callable ArgumentAdaptor(Isolate* isolate);
   static Callable Call(Isolate* isolate,
-                       ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+                       ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+                       TailCallMode tail_call_mode = TailCallMode::kDisallow);
   static Callable CallFunction(
       Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
   static Callable Construct(Isolate* isolate);
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 461baaa..1d2fb81 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -78,6 +78,9 @@
                             Representation representation,
                             bool transition_to_field);
 
+  HValue* BuildPushElement(HValue* object, HValue* argc,
+                           HValue* argument_elements, ElementsKind kind);
+
   enum ArgumentClass {
     NONE,
     SINGLE,
@@ -294,7 +297,7 @@
   if (FLAG_profile_hydrogen_code_stub_compilation) {
     timer.Start();
   }
-  Zone zone;
+  Zone zone(isolate->allocator());
   CompilationInfo info(CodeStub::MajorName(stub->MajorKey()), isolate, &zone,
                        stub->GetCodeFlags());
   // Parameter count is number of stack parameters.
@@ -780,6 +783,214 @@
   return DoGenerateCode(this);
 }
 
+HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
+                                                   HValue* argument_elements,
+                                                   ElementsKind kind) {
+  // Precheck whether all elements fit into the array.
+  if (!IsFastObjectElementsKind(kind)) {
+    LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+    HValue* start = graph()->GetConstant0();
+    HValue* key = builder.BeginBody(start, argc, Token::LT);
+    {
+      HInstruction* argument =
+          Add<HAccessArgumentsAt>(argument_elements, argc, key);
+      IfBuilder can_store(this);
+      can_store.IfNot<HIsSmiAndBranch>(argument);
+      if (IsFastDoubleElementsKind(kind)) {
+        can_store.And();
+        can_store.IfNot<HCompareMap>(argument,
+                                     isolate()->factory()->heap_number_map());
+      }
+      can_store.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+      can_store.End();
+    }
+    builder.EndBody();
+  }
+
+  HValue* length = Add<HLoadNamedField>(object, nullptr,
+                                        HObjectAccess::ForArrayLength(kind));
+  HValue* new_length = AddUncasted<HAdd>(length, argc);
+  HValue* max_key = AddUncasted<HSub>(new_length, graph()->GetConstant1());
+
+  HValue* elements = Add<HLoadNamedField>(object, nullptr,
+                                          HObjectAccess::ForElementsPointer());
+  elements = BuildCheckForCapacityGrow(object, elements, kind, length, max_key,
+                                       true, STORE);
+
+  LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+  HValue* start = graph()->GetConstant0();
+  HValue* key = builder.BeginBody(start, argc, Token::LT);
+  {
+    HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
+    HValue* index = AddUncasted<HAdd>(key, length);
+    AddElementAccess(elements, index, argument, object, nullptr, kind, STORE);
+  }
+  builder.EndBody();
+  return new_length;
+}
+
+template <>
+HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
+  // TODO(verwaest): Fix deoptimizer messages.
+  HValue* argc = GetArgumentsLength();
+  HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
+  HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
+                                                 graph()->GetConstantMinus1());
+  BuildCheckHeapObject(object);
+  HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
+  Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_ARRAY);
+
+  // Disallow pushing onto prototypes. It might be the JSArray prototype.
+  // Disallow pushing onto non-extensible objects.
+  {
+    HValue* bit_field2 =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
+    HValue* mask =
+        Add<HConstant>(static_cast<int>(Map::IsPrototypeMapBits::kMask) |
+                       (1 << Map::kIsExtensible));
+    HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field2, mask);
+    IfBuilder check(this);
+    check.If<HCompareNumericAndBranch>(
+        bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
+    check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    check.End();
+  }
+
+  // Disallow pushing onto observed objects.
+  {
+    HValue* bit_field =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+    HValue* mask = Add<HConstant>(1 << Map::kIsObserved);
+    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
+    IfBuilder check(this);
+    check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+    check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    check.End();
+  }
+
+  // Disallow pushing onto arrays in dictionary named property mode. We need to
+  // figure out whether the length property is still writable.
+  {
+    HValue* bit_field3 =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
+    HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
+    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
+    IfBuilder check(this);
+    check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+    check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    check.End();
+  }
+
+  // Check whether the length property is writable. The length property is the
+  // only default named property on arrays. It's nonconfigurable, hence is
+  // guaranteed to stay the first property.
+  {
+    HValue* descriptors =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
+    HValue* details = Add<HLoadKeyed>(
+        descriptors, Add<HConstant>(DescriptorArray::ToDetailsIndex(0)),
+        nullptr, nullptr, FAST_SMI_ELEMENTS);
+    HValue* mask =
+        Add<HConstant>(READ_ONLY << PropertyDetails::AttributesField::kShift);
+    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
+    IfBuilder readonly(this);
+    readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+    readonly.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    readonly.End();
+  }
+
+  HValue* null = Add<HLoadRoot>(Heap::kNullValueRootIndex);
+  HValue* empty = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+  environment()->Push(map);
+  LoopBuilder check_prototypes(this);
+  check_prototypes.BeginBody(1);
+  {
+    HValue* parent_map = environment()->Pop();
+    HValue* prototype = Add<HLoadNamedField>(parent_map, nullptr,
+                                             HObjectAccess::ForPrototype());
+
+    IfBuilder is_null(this);
+    is_null.If<HCompareObjectEqAndBranch>(prototype, null);
+    is_null.Then();
+    check_prototypes.Break();
+    is_null.End();
+
+    HValue* prototype_map =
+        Add<HLoadNamedField>(prototype, nullptr, HObjectAccess::ForMap());
+    HValue* instance_type = Add<HLoadNamedField>(
+        prototype_map, nullptr, HObjectAccess::ForMapInstanceType());
+    IfBuilder check_instance_type(this);
+    check_instance_type.If<HCompareNumericAndBranch>(
+        instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
+        Token::LTE);
+    check_instance_type.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    check_instance_type.End();
+
+    HValue* elements = Add<HLoadNamedField>(
+        prototype, nullptr, HObjectAccess::ForElementsPointer());
+    IfBuilder no_elements(this);
+    no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
+    no_elements.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+    no_elements.End();
+
+    environment()->Push(prototype_map);
+  }
+  check_prototypes.EndBody();
+
+  HValue* bit_field2 =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
+  HValue* kind = BuildDecodeField<Map::ElementsKindBits>(bit_field2);
+
+  // Below we only check the upper bound of the relevant ranges to include both
+  // holey and non-holey versions. We check them in order smi, object, double
+  // since smi < object < double.
+  STATIC_ASSERT(FAST_SMI_ELEMENTS < FAST_HOLEY_SMI_ELEMENTS);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS < FAST_HOLEY_ELEMENTS);
+  STATIC_ASSERT(FAST_ELEMENTS < FAST_HOLEY_ELEMENTS);
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
+  STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
+  IfBuilder has_smi_elements(this);
+  has_smi_elements.If<HCompareNumericAndBranch>(
+      kind, Add<HConstant>(FAST_HOLEY_SMI_ELEMENTS), Token::LTE);
+  has_smi_elements.Then();
+  {
+    HValue* new_length = BuildPushElement(object, argc, argument_elements,
+                                          FAST_HOLEY_SMI_ELEMENTS);
+    environment()->Push(new_length);
+  }
+  has_smi_elements.Else();
+  {
+    IfBuilder has_object_elements(this);
+    has_object_elements.If<HCompareNumericAndBranch>(
+        kind, Add<HConstant>(FAST_HOLEY_ELEMENTS), Token::LTE);
+    has_object_elements.Then();
+    {
+      HValue* new_length = BuildPushElement(object, argc, argument_elements,
+                                            FAST_HOLEY_ELEMENTS);
+      environment()->Push(new_length);
+    }
+    has_object_elements.Else();
+    {
+      IfBuilder has_double_elements(this);
+      has_double_elements.If<HCompareNumericAndBranch>(
+          kind, Add<HConstant>(FAST_HOLEY_DOUBLE_ELEMENTS), Token::LTE);
+      has_double_elements.Then();
+      {
+        HValue* new_length = BuildPushElement(object, argc, argument_elements,
+                                              FAST_HOLEY_DOUBLE_ELEMENTS);
+        environment()->Push(new_length);
+      }
+      has_double_elements.ElseDeopt(Deoptimizer::kFastArrayPushFailed);
+      has_double_elements.End();
+    }
+    has_object_elements.End();
+  }
+  has_smi_elements.End();
+
+  return environment()->Pop();
+}
+
+Handle<Code> FastArrayPushStub::GenerateCode() { return DoGenerateCode(this); }
 
 template <>
 HValue* CodeStubGraphBuilder<GrowArrayElementsStub>::BuildCodeStub() {
@@ -1185,36 +1396,6 @@
 
 
 template <>
-HValue* CodeStubGraphBuilder<AllocateHeapNumberStub>::BuildCodeStub() {
-  HValue* result =
-      Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapNumber(),
-                     NOT_TENURED, HEAP_NUMBER_TYPE);
-  AddStoreMapConstant(result, isolate()->factory()->heap_number_map());
-  return result;
-}
-
-
-Handle<Code> AllocateHeapNumberStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<AllocateMutableHeapNumberStub>::BuildCodeStub() {
-  HValue* result =
-      Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapObject(),
-                     NOT_TENURED, MUTABLE_HEAP_NUMBER_TYPE);
-  AddStoreMapConstant(result, isolate()->factory()->mutable_heap_number_map());
-  return result;
-}
-
-
-Handle<Code> AllocateMutableHeapNumberStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
 HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
   HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
                                   JS_OBJECT_TYPE);
@@ -1419,31 +1600,6 @@
 
 
 template <>
-HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
-  Isolate* isolate = graph()->isolate();
-  CompareNilICStub* stub = casted_stub();
-  HIfContinuation continuation;
-  Handle<Map> sentinel_map(isolate->heap()->meta_map());
-  Type* type = stub->GetType(zone(), sentinel_map);
-  BuildCompareNil(GetParameter(0), type, &continuation, kEmbedMapsViaWeakCells);
-  IfBuilder if_nil(this, &continuation);
-  if_nil.Then();
-  if (continuation.IsFalseReachable()) {
-    if_nil.Else();
-    if_nil.Return(graph()->GetConstantFalse());
-  }
-  if_nil.End();
-  return continuation.IsTrueReachable() ? graph()->GetConstantTrue()
-                                        : graph()->GetConstantUndefined();
-}
-
-
-Handle<Code> CompareNilICStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
 HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
   BinaryOpICState state = casted_stub()->state();
 
@@ -1588,11 +1744,10 @@
       }
       if_inputisprimitive.End();
       // Convert the primitive to a string value.
-      ToStringDescriptor descriptor(isolate());
       ToStringStub stub(isolate());
       HValue* values[] = {context(), Pop()};
       Push(AddUncasted<HCallWithDescriptor>(
-          Add<HConstant>(stub.GetCode()), 0, descriptor,
+          Add<HConstant>(stub.GetCode()), 0, stub.GetCallInterfaceDescriptor(),
           Vector<HValue*>(values, arraysize(values))));
     }
     if_inputisstring.End();
@@ -1706,10 +1861,9 @@
   return DoGenerateCode(this);
 }
 
-
 template <>
-HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
-  ToBooleanStub* stub = casted_stub();
+HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
+  ToBooleanICStub* stub = casted_stub();
   IfBuilder if_true(this);
   if_true.If<HBranch>(GetParameter(0), stub->types());
   if_true.Then();
@@ -1719,11 +1873,7 @@
   return graph()->GetConstantFalse();
 }
 
-
-Handle<Code> ToBooleanStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
+Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
 
 template <>
 HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
@@ -1855,7 +2005,7 @@
 
 template <>
 HValue* CodeStubGraphBuilder<ToObjectStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(ToObjectDescriptor::kReceiverIndex);
+  HValue* receiver = GetParameter(TypeConversionDescriptor::kArgumentIndex);
   return BuildToObject(receiver);
 }
 
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 4e5efcd..60b350c 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -7,6 +7,7 @@
 #include <sstream>
 
 #include "src/bootstrapper.h"
+#include "src/code-factory.h"
 #include "src/compiler/code-stub-assembler.h"
 #include "src/factory.h"
 #include "src/gdb-jit.h"
@@ -82,7 +83,8 @@
   std::ostringstream os;
   os << *this;
   PROFILE(isolate(),
-          CodeCreateEvent(Logger::STUB_TAG, *code, os.str().c_str()));
+          CodeCreateEvent(Logger::STUB_TAG, AbstractCode::cast(*code),
+                          os.str().c_str()));
   Counters* counters = isolate()->counters();
   counters->total_stubs_code_size()->Increment(code->instruction_size());
 #ifdef DEBUG
@@ -451,31 +453,9 @@
 }
 
 
-void CompareNilICStub::UpdateStatus(Handle<Object> object) {
-  State state = this->state();
-  DCHECK(!state.Contains(GENERIC));
-  State old_state = state;
-  if (object->IsNull()) {
-    state.Add(NULL_TYPE);
-  } else if (object->IsUndefined()) {
-    state.Add(UNDEFINED);
-  } else if (object->IsUndetectableObject() || object->IsSmi()) {
-    state.RemoveAll();
-    state.Add(GENERIC);
-  } else if (IsMonomorphic()) {
-    state.RemoveAll();
-    state.Add(GENERIC);
-  } else {
-    state.Add(MONOMORPHIC_MAP);
-  }
-  TraceTransition(old_state, state);
-  set_sub_minor_key(TypesBits::update(sub_minor_key(), state.ToIntegral()));
-}
-
-
 Handle<Code> TurboFanCodeStub::GenerateCode() {
   const char* name = CodeStub::MajorName(MajorKey());
-  Zone zone;
+  Zone zone(isolate()->allocator());
   CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
   compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
                                         GetCodeFlags(), name);
@@ -483,6 +463,40 @@
   return assembler.GenerateCode();
 }
 
+void AllocateHeapNumberStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* result = assembler->AllocateHeapNumber();
+  assembler->Return(result);
+}
+
+void AllocateMutableHeapNumberStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+
+  Node* result = assembler->Allocate(HeapNumber::kSize);
+  assembler->StoreMapNoWriteBarrier(
+      result,
+      assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map()));
+  assembler->Return(result);
+}
+
+#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type)            \
+  void Allocate##Type##Stub::GenerateAssembly(                              \
+      compiler::CodeStubAssembler* assembler) const {                       \
+    compiler::Node* result = assembler->Allocate(                           \
+        Simd128Value::kSize, compiler::CodeStubAssembler::kNone);           \
+    compiler::Node* map_offset =                                            \
+        assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag); \
+    compiler::Node* map = assembler->IntPtrAdd(result, map_offset);         \
+    assembler->StoreNoWriteBarrier(                                         \
+        MachineRepresentation::kTagged, map,                                \
+        assembler->HeapConstant(isolate()->factory()->type##_map()));       \
+    assembler->Return(result);                                              \
+  }
+SIMD128_TYPES(SIMD128_GEN_ASM)
+#undef SIMD128_GEN_ASM
 
 void StringLengthStub::GenerateAssembly(
     compiler::CodeStubAssembler* assembler) const {
@@ -494,6 +508,2575 @@
   assembler->Return(result);
 }
 
+void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry for floating point addition.
+  Label do_fadd(assembler);
+  Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
+      var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive, ToString and/or
+  // ToNumber conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_vars);
+  var_lhs.Bind(assembler->Parameter(0));
+  var_rhs.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    // Check if the {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+    assembler->Bind(&if_lhsissmi);
+    {
+      // Check if the {rhs} is also a Smi.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      {
+        // Try fast Smi addition first.
+        Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
+        Node* overflow = assembler->Projection(1, pair);
+
+        // Check if the Smi addition overflowed.
+        Label if_overflow(assembler), if_notoverflow(assembler);
+        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+        assembler->Bind(&if_overflow);
+        {
+          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fadd);
+        }
+
+        assembler->Bind(&if_notoverflow);
+        assembler->Return(assembler->Projection(0, pair));
+      }
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of {rhs}.
+        Node* rhs_map = assembler->LoadObjectField(rhs, HeapObject::kMapOffset);
+
+        // Check if the {rhs} is a HeapNumber.
+        Label if_rhsisnumber(assembler),
+            if_rhsisnotnumber(assembler, Label::kDeferred);
+        Node* number_map = assembler->HeapNumberMapConstant();
+        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                          &if_rhsisnumber, &if_rhsisnotnumber);
+
+        assembler->Bind(&if_rhsisnumber);
+        {
+          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fadd);
+        }
+
+        assembler->Bind(&if_rhsisnotnumber);
+        {
+          // Load the instance type of {rhs}.
+          Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+          // Check if the {rhs} is a String.
+          Label if_rhsisstring(assembler, Label::kDeferred),
+              if_rhsisnotstring(assembler, Label::kDeferred);
+          assembler->Branch(assembler->Int32LessThan(
+                                rhs_instance_type,
+                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                            &if_rhsisstring, &if_rhsisnotstring);
+
+          assembler->Bind(&if_rhsisstring);
+          {
+            // Convert {lhs}, which is a Smi, to a String and concatenate the
+            // resulting string with the String {rhs}.
+            Callable callable = CodeFactory::StringAdd(
+                assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+            assembler->TailCallStub(callable, context, lhs, rhs);
+          }
+
+          assembler->Bind(&if_rhsisnotstring);
+          {
+            // Check if {rhs} is a JSReceiver.
+            Label if_rhsisreceiver(assembler, Label::kDeferred),
+                if_rhsisnotreceiver(assembler, Label::kDeferred);
+            assembler->Branch(
+                assembler->Int32LessThanOrEqual(
+                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                    rhs_instance_type),
+                &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+            assembler->Bind(&if_rhsisreceiver);
+            {
+              // Convert {rhs} to a primitive first passing no hint.
+              // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+              var_rhs.Bind(
+                  assembler->CallRuntime(Runtime::kToPrimitive, context, rhs));
+              assembler->Goto(&loop);
+            }
+
+            assembler->Bind(&if_rhsisnotreceiver);
+            {
+              // Convert {rhs} to a Number first.
+              Callable callable =
+                  CodeFactory::NonNumberToNumber(assembler->isolate());
+              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+              assembler->Goto(&loop);
+            }
+          }
+        }
+      }
+    }
+
+    assembler->Bind(&if_lhsisnotsmi);
+    {
+      // Load the map and instance type of {lhs}.
+      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+
+      // Check if {lhs} is a String.
+      Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
+      assembler->Branch(assembler->Int32LessThan(
+                            lhs_instance_type,
+                            assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                        &if_lhsisstring, &if_lhsisnotstring);
+
+      assembler->Bind(&if_lhsisstring);
+      {
+        // Convert {rhs} to a String (using the sequence of ToPrimitive with
+        // no hint followed by ToString) and concatenate the strings.
+        Callable callable = CodeFactory::StringAdd(
+            assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+        assembler->TailCallStub(callable, context, lhs, rhs);
+      }
+
+      assembler->Bind(&if_lhsisnotstring);
+      {
+        // Check if {rhs} is a Smi.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        {
+          // Check if {lhs} is a Number.
+          Label if_lhsisnumber(assembler),
+              if_lhsisnotnumber(assembler, Label::kDeferred);
+          assembler->Branch(assembler->Word32Equal(
+                                lhs_instance_type,
+                                assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+                            &if_lhsisnumber, &if_lhsisnotnumber);
+
+          assembler->Bind(&if_lhsisnumber);
+          {
+            // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
+            var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+            assembler->Goto(&do_fadd);
+          }
+
+          assembler->Bind(&if_lhsisnotnumber);
+          {
+            // The {lhs} is neither a Number nor a String, and the {rhs} is a
+            // Smi.
+            Label if_lhsisreceiver(assembler, Label::kDeferred),
+                if_lhsisnotreceiver(assembler, Label::kDeferred);
+            assembler->Branch(
+                assembler->Int32LessThanOrEqual(
+                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                    lhs_instance_type),
+                &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+            assembler->Bind(&if_lhsisreceiver);
+            {
+              // Convert {lhs} to a primitive first passing no hint.
+              // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+              var_lhs.Bind(
+                  assembler->CallRuntime(Runtime::kToPrimitive, context, lhs));
+              assembler->Goto(&loop);
+            }
+
+            assembler->Bind(&if_lhsisnotreceiver);
+            {
+              // Convert {lhs} to a Number first.
+              Callable callable =
+                  CodeFactory::NonNumberToNumber(assembler->isolate());
+              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+              assembler->Goto(&loop);
+            }
+          }
+        }
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the instance type of {rhs}.
+          Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+          // Check if {rhs} is a String.
+          Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+          assembler->Branch(assembler->Int32LessThan(
+                                rhs_instance_type,
+                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                            &if_rhsisstring, &if_rhsisnotstring);
+
+          assembler->Bind(&if_rhsisstring);
+          {
+            // Convert {lhs} to a String (using the sequence of ToPrimitive with
+            // no hint followed by ToString) and concatenate the strings.
+            Callable callable = CodeFactory::StringAdd(
+                assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+            assembler->TailCallStub(callable, context, lhs, rhs);
+          }
+
+          assembler->Bind(&if_rhsisnotstring);
+          {
+            // Check if {lhs} is a HeapNumber.
+            Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+            assembler->Branch(assembler->Word32Equal(
+                                  lhs_instance_type,
+                                  assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+                              &if_lhsisnumber, &if_lhsisnotnumber);
+
+            assembler->Bind(&if_lhsisnumber);
+            {
+              // Check if {rhs} is also a HeapNumber.
+              Label if_rhsisnumber(assembler),
+                  if_rhsisnotnumber(assembler, Label::kDeferred);
+              assembler->Branch(assembler->Word32Equal(
+                                    rhs_instance_type,
+                                    assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+                                &if_rhsisnumber, &if_rhsisnotnumber);
+
+              assembler->Bind(&if_rhsisnumber);
+              {
+                // Perform a floating point addition.
+                var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+                var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+                assembler->Goto(&do_fadd);
+              }
+
+              assembler->Bind(&if_rhsisnotnumber);
+              {
+                // Check if {rhs} is a JSReceiver.
+                Label if_rhsisreceiver(assembler, Label::kDeferred),
+                    if_rhsisnotreceiver(assembler, Label::kDeferred);
+                assembler->Branch(
+                    assembler->Int32LessThanOrEqual(
+                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                        rhs_instance_type),
+                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                assembler->Bind(&if_rhsisreceiver);
+                {
+                  // Convert {rhs} to a primitive first passing no hint.
+                  // TODO(bmeurer): Hook up ToPrimitiveStub here too.
+                  var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+                                                      context, rhs));
+                  assembler->Goto(&loop);
+                }
+
+                assembler->Bind(&if_rhsisnotreceiver);
+                {
+                  // Convert {rhs} to a Number first.
+                  Callable callable =
+                      CodeFactory::NonNumberToNumber(assembler->isolate());
+                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+                  assembler->Goto(&loop);
+                }
+              }
+            }
+
+            assembler->Bind(&if_lhsisnotnumber);
+            {
+              // Check if {lhs} is a JSReceiver.
+              Label if_lhsisreceiver(assembler, Label::kDeferred),
+                  if_lhsisnotreceiver(assembler);
+              assembler->Branch(
+                  assembler->Int32LessThanOrEqual(
+                      assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                      lhs_instance_type),
+                  &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+              assembler->Bind(&if_lhsisreceiver);
+              {
+                // Convert {lhs} to a primitive first passing no hint.
+                // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+                var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+                                                    context, lhs));
+                assembler->Goto(&loop);
+              }
+
+              assembler->Bind(&if_lhsisnotreceiver);
+              {
+                // Check if {rhs} is a JSReceiver.
+                Label if_rhsisreceiver(assembler, Label::kDeferred),
+                    if_rhsisnotreceiver(assembler, Label::kDeferred);
+                assembler->Branch(
+                    assembler->Int32LessThanOrEqual(
+                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                        rhs_instance_type),
+                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                assembler->Bind(&if_rhsisreceiver);
+                {
+                  // Convert {rhs} to a primitive first passing no hint.
+                  // TODO(bmeurer): Hook up ToPrimitiveStub here too.
+                  var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+                                                      context, rhs));
+                  assembler->Goto(&loop);
+                }
+
+                assembler->Bind(&if_rhsisnotreceiver);
+                {
+                  // Convert {lhs} to a Number first.
+                  Callable callable =
+                      CodeFactory::NonNumberToNumber(assembler->isolate());
+                  var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+                  assembler->Goto(&loop);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  assembler->Bind(&do_fadd);
+  {
+    Node* lhs_value = var_fadd_lhs.value();
+    Node* rhs_value = var_fadd_rhs.value();
+    Node* value = assembler->Float64Add(lhs_value, rhs_value);
+    Node* result = assembler->ChangeFloat64ToTagged(value);
+    assembler->Return(result);
+  }
+}
+
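+// Note on the semantics implemented below: unlike '+', the JavaScript '-'
+// operator has no string concatenation case; both operands are always
+// converted to numbers before the subtraction is performed. For example:
+//   1 - 2      // -1 (fast Smi path)
+//   "4" - 1    // 3  (lhs is converted via NonNumberToNumber)
+//   ({}) - 1   // NaN ({} converts to "[object Object]", then to NaN)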
+void SubtractStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry for floating point subtraction.
+  Label do_fsub(assembler);
+  Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
+      var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive and/or ToNumber
+  // conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_vars);
+  var_lhs.Bind(assembler->Parameter(0));
+  var_rhs.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    // Check if the {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+    assembler->Bind(&if_lhsissmi);
+    {
+      // Check if the {rhs} is also a Smi.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      {
+        // Try a fast Smi subtraction first.
+        Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
+        Node* overflow = assembler->Projection(1, pair);
+
+        // Check if the Smi subtraction overflowed.
+        Label if_overflow(assembler), if_notoverflow(assembler);
+        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+        assembler->Bind(&if_overflow);
+        {
+          // The result doesn't fit into Smi range.
+          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_notoverflow);
+        assembler->Return(assembler->Projection(0, pair));
+      }
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of the {rhs}.
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if {rhs} is a HeapNumber.
+        Label if_rhsisnumber(assembler),
+            if_rhsisnotnumber(assembler, Label::kDeferred);
+        Node* number_map = assembler->HeapNumberMapConstant();
+        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                          &if_rhsisnumber, &if_rhsisnotnumber);
+
+        assembler->Bind(&if_rhsisnumber);
+        {
+          // Perform a floating point subtraction.
+          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_rhsisnotnumber);
+        {
+          // Convert the {rhs} to a Number first.
+          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&if_lhsisnotsmi);
+    {
+      // Load the map of the {lhs}.
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if the {lhs} is a HeapNumber.
+      Label if_lhsisnumber(assembler),
+          if_lhsisnotnumber(assembler, Label::kDeferred);
+      Node* number_map = assembler->HeapNumberMapConstant();
+      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                        &if_lhsisnumber, &if_lhsisnotnumber);
+
+      assembler->Bind(&if_lhsisnumber);
+      {
+        // Check if the {rhs} is a Smi.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        {
+          // Perform a floating point subtraction.
+          var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the map of the {rhs}.
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if the {rhs} is a HeapNumber.
+          Label if_rhsisnumber(assembler),
+              if_rhsisnotnumber(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &if_rhsisnumber, &if_rhsisnotnumber);
+
+          assembler->Bind(&if_rhsisnumber);
+          {
+            // Perform a floating point subtraction.
+            var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fsub);
+          }
+
+          assembler->Bind(&if_rhsisnotnumber);
+          {
+            // Convert the {rhs} to a Number first.
+            Callable callable = CodeFactory::NonNumberToNumber(isolate());
+            var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&if_lhsisnotnumber);
+      {
+        // Convert the {lhs} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fsub);
+  {
+    Node* lhs_value = var_fsub_lhs.value();
+    Node* rhs_value = var_fsub_rhs.value();
+    Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+    Node* result = assembler->ChangeFloat64ToTagged(value);
+    assembler->Return(result);
+  }
+}
+
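+// The three bitwise stubs below share the same shape: both operands are
+// truncated to signed 32-bit integers (the ToInt32 conversion, here done by
+// TruncateTaggedToWord32), the 32-bit machine operation is performed, and the
+// result is tagged again. For example:
+//   5.9 & 3    // 1  (5.9 truncates to 5)
+//   "12" | 0   // 12 (the string is converted to a number first)
+//   -1 ^ 0     // -1 (results are interpreted as signed 32-bit integers)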
+void BitwiseAndStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+  Node* value = assembler->Word32And(lhs_value, rhs_value);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+void BitwiseOrStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+  Node* value = assembler->Word32Or(lhs_value, rhs_value);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+void BitwiseXorStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+  Node* value = assembler->Word32Xor(lhs_value, rhs_value);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+namespace {
+
+enum RelationalComparisonMode {
+  kLessThan,
+  kLessThanOrEqual,
+  kGreaterThan,
+  kGreaterThanOrEqual
+};
+
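+// Shared generator for the abstract relational comparisons (<, <=, >, >=).
+// Both operands are reduced to primitives; if both end up as Strings the
+// comparison is performed on the string contents, otherwise both sides are
+// converted to Numbers and compared as floating point values. For example:
+//   "10" < "9"   // true  (string comparison)
+//   10 < "9"     // false (numeric comparison)
+//   1 < NaN      // false (comparisons involving NaN are always false)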
+void GenerateAbstractRelationalComparison(
+    compiler::CodeStubAssembler* assembler, RelationalComparisonMode mode) {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(2);
+
+  Label return_true(assembler), return_false(assembler);
+
+  // Shared entry for floating point comparison.
+  Label do_fcmp(assembler);
+  Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
+      var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive and/or ToNumber
+  // conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_vars);
+  var_lhs.Bind(assembler->Parameter(0));
+  var_rhs.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    // Check if the {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+    assembler->Bind(&if_lhsissmi);
+    {
+      // Check if {rhs} is a Smi or a HeapObject.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      {
+        // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+        switch (mode) {
+          case kLessThan:
+            assembler->BranchIfSmiLessThan(lhs, rhs, &return_true,
+                                           &return_false);
+            break;
+          case kLessThanOrEqual:
+            assembler->BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true,
+                                                  &return_false);
+            break;
+          case kGreaterThan:
+            assembler->BranchIfSmiLessThan(rhs, lhs, &return_true,
+                                           &return_false);
+            break;
+          case kGreaterThanOrEqual:
+            assembler->BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true,
+                                                  &return_false);
+            break;
+        }
+      }
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of {rhs}.
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if the {rhs} is a HeapNumber.
+        Node* number_map = assembler->HeapNumberMapConstant();
+        Label if_rhsisnumber(assembler),
+            if_rhsisnotnumber(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                          &if_rhsisnumber, &if_rhsisnotnumber);
+
+        assembler->Bind(&if_rhsisnumber);
+        {
+          // Convert the {lhs} and {rhs} to floating point values, and
+          // perform a floating point comparison.
+          var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fcmp);
+        }
+
+        assembler->Bind(&if_rhsisnotnumber);
+        {
+          // Convert the {rhs} to a Number; we don't need to perform the
+          // dedicated ToPrimitive(rhs, hint Number) operation, as the
+          // ToNumber(rhs) will by itself already invoke ToPrimitive with
+          // a Number hint.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&if_lhsisnotsmi);
+    {
+      // Load the HeapNumber map for later comparisons.
+      Node* number_map = assembler->HeapNumberMapConstant();
+
+      // Load the map of {lhs}.
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if {rhs} is a Smi or a HeapObject.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      {
+        // Check if the {lhs} is a HeapNumber.
+        Label if_lhsisnumber(assembler),
+            if_lhsisnotnumber(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                          &if_lhsisnumber, &if_lhsisnotnumber);
+
+        assembler->Bind(&if_lhsisnumber);
+        {
+          // Convert the {lhs} and {rhs} to floating point values, and
+          // perform a floating point comparison.
+          var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+          var_fcmp_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fcmp);
+        }
+
+        assembler->Bind(&if_lhsisnotnumber);
+        {
+          // Convert the {lhs} to a Number; we don't need to perform the
+          // dedicated ToPrimitive(lhs, hint Number) operation, as the
+          // ToNumber(lhs) will by itself already invoke ToPrimitive with
+          // a Number hint.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+          assembler->Goto(&loop);
+        }
+      }
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of {rhs}.
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if {lhs} is a HeapNumber.
+        Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+        assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                          &if_lhsisnumber, &if_lhsisnotnumber);
+
+        assembler->Bind(&if_lhsisnumber);
+        {
+          // Check if {rhs} is also a HeapNumber.
+          Label if_rhsisnumber(assembler),
+              if_rhsisnotnumber(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(lhs_map, rhs_map),
+                            &if_rhsisnumber, &if_rhsisnotnumber);
+
+          assembler->Bind(&if_rhsisnumber);
+          {
+            // Convert the {lhs} and {rhs} to floating point values, and
+            // perform a floating point comparison.
+            var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fcmp);
+          }
+
+          assembler->Bind(&if_rhsisnotnumber);
+          {
+            // Convert the {rhs} to a Number; we don't need to perform the
+            // dedicated ToPrimitive(rhs, hint Number) operation, as the
+            // ToNumber(rhs) will by itself already invoke ToPrimitive with
+            // a Number hint.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+            assembler->Goto(&loop);
+          }
+        }
+
+        assembler->Bind(&if_lhsisnotnumber);
+        {
+          // Load the instance type of {lhs}.
+          Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+
+          // Check if {lhs} is a String.
+          Label if_lhsisstring(assembler),
+              if_lhsisnotstring(assembler, Label::kDeferred);
+          assembler->Branch(assembler->Int32LessThan(
+                                lhs_instance_type,
+                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                            &if_lhsisstring, &if_lhsisnotstring);
+
+          assembler->Bind(&if_lhsisstring);
+          {
+            // Load the instance type of {rhs}.
+            Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+            // Check if {rhs} is also a String.
+            Label if_rhsisstring(assembler),
+                if_rhsisnotstring(assembler, Label::kDeferred);
+            assembler->Branch(assembler->Int32LessThan(
+                                  rhs_instance_type, assembler->Int32Constant(
+                                                         FIRST_NONSTRING_TYPE)),
+                              &if_rhsisstring, &if_rhsisnotstring);
+
+            assembler->Bind(&if_rhsisstring);
+            {
+              // Both {lhs} and {rhs} are strings.
+              switch (mode) {
+                case kLessThan:
+                  assembler->TailCallStub(
+                      CodeFactory::StringLessThan(assembler->isolate()),
+                      context, lhs, rhs);
+                  break;
+                case kLessThanOrEqual:
+                  assembler->TailCallStub(
+                      CodeFactory::StringLessThanOrEqual(assembler->isolate()),
+                      context, lhs, rhs);
+                  break;
+                case kGreaterThan:
+                  assembler->TailCallStub(
+                      CodeFactory::StringGreaterThan(assembler->isolate()),
+                      context, lhs, rhs);
+                  break;
+                case kGreaterThanOrEqual:
+                  assembler->TailCallStub(CodeFactory::StringGreaterThanOrEqual(
+                                              assembler->isolate()),
+                                          context, lhs, rhs);
+                  break;
+              }
+            }
+
+            assembler->Bind(&if_rhsisnotstring);
+            {
+              // The {lhs} is a String, while {rhs} is neither a Number nor a
+              // String, so we need to call ToPrimitive(rhs, hint Number) if
+              // {rhs} is a receiver or ToNumber(lhs) and ToNumber(rhs) in the
+              // other cases.
+              STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+              Label if_rhsisreceiver(assembler, Label::kDeferred),
+                  if_rhsisnotreceiver(assembler, Label::kDeferred);
+              assembler->Branch(
+                  assembler->Int32LessThanOrEqual(
+                      assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                      rhs_instance_type),
+                  &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+              assembler->Bind(&if_rhsisreceiver);
+              {
+                // Convert {rhs} to a primitive first passing Number hint.
+                // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+                var_rhs.Bind(assembler->CallRuntime(
+                    Runtime::kToPrimitive_Number, context, rhs));
+                assembler->Goto(&loop);
+              }
+
+              assembler->Bind(&if_rhsisnotreceiver);
+              {
+                // Convert both {lhs} and {rhs} to Number.
+                Callable callable = CodeFactory::ToNumber(assembler->isolate());
+                var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+                var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+                assembler->Goto(&loop);
+              }
+            }
+          }
+
+          assembler->Bind(&if_lhsisnotstring);
+          {
+            // The {lhs} is neither a Number nor a String, so we need to call
+            // ToPrimitive(lhs, hint Number) if {lhs} is a receiver or
+            // ToNumber(lhs) and ToNumber(rhs) in the other cases.
+            STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+            Label if_lhsisreceiver(assembler, Label::kDeferred),
+                if_lhsisnotreceiver(assembler, Label::kDeferred);
+            assembler->Branch(
+                assembler->Int32LessThanOrEqual(
+                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                    lhs_instance_type),
+                &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+            assembler->Bind(&if_lhsisreceiver);
+            {
+              // Convert {lhs} to a primitive first passing Number hint.
+              // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
+              var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive_Number,
+                                                  context, lhs));
+              assembler->Goto(&loop);
+            }
+
+            assembler->Bind(&if_lhsisnotreceiver);
+            {
+              // Convert both {lhs} and {rhs} to Number.
+              Callable callable = CodeFactory::ToNumber(assembler->isolate());
+              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+              assembler->Goto(&loop);
+            }
+          }
+        }
+      }
+    }
+  }
+
+  assembler->Bind(&do_fcmp);
+  {
+    // Load the {lhs} and {rhs} floating point values.
+    Node* lhs = var_fcmp_lhs.value();
+    Node* rhs = var_fcmp_rhs.value();
+
+    // Perform a fast floating point comparison.
+    switch (mode) {
+      case kLessThan:
+        assembler->BranchIfFloat64LessThan(lhs, rhs, &return_true,
+                                           &return_false);
+        break;
+      case kLessThanOrEqual:
+        assembler->BranchIfFloat64LessThanOrEqual(lhs, rhs, &return_true,
+                                                  &return_false);
+        break;
+      case kGreaterThan:
+        assembler->BranchIfFloat64GreaterThan(lhs, rhs, &return_true,
+                                              &return_false);
+        break;
+      case kGreaterThanOrEqual:
+        assembler->BranchIfFloat64GreaterThanOrEqual(lhs, rhs, &return_true,
+                                                     &return_false);
+        break;
+    }
+  }
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
+enum ResultMode { kDontNegateResult, kNegateResult };
+
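+// Handles the case where {lhs} and {rhs} are the very same reference.
+// Identical references compare equal except when the value is a HeapNumber
+// holding NaN, since NaN is not equal to itself under either '==' or '==='.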
+void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
+                        compiler::Node* value,
+                        compiler::CodeStubAssembler::Label* if_equal,
+                        compiler::CodeStubAssembler::Label* if_notequal) {
+  // In case of abstract or strict equality checks, we need additional checks
+  // for NaN values because they are not considered equal, even if both the
+  // left and the right hand side reference exactly the same value.
+  // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
+  // seems to be what is tested in the current SIMD.js testsuite.
+
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  // Check if {value} is a Smi or a HeapObject.
+  Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+  assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
+                    &if_valueisnotsmi);
+
+  assembler->Bind(&if_valueisnotsmi);
+  {
+    // Load the map of {value}.
+    Node* value_map = assembler->LoadMap(value);
+
+    // Check if {value} (and therefore {rhs}) is a HeapNumber.
+    Node* number_map = assembler->HeapNumberMapConstant();
+    Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
+    assembler->Branch(assembler->WordEqual(value_map, number_map),
+                      &if_valueisnumber, &if_valueisnotnumber);
+
+    assembler->Bind(&if_valueisnumber);
+    {
+      // Convert {value} (and therefore {rhs}) to floating point value.
+      Node* value_value = assembler->LoadHeapNumberValue(value);
+
+      // Check if the HeapNumber value is a NaN.
+      assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
+    }
+
+    assembler->Bind(&if_valueisnotnumber);
+    assembler->Goto(if_equal);
+  }
+
+  assembler->Bind(&if_valueissmi);
+  assembler->Goto(if_equal);
+}
+
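+// Compares two Simd128Values. Values with different maps (i.e. different SIMD
+// types) are never equal. Float32x4 lanes are compared as floating point
+// values, so a NaN lane makes the operands unequal; all other Simd128Values
+// are compared bitwise, word by word.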
+void GenerateEqual_Simd128Value_HeapObject(
+    compiler::CodeStubAssembler* assembler, compiler::Node* lhs,
+    compiler::Node* lhs_map, compiler::Node* rhs, compiler::Node* rhs_map,
+    compiler::CodeStubAssembler::Label* if_equal,
+    compiler::CodeStubAssembler::Label* if_notequal) {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  // Check if {lhs} and {rhs} have the same map.
+  Label if_mapsame(assembler), if_mapnotsame(assembler);
+  assembler->Branch(assembler->WordEqual(lhs_map, rhs_map), &if_mapsame,
+                    &if_mapnotsame);
+
+  assembler->Bind(&if_mapsame);
+  {
+    // Both {lhs} and {rhs} are Simd128Values with the same map, need special
+    // handling for Float32x4 because of NaN comparisons.
+    Label if_float32x4(assembler), if_notfloat32x4(assembler);
+    Node* float32x4_map =
+        assembler->HeapConstant(assembler->factory()->float32x4_map());
+    assembler->Branch(assembler->WordEqual(lhs_map, float32x4_map),
+                      &if_float32x4, &if_notfloat32x4);
+
+    assembler->Bind(&if_float32x4);
+    {
+      // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
+      // using a floating point comparison.
+      for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
+           offset < Float32x4::kSize - kHeapObjectTag;
+           offset += sizeof(float)) {
+        // Load the floating point values for {lhs} and {rhs}.
+        Node* lhs_value = assembler->Load(MachineType::Float32(), lhs,
+                                          assembler->IntPtrConstant(offset));
+        Node* rhs_value = assembler->Load(MachineType::Float32(), rhs,
+                                          assembler->IntPtrConstant(offset));
+
+        // Perform a floating point comparison.
+        Label if_valueequal(assembler), if_valuenotequal(assembler);
+        assembler->Branch(assembler->Float32Equal(lhs_value, rhs_value),
+                          &if_valueequal, &if_valuenotequal);
+        assembler->Bind(&if_valuenotequal);
+        assembler->Goto(if_notequal);
+        assembler->Bind(&if_valueequal);
+      }
+
+      // All 4 lanes match, so {lhs} and {rhs} are considered equal.
+      assembler->Goto(if_equal);
+    }
+
+    assembler->Bind(&if_notfloat32x4);
+    {
+      // For other Simd128Values we just perform a bitwise comparison.
+      for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
+           offset < Simd128Value::kSize - kHeapObjectTag;
+           offset += kPointerSize) {
+        // Load the word values for {lhs} and {rhs}.
+        Node* lhs_value = assembler->Load(MachineType::Pointer(), lhs,
+                                          assembler->IntPtrConstant(offset));
+        Node* rhs_value = assembler->Load(MachineType::Pointer(), rhs,
+                                          assembler->IntPtrConstant(offset));
+
+        // Perform a bitwise word-comparison.
+        Label if_valueequal(assembler), if_valuenotequal(assembler);
+        assembler->Branch(assembler->WordEqual(lhs_value, rhs_value),
+                          &if_valueequal, &if_valuenotequal);
+        assembler->Bind(&if_valuenotequal);
+        assembler->Goto(if_notequal);
+        assembler->Bind(&if_valueequal);
+      }
+
+      // Bitwise comparison succeeded, so {lhs} and {rhs} are considered equal.
+      assembler->Goto(if_equal);
+    }
+  }
+
+  assembler->Bind(&if_mapnotsame);
+  assembler->Goto(if_notequal);
+}
+
+// ES6 section 7.2.12 Abstract Equality Comparison
+void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
+  // This is a slightly optimized version of Object::Equals represented as
+  // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
+  // change something functionality wise in here, remember to update the
+  // Object::Equals method as well.
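+  //
+  // A few examples of the coercions performed below:
+  //   1 == "1"           // true  (the string is converted to a number)
+  //   true == 1          // true  (the boolean is converted to a number)
+  //   null == undefined  // true
+  //   NaN == NaN         // false
+  //   ({}) == ({})       // false (distinct receivers are never equal)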
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(2);
+
+  Label if_equal(assembler), if_notequal(assembler);
+
+  // Shared entry for floating point comparison.
+  Label do_fcmp(assembler);
+  Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
+      var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive and/or ToNumber
+  // conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_vars);
+  var_lhs.Bind(assembler->Parameter(0));
+  var_rhs.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    // Check if {lhs} and {rhs} refer to the same object.
+    Label if_same(assembler), if_notsame(assembler);
+    assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+    assembler->Bind(&if_same);
+    {
+      // The {lhs} and {rhs} reference the exact same value, yet we need special
+      // treatment for HeapNumber, as NaN is not equal to NaN.
+      GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
+    }
+
+    assembler->Bind(&if_notsame);
+    {
+      // Check if {lhs} is a Smi or a HeapObject.
+      Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+      assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi,
+                        &if_lhsisnotsmi);
+
+      assembler->Bind(&if_lhsissmi);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        assembler->Goto(&if_notequal);
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the map of {rhs}.
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if {rhs} is a HeapNumber.
+          Node* number_map = assembler->HeapNumberMapConstant();
+          Label if_rhsisnumber(assembler),
+              if_rhsisnotnumber(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &if_rhsisnumber, &if_rhsisnotnumber);
+
+          assembler->Bind(&if_rhsisnumber);
+          {
+            // Convert {lhs} and {rhs} to floating point values, and
+            // perform a floating point comparison.
+            var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
+            var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fcmp);
+          }
+
+          assembler->Bind(&if_rhsisnotnumber);
+          {
+            // Load the instance type of the {rhs}.
+            Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+            // Check if the {rhs} is a String.
+            Label if_rhsisstring(assembler, Label::kDeferred),
+                if_rhsisnotstring(assembler, Label::kDeferred);
+            assembler->Branch(assembler->Int32LessThan(
+                                  rhs_instance_type, assembler->Int32Constant(
+                                                         FIRST_NONSTRING_TYPE)),
+                              &if_rhsisstring, &if_rhsisnotstring);
+
+            assembler->Bind(&if_rhsisstring);
+            {
+              // Convert the {rhs} to a Number.
+              Callable callable =
+                  CodeFactory::StringToNumber(assembler->isolate());
+              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+              assembler->Goto(&loop);
+            }
+
+            assembler->Bind(&if_rhsisnotstring);
+            {
+              // Check if the {rhs} is a Boolean.
+              Node* boolean_map = assembler->BooleanMapConstant();
+              Label if_rhsisboolean(assembler, Label::kDeferred),
+                  if_rhsisnotboolean(assembler, Label::kDeferred);
+              assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+                                &if_rhsisboolean, &if_rhsisnotboolean);
+
+              assembler->Bind(&if_rhsisboolean);
+              {
+                // The {rhs} is a Boolean, load its number value.
+                var_rhs.Bind(
+                    assembler->LoadObjectField(rhs, Oddball::kToNumberOffset));
+                assembler->Goto(&loop);
+              }
+
+              assembler->Bind(&if_rhsisnotboolean);
+              {
+                // Check if the {rhs} is a Receiver.
+                STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+                Label if_rhsisreceiver(assembler, Label::kDeferred),
+                    if_rhsisnotreceiver(assembler, Label::kDeferred);
+                assembler->Branch(
+                    assembler->Int32LessThanOrEqual(
+                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                        rhs_instance_type),
+                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                assembler->Bind(&if_rhsisreceiver);
+                {
+                  // Convert {rhs} to a primitive first (passing no hint).
+                  // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
+                  var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+                                                      context, rhs));
+                  assembler->Goto(&loop);
+                }
+
+                assembler->Bind(&if_rhsisnotreceiver);
+                assembler->Goto(&if_notequal);
+              }
+            }
+          }
+        }
+      }
+
+      assembler->Bind(&if_lhsisnotsmi);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        {
+          // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
+          // and {rhs} is not observable and doesn't matter for the result, so
+          // we can just swap them and use the Smi handling above (for {lhs}
+          // being a Smi).
+          var_lhs.Bind(rhs);
+          var_rhs.Bind(lhs);
+          assembler->Goto(&loop);
+        }
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          Label if_lhsisstring(assembler), if_lhsisnumber(assembler),
+              if_lhsissymbol(assembler), if_lhsissimd128value(assembler),
+              if_lhsisoddball(assembler), if_lhsisreceiver(assembler);
+
+          // Both {lhs} and {rhs} are HeapObjects, load their maps
+          // and their instance types.
+          Node* lhs_map = assembler->LoadMap(lhs);
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Load the instance types of {lhs} and {rhs}.
+          Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+          Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+          // Dispatch based on the instance type of {lhs}.
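+          // All string instance types (those below FIRST_NONSTRING_TYPE) are
+          // routed to if_lhsisstring; HEAP_NUMBER_TYPE, SYMBOL_TYPE,
+          // SIMD128_VALUE_TYPE and ODDBALL_TYPE get their own labels, and
+          // everything else falls through to the JSReceiver default case.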
+          size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
+          Label* case_labels[kNumCases];
+          int32_t case_values[kNumCases];
+          for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+            case_labels[i] = new Label(assembler);
+            case_values[i] = i;
+          }
+          case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
+          case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
+          case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
+          case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
+          case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
+          case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
+          case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
+          case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
+          assembler->Switch(lhs_instance_type, &if_lhsisreceiver, case_values,
+                            case_labels, arraysize(case_values));
+          for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+            assembler->Bind(case_labels[i]);
+            assembler->Goto(&if_lhsisstring);
+            delete case_labels[i];
+          }
+
+          assembler->Bind(&if_lhsisstring);
+          {
+            // Check if {rhs} is also a String.
+            Label if_rhsisstring(assembler),
+                if_rhsisnotstring(assembler, Label::kDeferred);
+            assembler->Branch(assembler->Int32LessThan(
+                                  rhs_instance_type, assembler->Int32Constant(
+                                                         FIRST_NONSTRING_TYPE)),
+                              &if_rhsisstring, &if_rhsisnotstring);
+
+            assembler->Bind(&if_rhsisstring);
+            {
+              // Both {lhs} and {rhs} are of type String, just do the
+              // string comparison then.
+              Callable callable =
+                  (mode == kDontNegateResult)
+                      ? CodeFactory::StringEqual(assembler->isolate())
+                      : CodeFactory::StringNotEqual(assembler->isolate());
+              assembler->TailCallStub(callable, context, lhs, rhs);
+            }
+
+            assembler->Bind(&if_rhsisnotstring);
+            {
+              // The {lhs} is a String and the {rhs} is some other HeapObject.
+              // Swapping {lhs} and {rhs} is not observable and doesn't matter
+              // for the result, so we can just swap them and use the String
+              // handling below (for {rhs} being a String).
+              var_lhs.Bind(rhs);
+              var_rhs.Bind(lhs);
+              assembler->Goto(&loop);
+            }
+          }
+
+          assembler->Bind(&if_lhsisnumber);
+          {
+            // Check if {rhs} is also a HeapNumber.
+            Label if_rhsisnumber(assembler),
+                if_rhsisnotnumber(assembler, Label::kDeferred);
+            assembler->Branch(
+                assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
+                &if_rhsisnumber, &if_rhsisnotnumber);
+
+            assembler->Bind(&if_rhsisnumber);
+            {
+              // Convert {lhs} and {rhs} to floating point values, and
+              // perform a floating point comparison.
+              var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+              var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+              assembler->Goto(&do_fcmp);
+            }
+
+            assembler->Bind(&if_rhsisnotnumber);
+            {
+              // The {lhs} is a Number, the {rhs} is some other HeapObject.
+              Label if_rhsisstring(assembler, Label::kDeferred),
+                  if_rhsisnotstring(assembler);
+              assembler->Branch(
+                  assembler->Int32LessThan(
+                      rhs_instance_type,
+                      assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                  &if_rhsisstring, &if_rhsisnotstring);
+
+              assembler->Bind(&if_rhsisstring);
+              {
+                // The {rhs} is a String and the {lhs} is a HeapNumber; we need
+                // to convert the {rhs} to a Number and compare the output to
+                // the Number on the {lhs}.
+                Callable callable =
+                    CodeFactory::StringToNumber(assembler->isolate());
+                var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+                assembler->Goto(&loop);
+              }
+
+              assembler->Bind(&if_rhsisnotstring);
+              {
+                // Check if the {rhs} is a JSReceiver.
+                Label if_rhsisreceiver(assembler, Label::kDeferred),
+                    if_rhsisnotreceiver(assembler);
+                STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+                assembler->Branch(
+                    assembler->Int32LessThanOrEqual(
+                        assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                        rhs_instance_type),
+                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+                assembler->Bind(&if_rhsisreceiver);
+                {
+                  // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+                  // Swapping {lhs} and {rhs} is not observable and doesn't
+                  // matter for the result, so we can just swap them and use
+                  // the JSReceiver handling below (for {lhs} being a
+                  // JSReceiver).
+                  var_lhs.Bind(rhs);
+                  var_rhs.Bind(lhs);
+                  assembler->Goto(&loop);
+                }
+
+                assembler->Bind(&if_rhsisnotreceiver);
+                {
+                  // Check if {rhs} is a Boolean.
+                  Label if_rhsisboolean(assembler),
+                      if_rhsisnotboolean(assembler);
+                  Node* boolean_map = assembler->BooleanMapConstant();
+                  assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+                                    &if_rhsisboolean, &if_rhsisnotboolean);
+
+                  assembler->Bind(&if_rhsisboolean);
+                  {
+                    // The {rhs} is a Boolean, convert it to a Smi first.
+                    var_rhs.Bind(assembler->LoadObjectField(
+                        rhs, Oddball::kToNumberOffset));
+                    assembler->Goto(&loop);
+                  }
+
+                  assembler->Bind(&if_rhsisnotboolean);
+                  assembler->Goto(&if_notequal);
+                }
+              }
+            }
+          }
+
+          assembler->Bind(&if_lhsisoddball);
+          {
+            // The {lhs} is an Oddball and {rhs} is some other HeapObject.
+            Label if_lhsisboolean(assembler), if_lhsisnotboolean(assembler);
+            Node* boolean_map = assembler->BooleanMapConstant();
+            assembler->Branch(assembler->WordEqual(lhs_map, boolean_map),
+                              &if_lhsisboolean, &if_lhsisnotboolean);
+
+            assembler->Bind(&if_lhsisboolean);
+            {
+              // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
+              Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
+              assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
+                                &if_rhsisboolean, &if_rhsisnotboolean);
+
+              assembler->Bind(&if_rhsisboolean);
+              {
+                // Both {lhs} and {rhs} are distinct Boolean values.
+                assembler->Goto(&if_notequal);
+              }
+
+              assembler->Bind(&if_rhsisnotboolean);
+              {
+                // Convert the {lhs} to a Number first.
+                var_lhs.Bind(
+                    assembler->LoadObjectField(lhs, Oddball::kToNumberOffset));
+                assembler->Goto(&loop);
+              }
+            }
+
+            assembler->Bind(&if_lhsisnotboolean);
+            {
+              // The {lhs} is either Null or Undefined; check if the {rhs} is
+              // undetectable (i.e. either also Null or Undefined or some
+              // undetectable JSReceiver).
+              Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
+              assembler->BranchIfWord32Equal(
+                  assembler->Word32And(
+                      rhs_bitfield,
+                      assembler->Int32Constant(1 << Map::kIsUndetectable)),
+                  assembler->Int32Constant(0), &if_notequal, &if_equal);
+            }
+          }
+
+          assembler->Bind(&if_lhsissymbol);
+          {
+            // Check if the {rhs} is a JSReceiver.
+            Label if_rhsisreceiver(assembler, Label::kDeferred),
+                if_rhsisnotreceiver(assembler);
+            STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+            assembler->Branch(
+                assembler->Int32LessThanOrEqual(
+                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                    rhs_instance_type),
+                &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+            assembler->Bind(&if_rhsisreceiver);
+            {
+              // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+              // Swapping {lhs} and {rhs} is not observable and doesn't
+              // matter for the result, so we can just swap them and use
+              // the JSReceiver handling below (for {lhs} being a JSReceiver).
+              var_lhs.Bind(rhs);
+              var_rhs.Bind(lhs);
+              assembler->Goto(&loop);
+            }
+
+            assembler->Bind(&if_rhsisnotreceiver);
+            {
+              // The {rhs} is not a JSReceiver and also not the same Symbol
+              // as the {lhs}, so this equality check is considered false.
+              assembler->Goto(&if_notequal);
+            }
+          }
+
+          assembler->Bind(&if_lhsissimd128value);
+          {
+            // Check if the {rhs} is also a Simd128Value.
+            Label if_rhsissimd128value(assembler),
+                if_rhsisnotsimd128value(assembler);
+            assembler->Branch(
+                assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
+                &if_rhsissimd128value, &if_rhsisnotsimd128value);
+
+            assembler->Bind(&if_rhsissimd128value);
+            {
+              // Both {lhs} and {rhs} are Simd128Values.
+              GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
+                                                    rhs, rhs_map, &if_equal,
+                                                    &if_notequal);
+            }
+
+            assembler->Bind(&if_rhsisnotsimd128value);
+            {
+              // Check if the {rhs} is a JSReceiver.
+              Label if_rhsisreceiver(assembler, Label::kDeferred),
+                  if_rhsisnotreceiver(assembler);
+              STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+              assembler->Branch(
+                  assembler->Int32LessThanOrEqual(
+                      assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                      rhs_instance_type),
+                  &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+              assembler->Bind(&if_rhsisreceiver);
+              {
+                // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+                // Swapping {lhs} and {rhs} is not observable and doesn't
+                // matter for the result, so we can just swap them and use
+                // the JSReceiver handling below (for {lhs} being a JSReceiver).
+                var_lhs.Bind(rhs);
+                var_rhs.Bind(lhs);
+                assembler->Goto(&loop);
+              }
+
+              assembler->Bind(&if_rhsisnotreceiver);
+              {
+                // The {rhs} is some other Primitive.
+                assembler->Goto(&if_notequal);
+              }
+            }
+          }
+
+          assembler->Bind(&if_lhsisreceiver);
+          {
+            // Check if the {rhs} is also a JSReceiver.
+            Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
+            STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+            assembler->Branch(
+                assembler->Int32LessThanOrEqual(
+                    assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
+                    rhs_instance_type),
+                &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+            assembler->Bind(&if_rhsisreceiver);
+            {
+              // Both {lhs} and {rhs} are different JSReceiver references, so
+              // this cannot be considered equal.
+              assembler->Goto(&if_notequal);
+            }
+
+            assembler->Bind(&if_rhsisnotreceiver);
+            {
+              // Check if {rhs} is Null or Undefined (an undetectable check
+              // is sufficient here, since we already know that {rhs} is not
+              // a JSReceiver).
+              Label if_rhsisundetectable(assembler),
+                  if_rhsisnotundetectable(assembler, Label::kDeferred);
+              Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
+              assembler->BranchIfWord32Equal(
+                  assembler->Word32And(
+                      rhs_bitfield,
+                      assembler->Int32Constant(1 << Map::kIsUndetectable)),
+                  assembler->Int32Constant(0), &if_rhsisnotundetectable,
+                  &if_rhsisundetectable);
+
+              assembler->Bind(&if_rhsisundetectable);
+              {
+                // Check if {lhs} is an undetectable JSReceiver.
+                Node* lhs_bitfield = assembler->LoadMapBitField(lhs_map);
+                assembler->BranchIfWord32Equal(
+                    assembler->Word32And(
+                        lhs_bitfield,
+                        assembler->Int32Constant(1 << Map::kIsUndetectable)),
+                    assembler->Int32Constant(0), &if_notequal, &if_equal);
+              }
+
+              assembler->Bind(&if_rhsisnotundetectable);
+              {
+                // The {rhs} is some Primitive different from Null and
+                // Undefined, need to convert {lhs} to Primitive first.
+                // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
+                var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
+                                                    context, lhs));
+                assembler->Goto(&loop);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  assembler->Bind(&do_fcmp);
+  {
+    // Load the {lhs} and {rhs} floating point values.
+    Node* lhs = var_fcmp_lhs.value();
+    Node* rhs = var_fcmp_rhs.value();
+
+    // Perform a fast floating point comparison.
+    assembler->BranchIfFloat64Equal(lhs, rhs, &if_equal, &if_notequal);
+  }
+
+  assembler->Bind(&if_equal);
+  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+  assembler->Bind(&if_notequal);
+  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
+
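+// ES6 section 7.2.13 Strict Equality Comparison. No coercions are performed:
+// operands of different types are never strictly equal, numbers are compared
+// by value (a Smi and a HeapNumber holding the same value are equal), strings
+// by content, and everything else by reference. For example:
+//   1 === 1.0           // true
+//   "a" + "b" === "ab"  // true (string contents are compared)
+//   NaN === NaN         // false
+//   ({}) === ({})       // false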
+void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
+                         ResultMode mode) {
+  // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+  // mode; for kNegateResult mode we properly negate the result.
+  //
+  // if (lhs == rhs) {
+  //   if (lhs->IsHeapNumber()) return !std::isnan(HeapNumber::cast(lhs)->value());
+  //   return true;
+  // }
+  // if (!lhs->IsSmi()) {
+  //   if (lhs->IsHeapNumber()) {
+  //     if (rhs->IsSmi()) {
+  //       return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
+  //     } else if (rhs->IsHeapNumber()) {
+  //       return HeapNumber::cast(rhs)->value() ==
+  //       HeapNumber::cast(lhs)->value();
+  //     } else {
+  //       return false;
+  //     }
+  //   } else {
+  //     if (rhs->IsSmi()) {
+  //       return false;
+  //     } else {
+  //       if (lhs->IsString()) {
+  //         if (rhs->IsString()) {
+  //           return %StringEqual(lhs, rhs);
+  //         } else {
+  //           return false;
+  //         }
+  //       } else if (lhs->IsSimd128()) {
+  //         if (rhs->IsSimd128()) {
+  //           return %StrictEqual(lhs, rhs);
+  //         }
+  //       } else {
+  //         return false;
+  //       }
+  //     }
+  //   }
+  // } else {
+  //   if (rhs->IsSmi()) {
+  //     return false;
+  //   } else {
+  //     if (rhs->IsHeapNumber()) {
+  //       return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
+  //     } else {
+  //       return false;
+  //     }
+  //   }
+  // }
+
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Label if_equal(assembler), if_notequal(assembler);
+
+  // Check if {lhs} and {rhs} refer to the same object.
+  Label if_same(assembler), if_notsame(assembler);
+  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+  assembler->Bind(&if_same);
+  {
+    // The {lhs} and {rhs} reference the exact same value, yet we need special
+    // treatment for HeapNumber, as NaN is not equal to NaN.
+    GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
+  }
+
+  assembler->Bind(&if_notsame);
+  {
+    // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
+    // String and Simd128Value they can still be considered equal.
+    Node* number_map = assembler->HeapNumberMapConstant();
+
+    // Check if {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+    assembler->Bind(&if_lhsisnotsmi);
+    {
+      // Load the map of {lhs}.
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if {lhs} is a HeapNumber.
+      Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                        &if_lhsisnumber, &if_lhsisnotnumber);
+
+      assembler->Bind(&if_lhsisnumber);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        {
+          // Convert {lhs} and {rhs} to floating point values.
+          Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
+          Node* rhs_value = assembler->SmiToFloat64(rhs);
+
+          // Perform a floating point comparison of {lhs} and {rhs}.
+          assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
+                                          &if_notequal);
+        }
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the map of {rhs}.
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if {rhs} is also a HeapNumber.
+          Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &if_rhsisnumber, &if_rhsisnotnumber);
+
+          assembler->Bind(&if_rhsisnumber);
+          {
+            // Convert {lhs} and {rhs} to floating point values.
+            Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
+            Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
+
+            // Perform a floating point comparison of {lhs} and {rhs}.
+            assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
+                                            &if_notequal);
+          }
+
+          assembler->Bind(&if_rhsisnotnumber);
+          assembler->Goto(&if_notequal);
+        }
+      }
+
+      assembler->Bind(&if_lhsisnotnumber);
+      {
+        // Check if {rhs} is a Smi or a HeapObject.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        assembler->Goto(&if_notequal);
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the instance type of {lhs}.
+          Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+
+          // Check if {lhs} is a String.
+          Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
+          assembler->Branch(assembler->Int32LessThan(
+                                lhs_instance_type,
+                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                            &if_lhsisstring, &if_lhsisnotstring);
+
+          assembler->Bind(&if_lhsisstring);
+          {
+            // Load the instance type of {rhs}.
+            Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+            // Check if {rhs} is also a String.
+            Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+            assembler->Branch(assembler->Int32LessThan(
+                                  rhs_instance_type, assembler->Int32Constant(
+                                                         FIRST_NONSTRING_TYPE)),
+                              &if_rhsisstring, &if_rhsisnotstring);
+
+            assembler->Bind(&if_rhsisstring);
+            {
+              Callable callable =
+                  (mode == kDontNegateResult)
+                      ? CodeFactory::StringEqual(assembler->isolate())
+                      : CodeFactory::StringNotEqual(assembler->isolate());
+              assembler->TailCallStub(callable, context, lhs, rhs);
+            }
+
+            assembler->Bind(&if_rhsisnotstring);
+            assembler->Goto(&if_notequal);
+          }
+
+          assembler->Bind(&if_lhsisnotstring);
+          {
+            // Check if {lhs} is a Simd128Value.
+            Label if_lhsissimd128value(assembler),
+                if_lhsisnotsimd128value(assembler);
+            assembler->Branch(assembler->Word32Equal(
+                                  lhs_instance_type,
+                                  assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+                              &if_lhsissimd128value, &if_lhsisnotsimd128value);
+
+            assembler->Bind(&if_lhsissimd128value);
+            {
+              // Load the map of {rhs}.
+              Node* rhs_map = assembler->LoadMap(rhs);
+
+              // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
+              GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
+                                                    rhs, rhs_map, &if_equal,
+                                                    &if_notequal);
+            }
+
+            assembler->Bind(&if_lhsisnotsimd128value);
+            assembler->Goto(&if_notequal);
+          }
+        }
+      }
+    }
+
+    assembler->Bind(&if_lhsissmi);
+    {
+      // We already know that {lhs} and {rhs} are not reference equal, and {lhs}
+      // is a Smi; so {lhs} and {rhs} can only be strictly equal if {rhs} is a
+      // HeapNumber with an equal floating point value.
+
+      // Check if {rhs} is a Smi or a HeapObject.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      assembler->Goto(&if_notequal);
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of the {rhs}.
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // The {rhs} could be a HeapNumber with the same value as {lhs}.
+        Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
+        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                          &if_rhsisnumber, &if_rhsisnotnumber);
+
+        assembler->Bind(&if_rhsisnumber);
+        {
+          // Convert {lhs} and {rhs} to floating point values.
+          Node* lhs_value = assembler->SmiToFloat64(lhs);
+          Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
+
+          // Perform a floating point comparison of {lhs} and {rhs}.
+          assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
+                                          &if_notequal);
+        }
+
+        assembler->Bind(&if_rhsisnotnumber);
+        assembler->Goto(&if_notequal);
+      }
+    }
+  }
+
+  assembler->Bind(&if_equal);
+  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+  assembler->Bind(&if_notequal);
+  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
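
An editorial sketch (not part of the patch) of the decision tree in the pseudo-code above, restated over a hypothetical tagged value type; Simd128 values and reference identity of other heap objects are omitted, so this is an illustration rather than the stub's exact behaviour.

// Hypothetical types and names; only numbers and strings are modelled.
#include <string>

struct TaggedValue {
  enum Kind { kNumber, kString, kOther } kind;
  double number = 0;    // valid when kind == kNumber
  std::string string;   // valid when kind == kString
};

bool StrictEquals(const TaggedValue& lhs, const TaggedValue& rhs) {
  if (lhs.kind == TaggedValue::kNumber && rhs.kind == TaggedValue::kNumber) {
    return lhs.number == rhs.number;  // NaN is not equal to NaN
  }
  if (lhs.kind == TaggedValue::kString && rhs.kind == TaggedValue::kString) {
    return lhs.string == rhs.string;  // %StringEqual in the stub
  }
  return false;  // different kinds never compare strictly equal here
}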
+
+void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
+                                        RelationalComparisonMode mode) {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Label if_less(assembler), if_equal(assembler), if_greater(assembler);
+
+  // Fast check to see if {lhs} and {rhs} refer to the same String object.
+  Label if_same(assembler), if_notsame(assembler);
+  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+  assembler->Bind(&if_same);
+  assembler->Goto(&if_equal);
+
+  assembler->Bind(&if_notsame);
+  {
+    // Load instance types of {lhs} and {rhs}.
+    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+    // Combine the instance types into a single 16-bit value, so we can check
+    // both of them at once.
+    Node* both_instance_types = assembler->Word32Or(
+        lhs_instance_type,
+        assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+    // Check that both {lhs} and {rhs} are flat one-byte strings.
+    int const kBothSeqOneByteStringMask =
+        kStringEncodingMask | kStringRepresentationMask |
+        ((kStringEncodingMask | kStringRepresentationMask) << 8);
+    int const kBothSeqOneByteStringTag =
+        kOneByteStringTag | kSeqStringTag |
+        ((kOneByteStringTag | kSeqStringTag) << 8);
+    Label if_bothonebyteseqstrings(assembler),
+        if_notbothonebyteseqstrings(assembler);
+    assembler->Branch(assembler->Word32Equal(
+                          assembler->Word32And(both_instance_types,
+                                               assembler->Int32Constant(
+                                                   kBothSeqOneByteStringMask)),
+                          assembler->Int32Constant(kBothSeqOneByteStringTag)),
+                      &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+    assembler->Bind(&if_bothonebyteseqstrings);
+    {
+      // Load the length of {lhs} and {rhs}.
+      Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
+      Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+
+      // Determine the minimum length.
+      Node* length = assembler->SmiMin(lhs_length, rhs_length);
+
+      // Compute the effective offset of the first character.
+      Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                              kHeapObjectTag);
+
+      // Compute the first offset after the string from the length.
+      Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
+
+      // Loop over the {lhs} and {rhs} strings to see if they are equal.
+      Variable var_offset(assembler, MachineType::PointerRepresentation());
+      Label loop(assembler, &var_offset);
+      var_offset.Bind(begin);
+      assembler->Goto(&loop);
+      assembler->Bind(&loop);
+      {
+        // Check if {offset} equals {end}.
+        Node* offset = var_offset.value();
+        Label if_done(assembler), if_notdone(assembler);
+        assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+                          &if_notdone);
+
+        assembler->Bind(&if_notdone);
+        {
+          // Load the next characters from {lhs} and {rhs}.
+          Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
+          Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
+
+          // Check if the characters match.
+          Label if_valueissame(assembler), if_valueisnotsame(assembler);
+          assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+                            &if_valueissame, &if_valueisnotsame);
+
+          assembler->Bind(&if_valueissame);
+          {
+            // Advance to next character.
+            var_offset.Bind(
+                assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+          }
+          assembler->Goto(&loop);
+
+          assembler->Bind(&if_valueisnotsame);
+          assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
+                              &if_less, &if_greater);
+        }
+
+        assembler->Bind(&if_done);
+        {
+          // All characters up to the min length are equal; decide based on
+          // string length.
+          Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+          assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
+                            &if_lengthisequal, &if_lengthisnotequal);
+
+          assembler->Bind(&if_lengthisequal);
+          assembler->Goto(&if_equal);
+
+          assembler->Bind(&if_lengthisnotequal);
+          assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
+                                         &if_greater);
+        }
+      }
+    }
+
+    assembler->Bind(&if_notbothonebyteseqstrings);
+    {
+      // TODO(bmeurer): Add fast case support for flattened cons strings;
+      // also add support for two byte string relational comparisons.
+      switch (mode) {
+        case kLessThan:
+          assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
+                                     rhs);
+          break;
+        case kLessThanOrEqual:
+          assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
+                                     lhs, rhs);
+          break;
+        case kGreaterThan:
+          assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
+                                     rhs);
+          break;
+        case kGreaterThanOrEqual:
+          assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
+                                     context, lhs, rhs);
+          break;
+      }
+    }
+  }
+
+  assembler->Bind(&if_less);
+  switch (mode) {
+    case kLessThan:
+    case kLessThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(true));
+      break;
+
+    case kGreaterThan:
+    case kGreaterThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(false));
+      break;
+  }
+
+  assembler->Bind(&if_equal);
+  switch (mode) {
+    case kLessThan:
+    case kGreaterThan:
+      assembler->Return(assembler->BooleanConstant(false));
+      break;
+
+    case kLessThanOrEqual:
+    case kGreaterThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(true));
+      break;
+  }
+
+  assembler->Bind(&if_greater);
+  switch (mode) {
+    case kLessThan:
+    case kLessThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(false));
+      break;
+
+    case kGreaterThan:
+    case kGreaterThanOrEqual:
+      assembler->Return(assembler->BooleanConstant(true));
+      break;
+  }
+}
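
An editorial sketch (not part of the patch) of the fast path above for two flat one-byte strings: bytes are compared up to the shorter length, and if that prefix matches, the comparison is decided by the lengths.

#include <cstddef>
#include <cstdint>

// Returns -1, 0 or +1, mirroring the if_less / if_equal / if_greater labels.
int CompareSeqOneByteStrings(const uint8_t* lhs, size_t lhs_length,
                             const uint8_t* rhs, size_t rhs_length) {
  size_t length = lhs_length < rhs_length ? lhs_length : rhs_length;
  for (size_t offset = 0; offset != length; ++offset) {
    if (lhs[offset] != rhs[offset]) {
      return lhs[offset] < rhs[offset] ? -1 : 1;
    }
  }
  if (lhs_length == rhs_length) return 0;
  return lhs_length < rhs_length ? -1 : 1;  // decide by string length
}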
+
+void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
+                         ResultMode mode) {
+  // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+  // mode; for kNegateResult mode we properly negate the result.
+  //
+  // if (lhs == rhs) return true;
+  // if (lhs->length() != rhs->length()) return false;
+  // if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
+  //   return false;
+  // }
+  // if (lhs->IsSeqOneByteString() && rhs->IsSeqOneByteString()) {
+  //   for (i = 0; i != lhs->length(); ++i) {
+  //     if (lhs[i] != rhs[i]) return false;
+  //   }
+  //   return true;
+  // }
+  // return %StringEqual(lhs, rhs);
+
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Label if_equal(assembler), if_notequal(assembler);
+
+  // Fast check to see if {lhs} and {rhs} refer to the same String object.
+  Label if_same(assembler), if_notsame(assembler);
+  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+  assembler->Bind(&if_same);
+  assembler->Goto(&if_equal);
+
+  assembler->Bind(&if_notsame);
+  {
+    // The {lhs} and {rhs} don't refer to the exact same String object.
+
+    // Load the length of {lhs} and {rhs}.
+    Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
+    Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+
+    // Check if the lengths of {lhs} and {rhs} are equal.
+    Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
+    assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
+                      &if_lengthisequal, &if_lengthisnotequal);
+
+    assembler->Bind(&if_lengthisequal);
+    {
+      // Load instance types of {lhs} and {rhs}.
+      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+      // Combine the instance types into a single 16-bit value, so we can check
+      // both of them at once.
+      Node* both_instance_types = assembler->Word32Or(
+          lhs_instance_type,
+          assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+
+      // Check if both {lhs} and {rhs} are internalized.
+      int const kBothInternalizedMask =
+          kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
+      int const kBothInternalizedTag =
+          kInternalizedTag | (kInternalizedTag << 8);
+      Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
+      assembler->Branch(assembler->Word32Equal(
+                            assembler->Word32And(both_instance_types,
+                                                 assembler->Int32Constant(
+                                                     kBothInternalizedMask)),
+                            assembler->Int32Constant(kBothInternalizedTag)),
+                        &if_bothinternalized, &if_notbothinternalized);
+
+      assembler->Bind(&if_bothinternalized);
+      {
+        // Fast negative check for internalized-to-internalized equality.
+        assembler->Goto(&if_notequal);
+      }
+
+      assembler->Bind(&if_notbothinternalized);
+      {
+        // Check that both {lhs} and {rhs} are flat one-byte strings.
+        int const kBothSeqOneByteStringMask =
+            kStringEncodingMask | kStringRepresentationMask |
+            ((kStringEncodingMask | kStringRepresentationMask) << 8);
+        int const kBothSeqOneByteStringTag =
+            kOneByteStringTag | kSeqStringTag |
+            ((kOneByteStringTag | kSeqStringTag) << 8);
+        Label if_bothonebyteseqstrings(assembler),
+            if_notbothonebyteseqstrings(assembler);
+        assembler->Branch(
+            assembler->Word32Equal(
+                assembler->Word32And(
+                    both_instance_types,
+                    assembler->Int32Constant(kBothSeqOneByteStringMask)),
+                assembler->Int32Constant(kBothSeqOneByteStringTag)),
+            &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+        assembler->Bind(&if_bothonebyteseqstrings);
+        {
+          // Compute the effective offset of the first character.
+          Node* begin = assembler->IntPtrConstant(
+              SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+          // Compute the first offset after the string from the length.
+          Node* end =
+              assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
+
+          // Loop over the {lhs} and {rhs} strings to see if they are equal.
+          Variable var_offset(assembler, MachineType::PointerRepresentation());
+          Label loop(assembler, &var_offset);
+          var_offset.Bind(begin);
+          assembler->Goto(&loop);
+          assembler->Bind(&loop);
+          {
+            // Check if {offset} equals {end}.
+            Node* offset = var_offset.value();
+            Label if_done(assembler), if_notdone(assembler);
+            assembler->Branch(assembler->WordEqual(offset, end), &if_done,
+                              &if_notdone);
+
+            assembler->Bind(&if_notdone);
+            {
+              // Load the next characters from {lhs} and {rhs}.
+              Node* lhs_value =
+                  assembler->Load(MachineType::Uint8(), lhs, offset);
+              Node* rhs_value =
+                  assembler->Load(MachineType::Uint8(), rhs, offset);
+
+              // Check if the characters match.
+              Label if_valueissame(assembler), if_valueisnotsame(assembler);
+              assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
+                                &if_valueissame, &if_valueisnotsame);
+
+              assembler->Bind(&if_valueissame);
+              {
+                // Advance to next character.
+                var_offset.Bind(
+                    assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+              }
+              assembler->Goto(&loop);
+
+              assembler->Bind(&if_valueisnotsame);
+              assembler->Goto(&if_notequal);
+            }
+
+            assembler->Bind(&if_done);
+            assembler->Goto(&if_equal);
+          }
+        }
+
+        assembler->Bind(&if_notbothonebyteseqstrings);
+        {
+          // TODO(bmeurer): Add fast case support for flattened cons strings;
+          // also add support for two byte string equality checks.
+          Runtime::FunctionId function_id = (mode == kDontNegateResult)
+                                                ? Runtime::kStringEqual
+                                                : Runtime::kStringNotEqual;
+          assembler->TailCallRuntime(function_id, context, lhs, rhs);
+        }
+      }
+    }
+
+    assembler->Bind(&if_lengthisnotequal);
+    {
+      // Mismatch in length of {lhs} and {rhs}, cannot be equal.
+      assembler->Goto(&if_notequal);
+    }
+  }
+
+  assembler->Bind(&if_equal);
+  assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+
+  assembler->Bind(&if_notequal);
+  assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+}
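
An editorial sketch (not part of the patch) of the instance-type packing used in both string generators above: the two 8-bit instance types are OR-ed into one word so a single mask-and-compare answers "are both strings sequential one-byte" (or "are both internalized") at once. The mask and tag parameters here stand in for V8's kStringEncodingMask/kStringRepresentationMask-style constants.

#include <cstdint>

bool BothMatch(uint8_t lhs_instance_type, uint8_t rhs_instance_type,
               uint8_t mask, uint8_t tag) {
  uint32_t both_instance_types =
      lhs_instance_type | (uint32_t{rhs_instance_type} << 8);
  uint32_t both_mask = mask | (uint32_t{mask} << 8);
  uint32_t both_tag = tag | (uint32_t{tag} << 8);
  return (both_instance_types & both_mask) == both_tag;
}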
+
+}  // namespace
+
+void LessThanStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateAbstractRelationalComparison(assembler, kLessThan);
+}
+
+void LessThanOrEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual);
+}
+
+void GreaterThanStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateAbstractRelationalComparison(assembler, kGreaterThan);
+}
+
+void GreaterThanOrEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual);
+}
+
+void EqualStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
+  GenerateEqual(assembler, kDontNegateResult);
+}
+
+void NotEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateEqual(assembler, kNegateResult);
+}
+
+void StrictEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStrictEqual(assembler, kDontNegateResult);
+}
+
+void StrictNotEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStrictEqual(assembler, kNegateResult);
+}
+
+void StringEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStringEqual(assembler, kDontNegateResult);
+}
+
+void StringNotEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStringEqual(assembler, kNegateResult);
+}
+
+void StringLessThanStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStringRelationalComparison(assembler, kLessThan);
+}
+
+void StringLessThanOrEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
+}
+
+void StringGreaterThanStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStringRelationalComparison(assembler, kGreaterThan);
+}
+
+void StringGreaterThanOrEqualStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
+}
+
+void ToLengthStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(1);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_len(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_len);
+  var_len.Bind(assembler->Parameter(0));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Shared entry points.
+    Label return_len(assembler),
+        return_two53minus1(assembler, Label::kDeferred),
+        return_zero(assembler, Label::kDeferred);
+
+    // Load the current {len} value.
+    Node* len = var_len.value();
+
+    // Check if {len} is a positive Smi.
+    assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
+
+    // Check if {len} is a (negative) Smi.
+    assembler->GotoIf(assembler->WordIsSmi(len), &return_zero);
+
+    // Check if {len} is a HeapNumber.
+    Label if_lenisheapnumber(assembler),
+        if_lenisnotheapnumber(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordEqual(assembler->LoadMap(len),
+                                           assembler->HeapNumberMapConstant()),
+                      &if_lenisheapnumber, &if_lenisnotheapnumber);
+
+    assembler->Bind(&if_lenisheapnumber);
+    {
+      // Load the floating-point value of {len}.
+      Node* len_value = assembler->LoadHeapNumberValue(len);
+
+      // Check if {len} is not greater than zero.
+      assembler->GotoUnless(assembler->Float64GreaterThan(
+                                len_value, assembler->Float64Constant(0.0)),
+                            &return_zero);
+
+      // Check if {len} is greater than or equal to 2^53-1.
+      assembler->GotoIf(
+          assembler->Float64GreaterThanOrEqual(
+              len_value, assembler->Float64Constant(kMaxSafeInteger)),
+          &return_two53minus1);
+
+      // Round the {len} towards -Infinity.
+      Node* value = assembler->Float64Floor(len_value);
+      Node* result = assembler->ChangeFloat64ToTagged(value);
+      assembler->Return(result);
+    }
+
+    assembler->Bind(&if_lenisnotheapnumber);
+    {
+      // Need to convert {len} to a Number first.
+      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+      var_len.Bind(assembler->CallStub(callable, context, len));
+      assembler->Goto(&loop);
+    }
+
+    assembler->Bind(&return_len);
+    assembler->Return(var_len.value());
+
+    assembler->Bind(&return_two53minus1);
+    assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
+
+    assembler->Bind(&return_zero);
+    assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
+  }
+}
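
An editorial sketch (not part of the patch) of the clamping ToLengthStub applies once the input is a number: NaN and non-positive values become 0, values at or above 2^53-1 saturate, and everything else is floored.

#include <cmath>

double ToLengthOfNumber(double len) {
  const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  if (!(len > 0.0)) return 0.0;        // covers NaN, zero and negatives
  if (len >= kMaxSafeInteger) return kMaxSafeInteger;
  return std::floor(len);              // round towards -Infinity
}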
+
+void ToBooleanStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Label Label;
+
+  Node* value = assembler->Parameter(0);
+  Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+
+  // Check if {value} is a Smi or a HeapObject.
+  assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
+                    &if_valueisnotsmi);
+
+  assembler->Bind(&if_valueissmi);
+  {
+    // The {value} is a Smi, only need to check against zero.
+    Label if_valueiszero(assembler), if_valueisnotzero(assembler);
+    assembler->Branch(assembler->SmiEqual(value, assembler->SmiConstant(0)),
+                      &if_valueiszero, &if_valueisnotzero);
+
+    assembler->Bind(&if_valueiszero);
+    assembler->Return(assembler->BooleanConstant(false));
+
+    assembler->Bind(&if_valueisnotzero);
+    assembler->Return(assembler->BooleanConstant(true));
+  }
+
+  assembler->Bind(&if_valueisnotsmi);
+  {
+    Label if_valueisstring(assembler), if_valueisheapnumber(assembler),
+        if_valueisoddball(assembler), if_valueisother(assembler);
+
+    // The {value} is a HeapObject, load its map.
+    Node* value_map = assembler->LoadMap(value);
+
+    // Load the instance type of {value}.
+    Node* value_instance_type = assembler->Load(
+        MachineType::Uint8(), value_map,
+        assembler->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+
+    // Dispatch based on the instance type; we distinguish all String instance
+    // types, the HeapNumber type and the Oddball type.
+    size_t const kNumCases = FIRST_NONSTRING_TYPE + 2;
+    Label* case_labels[kNumCases];
+    int32_t case_values[kNumCases];
+    for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+      case_labels[i] = new Label(assembler);
+      case_values[i] = i;
+    }
+    case_labels[FIRST_NONSTRING_TYPE + 0] = &if_valueisheapnumber;
+    case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
+    case_labels[FIRST_NONSTRING_TYPE + 1] = &if_valueisoddball;
+    case_values[FIRST_NONSTRING_TYPE + 1] = ODDBALL_TYPE;
+    assembler->Switch(value_instance_type, &if_valueisother, case_values,
+                      case_labels, arraysize(case_values));
+    for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+      assembler->Bind(case_labels[i]);
+      assembler->Goto(&if_valueisstring);
+      delete case_labels[i];
+    }
+
+    assembler->Bind(&if_valueisstring);
+    {
+      // Load the string length field of the {value}.
+      Node* value_length =
+          assembler->LoadObjectField(value, String::kLengthOffset);
+
+      // Check if the {value} is the empty string.
+      Label if_valueisempty(assembler), if_valueisnotempty(assembler);
+      assembler->Branch(
+          assembler->SmiEqual(value_length, assembler->SmiConstant(0)),
+          &if_valueisempty, &if_valueisnotempty);
+
+      assembler->Bind(&if_valueisempty);
+      assembler->Return(assembler->BooleanConstant(false));
+
+      assembler->Bind(&if_valueisnotempty);
+      assembler->Return(assembler->BooleanConstant(true));
+    }
+
+    assembler->Bind(&if_valueisheapnumber);
+    {
+      Node* value_value = assembler->Load(
+          MachineType::Float64(), value,
+          assembler->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+
+      Label if_valueispositive(assembler), if_valueisnotpositive(assembler),
+          if_valueisnegative(assembler), if_valueisnanorzero(assembler);
+      assembler->Branch(assembler->Float64LessThan(
+                            assembler->Float64Constant(0.0), value_value),
+                        &if_valueispositive, &if_valueisnotpositive);
+
+      assembler->Bind(&if_valueispositive);
+      assembler->Return(assembler->BooleanConstant(true));
+
+      assembler->Bind(&if_valueisnotpositive);
+      assembler->Branch(assembler->Float64LessThan(
+                            value_value, assembler->Float64Constant(0.0)),
+                        &if_valueisnegative, &if_valueisnanorzero);
+
+      assembler->Bind(&if_valueisnegative);
+      assembler->Return(assembler->BooleanConstant(true));
+
+      assembler->Bind(&if_valueisnanorzero);
+      assembler->Return(assembler->BooleanConstant(false));
+    }
+
+    assembler->Bind(&if_valueisoddball);
+    {
+      // The {value} is an Oddball, and every Oddball knows its boolean value.
+      Node* value_toboolean =
+          assembler->LoadObjectField(value, Oddball::kToBooleanOffset);
+      assembler->Return(value_toboolean);
+    }
+
+    assembler->Bind(&if_valueisother);
+    {
+      Node* value_map_bitfield = assembler->Load(
+          MachineType::Uint8(), value_map,
+          assembler->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+      Node* value_map_undetectable = assembler->Word32And(
+          value_map_bitfield,
+          assembler->Int32Constant(1 << Map::kIsUndetectable));
+
+      // Check if the {value} is undetectable.
+      Label if_valueisundetectable(assembler),
+          if_valueisnotundetectable(assembler);
+      assembler->Branch(assembler->Word32Equal(value_map_undetectable,
+                                               assembler->Int32Constant(0)),
+                        &if_valueisnotundetectable, &if_valueisundetectable);
+
+      assembler->Bind(&if_valueisundetectable);
+      assembler->Return(assembler->BooleanConstant(false));
+
+      assembler->Bind(&if_valueisnotundetectable);
+      assembler->Return(assembler->BooleanConstant(true));
+    }
+  }
+}
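
An editorial sketch (not part of the patch) of the HeapNumber branch above: a number is truthy exactly when it is neither zero (of either sign) nor NaN, which the stub expresses as two Float64LessThan checks against 0.0.

bool NumberToBoolean(double value) {
  return value > 0.0 || value < 0.0;  // false for +0, -0 and NaN
}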
+
+void ToIntegerStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(1);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_arg(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_arg);
+  var_arg.Bind(assembler->Parameter(0));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Shared entry points.
+    Label return_arg(assembler), return_zero(assembler, Label::kDeferred);
+
+    // Load the current {arg} value.
+    Node* arg = var_arg.value();
+
+    // Check if {arg} is a Smi.
+    assembler->GotoIf(assembler->WordIsSmi(arg), &return_arg);
+
+    // Check if {arg} is a HeapNumber.
+    Label if_argisheapnumber(assembler),
+        if_argisnotheapnumber(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordEqual(assembler->LoadMap(arg),
+                                           assembler->HeapNumberMapConstant()),
+                      &if_argisheapnumber, &if_argisnotheapnumber);
+
+    assembler->Bind(&if_argisheapnumber);
+    {
+      // Load the floating-point value of {arg}.
+      Node* arg_value = assembler->LoadHeapNumberValue(arg);
+
+      // Check if {arg} is NaN.
+      assembler->GotoUnless(assembler->Float64Equal(arg_value, arg_value),
+                            &return_zero);
+
+      // Truncate {arg} towards zero.
+      Node* value = assembler->Float64Trunc(arg_value);
+      var_arg.Bind(assembler->ChangeFloat64ToTagged(value));
+      assembler->Goto(&return_arg);
+    }
+
+    assembler->Bind(&if_argisnotheapnumber);
+    {
+      // Need to convert {arg} to a Number first.
+      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+      var_arg.Bind(assembler->CallStub(callable, context, arg));
+      assembler->Goto(&loop);
+    }
+
+    assembler->Bind(&return_arg);
+    assembler->Return(var_arg.value());
+
+    assembler->Bind(&return_zero);
+    assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
+  }
+}
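
An editorial sketch (not part of the patch) of the HeapNumber branch of ToIntegerStub: NaN maps to 0 and any other number is truncated towards zero; non-numbers first go through NonNumberToNumber and loop back.

#include <cmath>

double ToIntegerOfNumber(double arg) {
  if (std::isnan(arg)) return 0.0;
  return std::trunc(arg);
}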
+
+void StoreInterceptorStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* receiver = assembler->Parameter(0);
+  Node* name = assembler->Parameter(1);
+  Node* value = assembler->Parameter(2);
+  Node* context = assembler->Parameter(3);
+  assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
+                             receiver, name, value);
+}
+
+void LoadIndexedInterceptorStub::GenerateAssembly(
+    compiler::CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  typedef compiler::CodeStubAssembler::Label Label;
+  Node* receiver = assembler->Parameter(0);
+  Node* key = assembler->Parameter(1);
+  Node* slot = assembler->Parameter(2);
+  Node* vector = assembler->Parameter(3);
+  Node* context = assembler->Parameter(4);
+
+  Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
+  assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
+                    &if_keyisinvalid);
+  assembler->Bind(&if_keyispositivesmi);
+  assembler->TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
+                             receiver, key);
+
+  assembler->Bind(&if_keyisinvalid);
+  assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
+                             slot, vector);
+}
 
 template<class StateType>
 void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
@@ -508,17 +3091,6 @@
 }
 
 
-void CompareNilICStub::PrintBaseName(std::ostream& os) const {  // NOLINT
-  CodeStub::PrintBaseName(os);
-  os << ((nil_value() == kNullValue) ? "(NullValue)" : "(UndefinedValue)");
-}
-
-
-void CompareNilICStub::PrintState(std::ostream& os) const {  // NOLINT
-  os << state();
-}
-
-
 // TODO(svenpanne) Make this a real infix_ostream_iterator.
 class SimpleListPrinter {
  public:
@@ -539,45 +3111,6 @@
 };
 
 
-std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s) {
-  os << "(";
-  SimpleListPrinter p(os);
-  if (s.IsEmpty()) p.Add("None");
-  if (s.Contains(CompareNilICStub::UNDEFINED)) p.Add("Undefined");
-  if (s.Contains(CompareNilICStub::NULL_TYPE)) p.Add("Null");
-  if (s.Contains(CompareNilICStub::MONOMORPHIC_MAP)) p.Add("MonomorphicMap");
-  if (s.Contains(CompareNilICStub::GENERIC)) p.Add("Generic");
-  return os << ")";
-}
-
-
-Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
-  State state = this->state();
-  if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any();
-
-  Type* result = Type::None();
-  if (state.Contains(CompareNilICStub::UNDEFINED)) {
-    result = Type::Union(result, Type::Undefined(), zone);
-  }
-  if (state.Contains(CompareNilICStub::NULL_TYPE)) {
-    result = Type::Union(result, Type::Null(), zone);
-  }
-  if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
-    Type* type = map.is_null() ? Type::Detectable() : Type::Class(map, zone);
-    result = Type::Union(result, type, zone);
-  }
-
-  return result;
-}
-
-
-Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
-  Type* output_type = GetType(zone, map);
-  Type* nil_type = nil_value() == kNullValue ? Type::Null() : Type::Undefined();
-  return Type::Union(output_type, nil_type, zone);
-}
-
-
 void CallICStub::PrintState(std::ostream& os) const {  // NOLINT
   os << state();
 }
@@ -671,7 +3204,6 @@
 
 
 void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  NumberToStringDescriptor call_descriptor(isolate());
   descriptor->Initialize(
       Runtime::FunctionForId(Runtime::kNumberToString)->entry);
 }
@@ -732,21 +3264,21 @@
   descriptor->Initialize();
 }
 
+#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Stub::InitializeDescriptor(                 \
+      CodeStubDescriptor* descriptor) {                            \
+    descriptor->Initialize(                                        \
+        Runtime::FunctionForId(Runtime::kCreate##Type)->entry);    \
+  }
+SIMD128_TYPES(SIMD128_INIT_DESC)
+#undef SIMD128_INIT_DESC
 
 void AllocateInNewSpaceStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   descriptor->Initialize();
 }
 
-
-void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(FUNCTION_ADDR(Runtime_CompareNilIC_Miss));
-  descriptor->SetMissHandler(ExternalReference(
-      Runtime::FunctionForId(Runtime::kCompareNilIC_Miss), isolate()));
-}
-
-
-void ToBooleanStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
   descriptor->SetMissHandler(ExternalReference(
       Runtime::FunctionForId(Runtime::kToBooleanIC_Miss), isolate()));
@@ -848,8 +3380,7 @@
   return os;
 }
 
-
-bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
+bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
   Types new_types = types();
   Types old_types = new_types;
   bool to_boolean_value = new_types.UpdateStatus(object);
@@ -858,30 +3389,27 @@
   return to_boolean_value;
 }
 
-
-void ToBooleanStub::PrintState(std::ostream& os) const {  // NOLINT
+void ToBooleanICStub::PrintState(std::ostream& os) const {  // NOLINT
   os << types();
 }
 
-
-std::ostream& operator<<(std::ostream& os, const ToBooleanStub::Types& s) {
+std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& s) {
   os << "(";
   SimpleListPrinter p(os);
   if (s.IsEmpty()) p.Add("None");
-  if (s.Contains(ToBooleanStub::UNDEFINED)) p.Add("Undefined");
-  if (s.Contains(ToBooleanStub::BOOLEAN)) p.Add("Bool");
-  if (s.Contains(ToBooleanStub::NULL_TYPE)) p.Add("Null");
-  if (s.Contains(ToBooleanStub::SMI)) p.Add("Smi");
-  if (s.Contains(ToBooleanStub::SPEC_OBJECT)) p.Add("SpecObject");
-  if (s.Contains(ToBooleanStub::STRING)) p.Add("String");
-  if (s.Contains(ToBooleanStub::SYMBOL)) p.Add("Symbol");
-  if (s.Contains(ToBooleanStub::HEAP_NUMBER)) p.Add("HeapNumber");
-  if (s.Contains(ToBooleanStub::SIMD_VALUE)) p.Add("SimdValue");
+  if (s.Contains(ToBooleanICStub::UNDEFINED)) p.Add("Undefined");
+  if (s.Contains(ToBooleanICStub::BOOLEAN)) p.Add("Bool");
+  if (s.Contains(ToBooleanICStub::NULL_TYPE)) p.Add("Null");
+  if (s.Contains(ToBooleanICStub::SMI)) p.Add("Smi");
+  if (s.Contains(ToBooleanICStub::SPEC_OBJECT)) p.Add("SpecObject");
+  if (s.Contains(ToBooleanICStub::STRING)) p.Add("String");
+  if (s.Contains(ToBooleanICStub::SYMBOL)) p.Add("Symbol");
+  if (s.Contains(ToBooleanICStub::HEAP_NUMBER)) p.Add("HeapNumber");
+  if (s.Contains(ToBooleanICStub::SIMD_VALUE)) p.Add("SimdValue");
   return os << ")";
 }
 
-
-bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
+bool ToBooleanICStub::Types::UpdateStatus(Handle<Object> object) {
   if (object->IsUndefined()) {
     Add(UNDEFINED);
     return false;
@@ -896,16 +3424,16 @@
     return Smi::cast(*object)->value() != 0;
   } else if (object->IsJSReceiver()) {
     Add(SPEC_OBJECT);
-    return !object->IsUndetectableObject();
+    return !object->IsUndetectable();
   } else if (object->IsString()) {
-    DCHECK(!object->IsUndetectableObject());
+    DCHECK(!object->IsUndetectable());
     Add(STRING);
     return String::cast(*object)->length() != 0;
   } else if (object->IsSymbol()) {
     Add(SYMBOL);
     return true;
   } else if (object->IsHeapNumber()) {
-    DCHECK(!object->IsUndetectableObject());
+    DCHECK(!object->IsUndetectable());
     Add(HEAP_NUMBER);
     double value = HeapNumber::cast(*object)->value();
     return value != 0 && !std::isnan(value);
@@ -919,12 +3447,12 @@
   }
 }
 
-
-bool ToBooleanStub::Types::NeedsMap() const {
-  return Contains(ToBooleanStub::SPEC_OBJECT) ||
-         Contains(ToBooleanStub::STRING) || Contains(ToBooleanStub::SYMBOL) ||
-         Contains(ToBooleanStub::HEAP_NUMBER) ||
-         Contains(ToBooleanStub::SIMD_VALUE);
+bool ToBooleanICStub::Types::NeedsMap() const {
+  return Contains(ToBooleanICStub::SPEC_OBJECT) ||
+         Contains(ToBooleanICStub::STRING) ||
+         Contains(ToBooleanICStub::SYMBOL) ||
+         Contains(ToBooleanICStub::HEAP_NUMBER) ||
+         Contains(ToBooleanICStub::SIMD_VALUE);
 }
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index f370ce6..ace4aae 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -23,8 +23,7 @@
   /* PlatformCodeStubs */                   \
   V(ArrayConstructor)                       \
   V(BinaryOpICWithAllocationSite)           \
-  V(CallApiFunction)                        \
-  V(CallApiAccessor)                        \
+  V(CallApiCallback)                        \
   V(CallApiGetter)                          \
   V(CallConstruct)                          \
   V(CallIC)                                 \
@@ -38,7 +37,6 @@
   V(KeyedLoadICTrampoline)                  \
   V(LoadICTrampoline)                       \
   V(CallICTrampoline)                       \
-  V(LoadIndexedInterceptor)                 \
   V(LoadIndexedString)                      \
   V(MathPow)                                \
   V(ProfileEntryHook)                       \
@@ -46,11 +44,11 @@
   V(RegExpExec)                             \
   V(StoreBufferOverflow)                    \
   V(StoreElement)                           \
-  V(StringCompare)                          \
   V(StubFailureTrampoline)                  \
   V(SubString)                              \
   V(ToNumber)                               \
-  V(ToLength)                               \
+  V(NonNumberToNumber)                      \
+  V(StringToNumber)                         \
   V(ToString)                               \
   V(ToName)                                 \
   V(ToObject)                               \
@@ -59,18 +57,16 @@
   V(VectorStoreIC)                          \
   V(VectorKeyedStoreIC)                     \
   /* HydrogenCodeStubs */                   \
-  V(AllocateHeapNumber)                     \
-  V(AllocateMutableHeapNumber)              \
   V(AllocateInNewSpace)                     \
   V(ArrayNArgumentsConstructor)             \
   V(ArrayNoArgumentConstructor)             \
   V(ArraySingleArgumentConstructor)         \
   V(BinaryOpIC)                             \
   V(BinaryOpWithAllocationSite)             \
-  V(CompareNilIC)                           \
   V(CreateAllocationSite)                   \
   V(CreateWeakCell)                         \
   V(ElementsTransitionAndStore)             \
+  V(FastArrayPush)                          \
   V(FastCloneRegExp)                        \
   V(FastCloneShallowArray)                  \
   V(FastCloneShallowObject)                 \
@@ -96,20 +92,56 @@
   V(StoreGlobalViaContext)                  \
   V(StoreScriptContextField)                \
   V(StringAdd)                              \
-  V(ToBoolean)                              \
+  V(ToBooleanIC)                            \
   V(TransitionElementsKind)                 \
   V(KeyedLoadIC)                            \
   V(LoadIC)                                 \
   /* TurboFanCodeStubs */                   \
+  V(AllocateHeapNumber)                     \
+  V(AllocateMutableHeapNumber)              \
+  V(AllocateFloat32x4)                      \
+  V(AllocateInt32x4)                        \
+  V(AllocateUint32x4)                       \
+  V(AllocateBool32x4)                       \
+  V(AllocateInt16x8)                        \
+  V(AllocateUint16x8)                       \
+  V(AllocateBool16x8)                       \
+  V(AllocateInt8x16)                        \
+  V(AllocateUint8x16)                       \
+  V(AllocateBool8x16)                       \
   V(StringLength)                           \
+  V(Add)                                    \
+  V(Subtract)                               \
+  V(BitwiseAnd)                             \
+  V(BitwiseOr)                              \
+  V(BitwiseXor)                             \
+  V(LessThan)                               \
+  V(LessThanOrEqual)                        \
+  V(GreaterThan)                            \
+  V(GreaterThanOrEqual)                     \
+  V(Equal)                                  \
+  V(NotEqual)                               \
+  V(StrictEqual)                            \
+  V(StrictNotEqual)                         \
+  V(StringEqual)                            \
+  V(StringNotEqual)                         \
+  V(StringLessThan)                         \
+  V(StringLessThanOrEqual)                  \
+  V(StringGreaterThan)                      \
+  V(StringGreaterThanOrEqual)               \
+  V(ToBoolean)                              \
+  V(ToInteger)                              \
+  V(ToLength)                               \
   /* IC Handler stubs */                    \
   V(ArrayBufferViewLoadField)               \
   V(LoadConstant)                           \
   V(LoadFastElement)                        \
   V(LoadField)                              \
+  V(LoadIndexedInterceptor)                 \
   V(KeyedLoadSloppyArguments)               \
   V(KeyedStoreSloppyArguments)              \
   V(StoreField)                             \
+  V(StoreInterceptor)                       \
   V(StoreGlobal)                            \
   V(StoreTransition)
 
@@ -157,13 +189,24 @@
 #define CODE_STUB_LIST_MIPS(V)
 #endif
 
+// List of code stubs only used on S390 platforms.
+#ifdef V8_TARGET_ARCH_S390
+#define CODE_STUB_LIST_S390(V) \
+  V(DirectCEntry)              \
+  V(StoreRegistersState)       \
+  V(RestoreRegistersState)
+#else
+#define CODE_STUB_LIST_S390(V)
+#endif
+
 // Combined list of code stubs.
 #define CODE_STUB_LIST(V)         \
   CODE_STUB_LIST_ALL_PLATFORMS(V) \
   CODE_STUB_LIST_ARM(V)           \
   CODE_STUB_LIST_ARM64(V)         \
   CODE_STUB_LIST_PPC(V)           \
-  CODE_STUB_LIST_MIPS(V)
+  CODE_STUB_LIST_MIPS(V)          \
+  CODE_STUB_LIST_S390(V)
 
 static const int kHasReturnedMinusZeroSentinel = 1;
 
@@ -347,11 +390,10 @@
   Handle<Code> GenerateCode() override;                               \
   DEFINE_CODE_STUB(NAME, SUPER)
 
-#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER)                          \
- public:                                                                \
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
-    return DESC##Descriptor(isolate());                                 \
-  };                                                                    \
+#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER)                  \
+ public:                                                        \
+  void GenerateAssembly(compiler::CodeStubAssembler* assembler) \
+      const override;                                           \
   DEFINE_CODE_STUB(NAME, SUPER)
 
 #define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@@ -584,6 +626,8 @@
 #include "src/mips/code-stubs-mips.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/mips64/code-stubs-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/code-stubs-s390.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/code-stubs-x87.h"
 #else
@@ -625,12 +669,212 @@
   InlineCacheState GetICState() const override { return MONOMORPHIC; }
   ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
 
-  void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
-
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_CODE_STUB(StringLength, TurboFanCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(StringLength, TurboFanCodeStub);
 };
 
+class AddStub final : public TurboFanCodeStub {
+ public:
+  explicit AddStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_CODE_STUB(Add, TurboFanCodeStub);
+};
+
+class SubtractStub final : public TurboFanCodeStub {
+ public:
+  explicit SubtractStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_CODE_STUB(Subtract, TurboFanCodeStub);
+};
+
+class BitwiseAndStub final : public TurboFanCodeStub {
+ public:
+  explicit BitwiseAndStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
+};
+
+class BitwiseOrStub final : public TurboFanCodeStub {
+ public:
+  explicit BitwiseOrStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_CODE_STUB(BitwiseOr, TurboFanCodeStub);
+};
+
+class BitwiseXorStub final : public TurboFanCodeStub {
+ public:
+  explicit BitwiseXorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_CODE_STUB(BitwiseXor, TurboFanCodeStub);
+};
+
+class LessThanStub final : public TurboFanCodeStub {
+ public:
+  explicit LessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(LessThan, TurboFanCodeStub);
+};
+
+class LessThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit LessThanOrEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
+};
+
+class GreaterThanStub final : public TurboFanCodeStub {
+ public:
+  explicit GreaterThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(GreaterThan, TurboFanCodeStub);
+};
+
+class GreaterThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit GreaterThanOrEqualStub(Isolate* isolate)
+      : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
+};
+
+class EqualStub final : public TurboFanCodeStub {
+ public:
+  explicit EqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(Equal, TurboFanCodeStub);
+};
+
+class NotEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit NotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(NotEqual, TurboFanCodeStub);
+};
+
+class StrictEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit StrictEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StrictEqual, TurboFanCodeStub);
+};
+
+class StrictNotEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit StrictNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
+};
+
+class StringEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit StringEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StringEqual, TurboFanCodeStub);
+};
+
+class StringNotEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit StringNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StringNotEqual, TurboFanCodeStub);
+};
+
+class StringLessThanStub final : public TurboFanCodeStub {
+ public:
+  explicit StringLessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StringLessThan, TurboFanCodeStub);
+};
+
+class StringLessThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit StringLessThanOrEqualStub(Isolate* isolate)
+      : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StringLessThanOrEqual, TurboFanCodeStub);
+};
+
+class StringGreaterThanStub final : public TurboFanCodeStub {
+ public:
+  explicit StringGreaterThanStub(Isolate* isolate)
+      : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StringGreaterThan, TurboFanCodeStub);
+};
+
+class StringGreaterThanOrEqualStub final : public TurboFanCodeStub {
+ public:
+  explicit StringGreaterThanOrEqualStub(Isolate* isolate)
+      : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(StringGreaterThanOrEqual, TurboFanCodeStub);
+};
+
+class ToBooleanStub final : public TurboFanCodeStub {
+ public:
+  explicit ToBooleanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+  DEFINE_TURBOFAN_CODE_STUB(ToBoolean, TurboFanCodeStub);
+};
+
+class ToIntegerStub final : public TurboFanCodeStub {
+ public:
+  explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+  DEFINE_TURBOFAN_CODE_STUB(ToInteger, TurboFanCodeStub);
+};
+
+class ToLengthStub final : public TurboFanCodeStub {
+ public:
+  explicit ToLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+  DEFINE_TURBOFAN_CODE_STUB(ToLength, TurboFanCodeStub);
+};
+
+class StoreInterceptorStub : public TurboFanCodeStub {
+ public:
+  explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+  DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
+};
+
+class LoadIndexedInterceptorStub : public TurboFanCodeStub {
+ public:
+  explicit LoadIndexedInterceptorStub(Isolate* isolate)
+      : TurboFanCodeStub(isolate) {}
+
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
+  DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
+};
 
 enum StringAddFlags {
   // Omit both parameter checks.
@@ -658,7 +902,7 @@
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kNumber = 0;
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(NumberToString);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
   DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
 };
 
@@ -873,12 +1117,29 @@
   DEFINE_HYDROGEN_CODE_STUB(GrowArrayElements, HydrogenCodeStub);
 };
 
+class FastArrayPushStub : public HydrogenCodeStub {
+ public:
+  explicit FastArrayPushStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ private:
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastArrayPush);
+  DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
+};
 
 class InstanceOfStub final : public PlatformCodeStub {
  public:
-  explicit InstanceOfStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+  explicit InstanceOfStub(Isolate* isolate, bool es6_instanceof = false)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = IsES6InstanceOfBits::encode(es6_instanceof);
+  }
+
+  bool is_es6_instanceof() const {
+    return IsES6InstanceOfBits::decode(minor_key_);
+  }
 
  private:
+  class IsES6InstanceOfBits : public BitField<bool, 0, 1> {};
+
   DEFINE_CALL_INTERFACE_DESCRIPTOR(InstanceOf);
   DEFINE_PLATFORM_CODE_STUB(InstanceOf, PlatformCodeStub);
 };
@@ -1013,20 +1274,6 @@
 };
 
 
-// TODO(mvstanton): Translate to hydrogen code stub.
-class LoadIndexedInterceptorStub : public PlatformCodeStub {
- public:
-  explicit LoadIndexedInterceptorStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
-
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
-  DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
-};
-
-
 class LoadIndexedStringStub : public PlatformCodeStub {
  public:
   explicit LoadIndexedStringStub(Isolate* isolate)
@@ -1418,48 +1665,36 @@
   DEFINE_PLATFORM_CODE_STUB(StoreGlobalViaContext, PlatformCodeStub);
 };
 
-
-class CallApiFunctionStub : public PlatformCodeStub {
+class CallApiCallbackStub : public PlatformCodeStub {
  public:
-  explicit CallApiFunctionStub(Isolate* isolate, bool call_data_undefined)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = CallDataUndefinedBits::encode(call_data_undefined);
+  static const int kArgBits = 3;
+  static const int kArgMax = (1 << kArgBits) - 1;
+
+  // CallApiCallbackStub for regular setters and getters.
+  CallApiCallbackStub(Isolate* isolate, bool is_store, bool call_data_undefined,
+                      bool is_lazy)
+      : CallApiCallbackStub(isolate, is_store ? 1 : 0, is_store,
+                            call_data_undefined, is_lazy) {}
+
+  // CallApiCallbackStub for callback functions.
+  CallApiCallbackStub(Isolate* isolate, int argc, bool call_data_undefined)
+      : CallApiCallbackStub(isolate, argc, false, call_data_undefined, false) {}
+
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+    return ApiCallbackDescriptorBase::ForArgs(isolate(), argc());
   }
 
  private:
-  bool call_data_undefined() const {
-    return CallDataUndefinedBits::decode(minor_key_);
-  }
-
-  class CallDataUndefinedBits : public BitField<bool, 0, 1> {};
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiFunction);
-  DEFINE_PLATFORM_CODE_STUB(CallApiFunction, PlatformCodeStub);
-};
-
-
-class CallApiAccessorStub : public PlatformCodeStub {
- public:
-  CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined,
-                      bool is_lazy)
+  CallApiCallbackStub(Isolate* isolate, int argc, bool is_store,
+                      bool call_data_undefined, bool is_lazy)
       : PlatformCodeStub(isolate) {
+    CHECK(0 <= argc && argc <= kArgMax);
     minor_key_ = IsStoreBits::encode(is_store) |
                  CallDataUndefinedBits::encode(call_data_undefined) |
-                 ArgumentBits::encode(is_store ? 1 : 0) |
+                 ArgumentBits::encode(argc) |
                  IsLazyAccessorBits::encode(is_lazy);
   }
 
- protected:
-  // For CallApiFunctionWithFixedArgsStub, see below.
-  static const int kArgBits = 3;
-  CallApiAccessorStub(Isolate* isolate, int argc, bool call_data_undefined)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = IsStoreBits::encode(false) |
-                 CallDataUndefinedBits::encode(call_data_undefined) |
-                 ArgumentBits::encode(argc);
-  }
-
- private:
   bool is_store() const { return IsStoreBits::decode(minor_key_); }
   bool is_lazy() const { return IsLazyAccessorBits::decode(minor_key_); }
   bool call_data_undefined() const {
@@ -1472,29 +1707,10 @@
   class ArgumentBits : public BitField<int, 2, kArgBits> {};
   class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiAccessor);
-  DEFINE_PLATFORM_CODE_STUB(CallApiAccessor, PlatformCodeStub);
+  DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
 };
 
 
-// TODO(dcarney): see if it's possible to remove this later without performance
-// degradation.
-// This is not a real stub, but a way of generating the CallApiAccessorStub
-// (which has the same abi) which makes it clear that it is not an accessor.
-class CallApiFunctionWithFixedArgsStub : public CallApiAccessorStub {
- public:
-  static const int kMaxFixedArgs = (1 << kArgBits) - 1;
-  CallApiFunctionWithFixedArgsStub(Isolate* isolate, int argc,
-                                   bool call_data_undefined)
-      : CallApiAccessorStub(isolate, argc, call_data_undefined) {
-    DCHECK(0 <= argc && argc <= kMaxFixedArgs);
-  }
-};
-
-
-typedef ApiAccessorDescriptor ApiFunctionWithFixedArgsDescriptor;
-
-
 class CallApiGetterStub : public PlatformCodeStub {
  public:
   explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -1701,96 +1917,6 @@
 };
 
 
-class CompareNilICStub : public HydrogenCodeStub  {
- public:
-  Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>());
-  Type* GetInputType(Zone* zone, Handle<Map> map);
-
-  CompareNilICStub(Isolate* isolate, NilValue nil) : HydrogenCodeStub(isolate) {
-    set_sub_minor_key(NilValueBits::encode(nil));
-  }
-
-  CompareNilICStub(Isolate* isolate, ExtraICState ic_state,
-                   InitializationState init_state = INITIALIZED)
-      : HydrogenCodeStub(isolate, init_state) {
-    set_sub_minor_key(ic_state);
-  }
-
-  static Handle<Code> GetUninitialized(Isolate* isolate,
-                                       NilValue nil) {
-    return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
-  }
-
-  InlineCacheState GetICState() const override {
-    State state = this->state();
-    if (state.Contains(GENERIC)) {
-      return MEGAMORPHIC;
-    } else if (state.Contains(MONOMORPHIC_MAP)) {
-      return MONOMORPHIC;
-    } else {
-      return PREMONOMORPHIC;
-    }
-  }
-
-  Code::Kind GetCodeKind() const override { return Code::COMPARE_NIL_IC; }
-
-  ExtraICState GetExtraICState() const override { return sub_minor_key(); }
-
-  void UpdateStatus(Handle<Object> object);
-
-  bool IsMonomorphic() const { return state().Contains(MONOMORPHIC_MAP); }
-
-  NilValue nil_value() const { return NilValueBits::decode(sub_minor_key()); }
-
-  void ClearState() {
-    set_sub_minor_key(TypesBits::update(sub_minor_key(), 0));
-  }
-
-  void PrintState(std::ostream& os) const override;     // NOLINT
-  void PrintBaseName(std::ostream& os) const override;  // NOLINT
-
- private:
-  CompareNilICStub(Isolate* isolate, NilValue nil,
-                   InitializationState init_state)
-      : HydrogenCodeStub(isolate, init_state) {
-    set_sub_minor_key(NilValueBits::encode(nil));
-  }
-
-  enum CompareNilType {
-    UNDEFINED,
-    NULL_TYPE,
-    MONOMORPHIC_MAP,
-    GENERIC,
-    NUMBER_OF_TYPES
-  };
-
-  // At most 6 different types can be distinguished, because the Code object
-  // only has room for a single byte to hold a set and there are two more
-  // boolean flags we need to store. :-P
-  STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
-
-  class State : public EnumSet<CompareNilType, byte> {
-   public:
-    State() : EnumSet<CompareNilType, byte>(0) { }
-    explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
-  };
-  friend std::ostream& operator<<(std::ostream& os, const State& s);
-
-  State state() const { return State(TypesBits::decode(sub_minor_key())); }
-
-  class NilValueBits : public BitField<NilValue, 0, 1> {};
-  class TypesBits : public BitField<byte, 1, NUMBER_OF_TYPES> {};
-
-  friend class CompareNilIC;
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(CompareNil);
-  DEFINE_HYDROGEN_CODE_STUB(CompareNilIC, HydrogenCodeStub);
-};
-
-
-std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s);
-
-
 class CEntryStub : public PlatformCodeStub {
  public:
   CEntryStub(Isolate* isolate, int result_size,
@@ -2499,28 +2625,45 @@
   DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
 };
 
-
-class AllocateHeapNumberStub final : public HydrogenCodeStub {
+class AllocateHeapNumberStub : public TurboFanCodeStub {
  public:
   explicit AllocateHeapNumberStub(Isolate* isolate)
-      : HydrogenCodeStub(isolate) {}
+      : TurboFanCodeStub(isolate) {}
 
- private:
+  void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
+  void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+
   DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
-  DEFINE_HYDROGEN_CODE_STUB(AllocateHeapNumber, HydrogenCodeStub);
+  DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
 };
 
-
-class AllocateMutableHeapNumberStub final : public HydrogenCodeStub {
+class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
  public:
   explicit AllocateMutableHeapNumberStub(Isolate* isolate)
-      : HydrogenCodeStub(isolate) {}
+      : TurboFanCodeStub(isolate) {}
 
- private:
+  void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
+  void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+
   DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
-  DEFINE_HYDROGEN_CODE_STUB(AllocateMutableHeapNumber, HydrogenCodeStub);
+  DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
 };
 
+#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type)     \
+  class Allocate##Type##Stub : public TurboFanCodeStub {                \
+   public:                                                              \
+    explicit Allocate##Type##Stub(Isolate* isolate)                     \
+        : TurboFanCodeStub(isolate) {}                                  \
+                                                                        \
+    void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
+    void GenerateAssembly(                                              \
+        compiler::CodeStubAssembler* assembler) const override;         \
+                                                                        \
+    DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type);                   \
+    DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub);                 \
+  };
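+// SIMD128_TYPES instantiates the macro once per SIMD type; for example, the
+// Float32x4 entry defines AllocateFloat32x4Stub bound to the
+// AllocateFloat32x4 call interface descriptor.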
+SIMD128_TYPES(SIMD128_ALLOC_STUB)
+#undef SIMD128_ALLOC_STUB
 
 class AllocateInNewSpaceStub final : public HydrogenCodeStub {
  public:
@@ -2727,8 +2870,7 @@
   DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
 };
 
-
-class ToBooleanStub: public HydrogenCodeStub {
+class ToBooleanICStub : public HydrogenCodeStub {
  public:
   enum Type {
     UNDEFINED,
@@ -2755,14 +2897,14 @@
     bool UpdateStatus(Handle<Object> object);
     bool NeedsMap() const;
     bool CanBeUndetectable() const {
-      return Contains(ToBooleanStub::SPEC_OBJECT);
+      return Contains(ToBooleanICStub::SPEC_OBJECT);
     }
     bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
 
     static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
   };
 
-  ToBooleanStub(Isolate* isolate, ExtraICState state)
+  ToBooleanICStub(Isolate* isolate, ExtraICState state)
       : HydrogenCodeStub(isolate) {
     set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)));
   }
@@ -2776,7 +2918,7 @@
   bool SometimesSetsUpAFrame() override { return false; }
 
   static Handle<Code> GetUninitialized(Isolate* isolate) {
-    return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
+    return ToBooleanICStub(isolate, UNINITIALIZED).GetCode();
   }
 
   ExtraICState GetExtraICState() const override { return types().ToIntegral(); }
@@ -2790,19 +2932,16 @@
   }
 
  private:
-  ToBooleanStub(Isolate* isolate, InitializationState init_state)
-      : HydrogenCodeStub(isolate, init_state) {
-  }
+  ToBooleanICStub(Isolate* isolate, InitializationState init_state)
+      : HydrogenCodeStub(isolate, init_state) {}
 
   class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
-  DEFINE_HYDROGEN_CODE_STUB(ToBoolean, HydrogenCodeStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+  DEFINE_HYDROGEN_CODE_STUB(ToBooleanIC, HydrogenCodeStub);
 };
 
-
-std::ostream& operator<<(std::ostream& os, const ToBooleanStub::Types& t);
-
+std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& t);
 
 class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
  public:
@@ -2910,17 +3049,25 @@
  public:
   explicit ToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToNumber);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
   DEFINE_PLATFORM_CODE_STUB(ToNumber, PlatformCodeStub);
 };
 
-
-class ToLengthStub final : public PlatformCodeStub {
+class NonNumberToNumberStub final : public PlatformCodeStub {
  public:
-  explicit ToLengthStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+  explicit NonNumberToNumberStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToLength);
-  DEFINE_PLATFORM_CODE_STUB(ToLength, PlatformCodeStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+  DEFINE_PLATFORM_CODE_STUB(NonNumberToNumber, PlatformCodeStub);
+};
+
+class StringToNumberStub final : public PlatformCodeStub {
+ public:
+  explicit StringToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
+  DEFINE_PLATFORM_CODE_STUB(StringToNumber, PlatformCodeStub);
 };
 
 
@@ -2928,7 +3075,7 @@
  public:
   explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToString);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
   DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
 };
 
@@ -2937,7 +3084,7 @@
  public:
   explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToName);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
   DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
 };
 
@@ -2946,20 +3093,10 @@
  public:
   explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToObject);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
   DEFINE_HYDROGEN_CODE_STUB(ToObject, HydrogenCodeStub);
 };
 
-
-class StringCompareStub : public PlatformCodeStub {
- public:
-  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StringCompare);
-  DEFINE_PLATFORM_CODE_STUB(StringCompare, PlatformCodeStub);
-};
-
-
 #undef DEFINE_CALL_INTERFACE_DESCRIPTOR
 #undef DEFINE_PLATFORM_CODE_STUB
 #undef DEFINE_HANDLER_CODE_STUB
diff --git a/src/codegen.h b/src/codegen.h
index 512cbfc..f941696 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -56,6 +56,8 @@
 #include "src/mips/codegen-mips.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/mips64/codegen-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/codegen-s390.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/codegen-x87.h"  // NOLINT
 #else
diff --git a/src/collector.h b/src/collector.h
new file mode 100644
index 0000000..8454aae
--- /dev/null
+++ b/src/collector.h
@@ -0,0 +1,247 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COLLECTOR_H_
+#define V8_COLLECTOR_H_
+
+#include "src/checks.h"
+#include "src/list.h"
+#include "src/vector.h"
+
+namespace v8 {
+namespace internal {
+
+/*
+ * A class that collects values into a backing store.
+ * Specialized versions of the class can allow access to the backing store
+ * in different ways.
+ * There is no guarantee that the backing store is contiguous (and, as a
+ * consequence, no guarantees that consecutively added elements are adjacent
+ * in memory). The collector may move elements unless it has guaranteed not
+ * to.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class Collector {
+ public:
+  explicit Collector(int initial_capacity = kMinCapacity)
+      : index_(0), size_(0) {
+    current_chunk_ = Vector<T>::New(initial_capacity);
+  }
+
+  virtual ~Collector() {
+    // Free backing store (in reverse allocation order).
+    current_chunk_.Dispose();
+    for (int i = chunks_.length() - 1; i >= 0; i--) {
+      chunks_.at(i).Dispose();
+    }
+  }
+
+  // Add a single element.
+  inline void Add(T value) {
+    if (index_ >= current_chunk_.length()) {
+      Grow(1);
+    }
+    current_chunk_[index_] = value;
+    index_++;
+    size_++;
+  }
+
+  // Add a block of contiguous elements and return a Vector backed by the
+  // memory area.
+  // A basic Collector will keep this vector valid as long as the Collector
+  // is alive.
+  inline Vector<T> AddBlock(int size, T initial_value) {
+    DCHECK(size > 0);
+    if (size > current_chunk_.length() - index_) {
+      Grow(size);
+    }
+    T* position = current_chunk_.start() + index_;
+    index_ += size;
+    size_ += size;
+    for (int i = 0; i < size; i++) {
+      position[i] = initial_value;
+    }
+    return Vector<T>(position, size);
+  }
+
+  // Add a contiguous block of elements and return a vector backed
+  // by the added block.
+  // A basic Collector will keep this vector valid as long as the Collector
+  // is alive.
+  inline Vector<T> AddBlock(Vector<const T> source) {
+    if (source.length() > current_chunk_.length() - index_) {
+      Grow(source.length());
+    }
+    T* position = current_chunk_.start() + index_;
+    index_ += source.length();
+    size_ += source.length();
+    for (int i = 0; i < source.length(); i++) {
+      position[i] = source[i];
+    }
+    return Vector<T>(position, source.length());
+  }
+
+  // Write the contents of the collector into the provided vector.
+  void WriteTo(Vector<T> destination) {
+    DCHECK(size_ <= destination.length());
+    int position = 0;
+    for (int i = 0; i < chunks_.length(); i++) {
+      Vector<T> chunk = chunks_.at(i);
+      for (int j = 0; j < chunk.length(); j++) {
+        destination[position] = chunk[j];
+        position++;
+      }
+    }
+    for (int i = 0; i < index_; i++) {
+      destination[position] = current_chunk_[i];
+      position++;
+    }
+  }
+
+  // Allocate a single contiguous vector, copy all the collected
+  // elements to the vector, and return it.
+  // The caller is responsible for freeing the memory of the returned
+  // vector (e.g., using Vector::Dispose).
+  Vector<T> ToVector() {
+    Vector<T> new_store = Vector<T>::New(size_);
+    WriteTo(new_store);
+    return new_store;
+  }
+
+  // Resets the collector to be empty.
+  virtual void Reset() {
+    for (int i = chunks_.length() - 1; i >= 0; i--) {
+      chunks_.at(i).Dispose();
+    }
+    chunks_.Rewind(0);
+    index_ = 0;
+    size_ = 0;
+  }
+
+  // Total number of elements added to collector so far.
+  inline int size() { return size_; }
+
+ protected:
+  static const int kMinCapacity = 16;
+  List<Vector<T> > chunks_;
+  Vector<T> current_chunk_;  // Block of memory currently being written into.
+  int index_;                // Current index in current chunk.
+  int size_;                 // Total number of elements in collector.
+
+  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
+  void Grow(int min_capacity) {
+    DCHECK(growth_factor > 1);
+    int new_capacity;
+    int current_length = current_chunk_.length();
+    if (current_length < kMinCapacity) {
+      // The collector started out as empty.
+      new_capacity = min_capacity * growth_factor;
+      if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
+    } else {
+      int growth = current_length * (growth_factor - 1);
+      if (growth > max_growth) {
+        growth = max_growth;
+      }
+      new_capacity = current_length + growth;
+      if (new_capacity < min_capacity) {
+        new_capacity = min_capacity + growth;
+      }
+    }
+    NewChunk(new_capacity);
+    DCHECK(index_ + min_capacity <= current_chunk_.length());
+  }
+
+  // Before replacing the current chunk, give a subclass the option to move
+  // some of the current data into the new chunk. The function may update
+  // the current index_ value to represent data no longer in the current chunk.
+  // Leaves index_ positioned in the new chunk after any copied data.
+  virtual void NewChunk(int new_capacity) {
+    Vector<T> new_chunk = Vector<T>::New(new_capacity);
+    if (index_ > 0) {
+      chunks_.Add(current_chunk_.SubVector(0, index_));
+    } else {
+      current_chunk_.Dispose();
+    }
+    current_chunk_ = new_chunk;
+    index_ = 0;
+  }
+};
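+
+// Usage sketch (illustrative only): collect values, then materialize one
+// contiguous vector that the caller owns.
+//
+//   Collector<int> collector;
+//   collector.Add(1);
+//   Vector<int> block = collector.AddBlock(3, 0);  // three zero-initialized slots
+//   Vector<int> all = collector.ToVector();        // contiguous copy of everything
+//   all.Dispose();                                 // caller frees the returned vector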
+
+/*
+ * A collector that allows sequences of values to be guaranteed to
+ * stay consecutive.
+ * If the backing store grows while a sequence is active, the current
+ * sequence might be moved, but after the sequence is ended, it will
+ * not move again.
+ * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
+ * as well, if inside an active sequence where another element is added.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class SequenceCollector : public Collector<T, growth_factor, max_growth> {
+ public:
+  explicit SequenceCollector(int initial_capacity)
+      : Collector<T, growth_factor, max_growth>(initial_capacity),
+        sequence_start_(kNoSequence) {}
+
+  virtual ~SequenceCollector() {}
+
+  void StartSequence() {
+    DCHECK(sequence_start_ == kNoSequence);
+    sequence_start_ = this->index_;
+  }
+
+  Vector<T> EndSequence() {
+    DCHECK(sequence_start_ != kNoSequence);
+    int sequence_start = sequence_start_;
+    sequence_start_ = kNoSequence;
+    if (sequence_start == this->index_) return Vector<T>();
+    return this->current_chunk_.SubVector(sequence_start, this->index_);
+  }
+
+  // Drops the currently added sequence, and all collected elements in it.
+  void DropSequence() {
+    DCHECK(sequence_start_ != kNoSequence);
+    int sequence_length = this->index_ - sequence_start_;
+    this->index_ = sequence_start_;
+    this->size_ -= sequence_length;
+    sequence_start_ = kNoSequence;
+  }
+
+  virtual void Reset() {
+    sequence_start_ = kNoSequence;
+    this->Collector<T, growth_factor, max_growth>::Reset();
+  }
+
+ private:
+  static const int kNoSequence = -1;
+  int sequence_start_;
+
+  // Move the currently active sequence to the new chunk.
+  virtual void NewChunk(int new_capacity) {
+    if (sequence_start_ == kNoSequence) {
+      // Fall back on default behavior if no sequence has been started.
+      this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
+      return;
+    }
+    int sequence_length = this->index_ - sequence_start_;
+    Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
+    DCHECK(sequence_length < new_chunk.length());
+    for (int i = 0; i < sequence_length; i++) {
+      new_chunk[i] = this->current_chunk_[sequence_start_ + i];
+    }
+    if (sequence_start_ > 0) {
+      this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
+    } else {
+      this->current_chunk_.Dispose();
+    }
+    this->current_chunk_ = new_chunk;
+    this->index_ = sequence_length;
+    sequence_start_ = 0;
+  }
+};
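+
+// Usage sketch (illustrative only): a finished sequence stays contiguous even
+// if the backing store grows while the sequence is being built.
+//
+//   SequenceCollector<int> seq(16);
+//   seq.StartSequence();
+//   seq.Add(1);
+//   seq.Add(2);
+//   Vector<int> pair = seq.EndSequence();  // stable, contiguous view of {1, 2}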
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COLLECTOR_H_
diff --git a/src/compiler.cc b/src/compiler.cc
index c47e1b7..8bb5332 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -31,7 +31,7 @@
 #include "src/parsing/scanner-character-streams.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/runtime-profiler.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/code-serializer.h"
 #include "src/vm-state-inl.h"
 
 namespace v8 {
@@ -66,13 +66,27 @@
 #undef PARSE_INFO_GETTER
 #undef PARSE_INFO_GETTER_WITH_DEFAULT
 
+// A wrapper around a CompilationInfo that detaches the Handles from
+// the underlying DeferredHandleScope and stores them in info_ on
+// destruction.
+class CompilationHandleScope BASE_EMBEDDED {
+ public:
+  explicit CompilationHandleScope(CompilationInfo* info)
+      : deferred_(info->isolate()), info_(info) {}
+  ~CompilationHandleScope() { info_->set_deferred_handles(deferred_.Detach()); }
+
+ private:
+  DeferredHandleScope deferred_;
+  CompilationInfo* info_;
+};
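+
+// Typical use (sketch): construct on the stack around a compilation phase so
+// that handles created during that phase are detached and handed over to the
+// CompilationInfo when the scope ends, e.g.
+//
+//   CompilationHandleScope handle_scope(info);
+//   // ... allocate handles while compiling ...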
 
 // Exactly like a CompilationInfo, except being allocated via {new} and it also
 // creates and enters a Zone on construction and deallocates it on destruction.
 class CompilationInfoWithZone : public CompilationInfo {
  public:
   explicit CompilationInfoWithZone(Handle<JSFunction> function)
-      : CompilationInfo(new ParseInfo(&zone_, function)) {}
+      : CompilationInfo(new ParseInfo(&zone_, function)),
+        zone_(function->GetIsolate()->allocator()) {}
 
   // Virtual destructor because a CompilationInfoWithZone has to exit the
   // zone scope and get rid of dependent maps even when the destructor is
@@ -88,6 +102,8 @@
   Zone zone_;
 };
 
+// ----------------------------------------------------------------------------
+// Implementation of CompilationInfo
 
 bool CompilationInfo::has_shared_info() const {
   return parse_info_ && !parse_info_->shared_info().is_null();
@@ -127,12 +143,6 @@
   if (FLAG_turbo_types) MarkAsTypingEnabled();
 
   if (has_shared_info()) {
-    if (shared_info()->is_compiled()) {
-      // We should initialize the CompilationInfo feedback vector from the
-      // passed in shared info, rather than creating a new one.
-      feedback_vector_ = Handle<TypeFeedbackVector>(
-          shared_info()->feedback_vector(), parse_info->isolate());
-    }
     if (shared_info()->never_compiled()) MarkAsFirstCompile();
   }
 }
@@ -206,20 +216,6 @@
 }
 
 
-void CompilationInfo::EnsureFeedbackVector() {
-  if (feedback_vector_.is_null()) {
-    Handle<TypeFeedbackMetadata> feedback_metadata =
-        TypeFeedbackMetadata::New(isolate(), literal()->feedback_vector_spec());
-    feedback_vector_ = TypeFeedbackVector::New(isolate(), feedback_metadata);
-  }
-
-  // It's very important that recompiles do not alter the structure of the
-  // type feedback vector.
-  CHECK(!feedback_vector_->metadata()->SpecDiffersFrom(
-      literal()->feedback_vector_spec()));
-}
-
-
 bool CompilationInfo::has_simple_parameters() {
   return scope()->has_simple_parameters();
 }
@@ -293,11 +289,38 @@
   return name;
 }
 
+StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
+  switch (output_code_kind()) {
+    case Code::STUB:
+    case Code::BYTECODE_HANDLER:
+    case Code::HANDLER:
+    case Code::BUILTIN:
+      return StackFrame::STUB;
+    case Code::WASM_FUNCTION:
+      return StackFrame::WASM;
+    case Code::JS_TO_WASM_FUNCTION:
+      return StackFrame::JS_TO_WASM;
+    case Code::WASM_TO_JS_FUNCTION:
+      return StackFrame::WASM_TO_JS;
+    default:
+      UNIMPLEMENTED();
+      return StackFrame::NONE;
+  }
+}
 
 bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
   return is_sloppy(language_mode()) && !is_native();
 }
 
+#if DEBUG
+void CompilationInfo::PrintAstForTesting() {
+  PrintF("--- Source from AST ---\n%s\n",
+         PrettyPrinter(isolate()).PrintProgram(literal()));
+}
+#endif
+
+// ----------------------------------------------------------------------------
+// Implementation of OptimizedCompileJob
 
 class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
  public:
@@ -352,6 +375,14 @@
     return AbortOptimization(kFunctionBeingDebugged);
   }
 
+  // Resuming a suspended frame is not supported by Crankshaft/TurboFan.
+  if (info()->shared_info()->HasBuiltinFunctionId() &&
+      (info()->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
+       info()->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
+       info()->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
+    return AbortOptimization(kGeneratorResumeMethod);
+  }
+
   // Limit the number of times we try to optimize functions.
   const int kMaxOptCount =
       FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
@@ -360,7 +391,7 @@
   }
 
   // Check the whitelist for Crankshaft.
-  if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
+  if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
     return AbortOptimization(kHydrogenFilter);
   }
 
@@ -401,7 +432,8 @@
       !optimization_disabled;
 
   // 3. Explicitly enabled by the command-line filter.
-  bool passes_turbo_filter = info()->closure()->PassesFilter(FLAG_turbo_filter);
+  bool passes_turbo_filter =
+      info()->shared_info()->PassesFilter(FLAG_turbo_filter);
 
   // If this is OSR request, OSR must be enabled by Turbofan.
   bool passes_osr_test = FLAG_turbo_osr || !info()->is_osr();
@@ -461,11 +493,6 @@
     return AbortOptimization(kTooManyParametersLocals);
   }
 
-  if (scope->HasIllegalRedeclaration()) {
-    // Crankshaft cannot handle illegal redeclarations.
-    return AbortOptimization(kFunctionWithIllegalRedeclaration);
-  }
-
   if (FLAG_trace_opt) {
     OFStream os(stdout);
     os << "[compiling method " << Brief(*info()->closure())
@@ -490,13 +517,13 @@
         info()->shared_info()->disable_optimization_reason());
   }
 
-  graph_builder_ = (info()->is_tracking_positions() || FLAG_trace_ic)
-                       ? new (info()->zone())
-                             HOptimizedGraphBuilderWithPositions(info())
-                       : new (info()->zone()) HOptimizedGraphBuilder(info());
+  HOptimizedGraphBuilder* graph_builder =
+      (info()->is_tracking_positions() || FLAG_trace_ic)
+          ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
+          : new (info()->zone()) HOptimizedGraphBuilder(info());
 
   Timer t(this, &time_taken_to_create_graph_);
-  graph_ = graph_builder_->CreateGraph();
+  graph_ = graph_builder->CreateGraph();
 
   if (isolate()->has_pending_exception()) {
     return SetLastStatus(FAILED);
@@ -533,7 +560,7 @@
     chunk_ = LChunk::NewChunk(graph_);
     if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
   } else if (bailout_reason != kNoReason) {
-    graph_builder_->Bailout(bailout_reason);
+    info_->AbortOptimization(bailout_reason);
   }
 
   return SetLastStatus(BAILED_OUT);
@@ -674,6 +701,10 @@
   }
 }
 
+// ----------------------------------------------------------------------------
+// Local helper methods that make up the compilation pipeline.
+
+namespace {
 
 // Sets the expected number of properties based on estimate from compiler.
 void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
@@ -696,18 +727,16 @@
   shared->set_expected_nof_properties(estimate);
 }
 
-
-static void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
-                                     BailoutReason bailout_reason) {
+void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
+                              BailoutReason bailout_reason) {
   if (bailout_reason != kNoReason) {
     shared_info->DisableOptimization(bailout_reason);
   }
 }
 
-
-static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
-                                      CompilationInfo* info,
-                                      Handle<SharedFunctionInfo> shared) {
+void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                               CompilationInfo* info,
+                               Handle<SharedFunctionInfo> shared) {
   // SharedFunctionInfo is passed separately, because if CompilationInfo
   // was created using Script object, it will not have it.
 
@@ -717,8 +746,9 @@
   if (info->isolate()->logger()->is_logging_code_events() ||
       info->isolate()->cpu_profiler()->is_profiling()) {
     Handle<Script> script = info->parse_info()->script();
-    Handle<Code> code = info->code();
-    if (code.is_identical_to(info->isolate()->builtins()->CompileLazy())) {
+    Handle<AbstractCode> abstract_code = info->abstract_code();
+    if (abstract_code.is_identical_to(
+            info->isolate()->builtins()->CompileLazy())) {
       return;
     }
     int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
@@ -729,15 +759,37 @@
                               : info->isolate()->heap()->empty_string();
     Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
     PROFILE(info->isolate(),
-            CodeCreateEvent(log_tag, *code, *shared, info, script_name,
+            CodeCreateEvent(log_tag, *abstract_code, *shared, info, script_name,
                             line_num, column_num));
   }
 }
 
-static bool CompileUnoptimizedCode(CompilationInfo* info) {
+void EnsureFeedbackVector(CompilationInfo* info) {
+  if (!info->has_shared_info()) return;
+
+  // If no type feedback vector exists, we create one now. At this point the
+  // AstNumbering pass has already run. Note the snapshot can contain outdated
+  // vectors for a different configuration, hence we also recreate a new vector
+  // when the function is not compiled (i.e. no code was serialized).
+  if (info->shared_info()->feedback_vector()->is_empty() ||
+      !info->shared_info()->is_compiled()) {
+    Handle<TypeFeedbackMetadata> feedback_metadata = TypeFeedbackMetadata::New(
+        info->isolate(), info->literal()->feedback_vector_spec());
+    Handle<TypeFeedbackVector> feedback_vector =
+        TypeFeedbackVector::New(info->isolate(), feedback_metadata);
+    info->shared_info()->set_feedback_vector(*feedback_vector);
+  }
+
+  // It's very important that recompiles do not alter the structure of the type
+  // feedback vector. Verify that the structure fits the function literal.
+  CHECK(!info->shared_info()->feedback_vector()->metadata()->SpecDiffersFrom(
+      info->literal()->feedback_vector_spec()));
+}
+
+bool CompileUnoptimizedCode(CompilationInfo* info) {
   DCHECK(AllowCompilation::IsAllowed(info->isolate()));
   if (!Compiler::Analyze(info->parse_info()) ||
-      !FullCodeGenerator::MakeCode(info)) {
+      !(EnsureFeedbackVector(info), FullCodeGenerator::MakeCode(info))) {
     Isolate* isolate = info->isolate();
     if (!isolate->has_pending_exception()) isolate->StackOverflow();
     return false;
@@ -745,24 +797,31 @@
   return true;
 }
 
+bool UseIgnition(CompilationInfo* info) {
+  // TODO(4681): Generator functions are not yet supported.
+  if (info->shared_info()->is_generator()) {
+    return false;
+  }
 
-static bool UseIgnition(CompilationInfo* info) {
-  // Cannot use Ignition when the {function_data} is already used.
-  if (info->has_shared_info() && info->shared_info()->HasBuiltinFunctionId()) {
+  // TODO(4681): Resuming a suspended frame is not supported.
+  if (info->shared_info()->HasBuiltinFunctionId() &&
+      (info->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
+       info->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
+       info->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
     return false;
   }
 
   // Checks whether top level functions should be passed by the filter.
-  if (info->closure().is_null()) {
+  if (info->shared_info()->is_toplevel()) {
     Vector<const char> filter = CStrVector(FLAG_ignition_filter);
     return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
   }
 
   // Finally respect the filter.
-  return info->closure()->PassesFilter(FLAG_ignition_filter);
+  return info->shared_info()->PassesFilter(FLAG_ignition_filter);
 }
 
-static int CodeAndMetadataSize(CompilationInfo* info) {
+int CodeAndMetadataSize(CompilationInfo* info) {
   int size = 0;
   if (info->has_bytecode_array()) {
     Handle<BytecodeArray> bytecode_array = info->bytecode_array();
@@ -780,9 +839,9 @@
   return size;
 }
 
-
-static bool GenerateBaselineCode(CompilationInfo* info) {
+bool GenerateBaselineCode(CompilationInfo* info) {
   bool success;
+  EnsureFeedbackVector(info);
   if (FLAG_ignition && UseIgnition(info)) {
     success = interpreter::Interpreter::MakeBytecode(info);
   } else {
@@ -797,8 +856,7 @@
   return success;
 }
 
-
-static bool CompileBaselineCode(CompilationInfo* info) {
+bool CompileBaselineCode(CompilationInfo* info) {
   DCHECK(AllowCompilation::IsAllowed(info->isolate()));
   if (!Compiler::Analyze(info->parse_info()) || !GenerateBaselineCode(info)) {
     Isolate* isolate = info->isolate();
@@ -808,8 +866,21 @@
   return true;
 }
 
+void InstallBaselineCompilationResult(CompilationInfo* info,
+                                      Handle<SharedFunctionInfo> shared,
+                                      Handle<ScopeInfo> scope_info) {
+  // Assert that we are not overwriting (possibly patched) debug code.
+  DCHECK(!shared->HasDebugCode());
+  DCHECK(!info->code().is_null());
+  shared->ReplaceCode(*info->code());
+  shared->set_scope_info(*scope_info);
+  if (info->has_bytecode_array()) {
+    DCHECK(!shared->HasBytecodeArray());  // Only compiled once.
+    shared->set_bytecode_array(*info->bytecode_array());
+  }
+}
 
-MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCodeCommon(
     CompilationInfo* info) {
   VMState<COMPILER> state(info->isolate());
   PostponeInterruptsScope postpone(info->isolate());
@@ -824,29 +895,20 @@
 
   // Compile either unoptimized code or bytecode for the interpreter.
   if (!CompileBaselineCode(info)) return MaybeHandle<Code>();
-  if (info->code()->kind() == Code::FUNCTION) {  // Only for full code.
-    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
-  }
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
 
   // Update the shared function info with the scope info. Allocating the
   // ScopeInfo object may cause a GC.
   Handle<ScopeInfo> scope_info =
       ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
-  shared->set_scope_info(*scope_info);
 
-  // Update the code and feedback vector for the shared function info.
-  shared->ReplaceCode(*info->code());
-  shared->set_feedback_vector(*info->feedback_vector());
-  if (info->has_bytecode_array()) {
-    DCHECK(shared->function_data()->IsUndefined());
-    shared->set_function_data(*info->bytecode_array());
-  }
+  // Install compilation result on the shared function info
+  InstallBaselineCompilationResult(info, shared, scope_info);
 
   return info->code();
 }
 
-
-MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
+MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
     Handle<JSFunction> function, BailoutId osr_ast_id) {
   Handle<SharedFunctionInfo> shared(function->shared());
   DisallowHeapAllocation no_gc;
@@ -862,8 +924,7 @@
   return MaybeHandle<Code>();
 }
 
-
-static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
   Handle<Code> code = info->code();
   if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
 
@@ -894,8 +955,7 @@
   }
 }
 
-
-static bool Renumber(ParseInfo* parse_info) {
+bool Renumber(ParseInfo* parse_info) {
   if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
                               parse_info->literal())) {
     return false;
@@ -905,30 +965,14 @@
     FunctionLiteral* lit = parse_info->literal();
     shared_info->set_ast_node_count(lit->ast_node_count());
     MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
-    shared_info->set_dont_crankshaft(lit->flags() &
-                                     AstProperties::kDontCrankshaft);
+    shared_info->set_dont_crankshaft(
+        shared_info->dont_crankshaft() ||
+        (lit->flags() & AstProperties::kDontCrankshaft));
   }
   return true;
 }
 
-
-bool Compiler::Analyze(ParseInfo* info) {
-  DCHECK_NOT_NULL(info->literal());
-  if (!Rewriter::Rewrite(info)) return false;
-  if (!Scope::Analyze(info)) return false;
-  if (!Renumber(info)) return false;
-  DCHECK_NOT_NULL(info->scope());
-  return true;
-}
-
-
-bool Compiler::ParseAndAnalyze(ParseInfo* info) {
-  if (!Parser::ParseStatic(info)) return false;
-  return Compiler::Analyze(info);
-}
-
-
-static bool GetOptimizedCodeNow(CompilationInfo* info) {
+bool GetOptimizedCodeNow(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   CanonicalHandleScope canonical(isolate);
   TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
@@ -959,8 +1003,7 @@
   return true;
 }
 
-
-static bool GetOptimizedCodeLater(CompilationInfo* info) {
+bool GetOptimizedCodeLater(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   CanonicalHandleScope canonical(isolate);
   TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
@@ -1002,8 +1045,7 @@
   return true;
 }
 
-
-MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
+MaybeHandle<Code> GetUnoptimizedCode(Handle<JSFunction> function) {
   DCHECK(!function->GetIsolate()->has_pending_exception());
   DCHECK(!function->is_compiled());
   if (function->shared()->is_compiled()) {
@@ -1018,8 +1060,69 @@
   return result;
 }
 
+MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
+                                   Compiler::ConcurrencyMode mode,
+                                   BailoutId osr_ast_id = BailoutId::None(),
+                                   JavaScriptFrame* osr_frame = nullptr) {
+  Isolate* isolate = function->GetIsolate();
+  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+  if (shared->HasDebugInfo()) return MaybeHandle<Code>();
 
-MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
+  Handle<Code> cached_code;
+  if (GetCodeFromOptimizedCodeMap(function, osr_ast_id)
+          .ToHandle(&cached_code)) {
+    if (FLAG_trace_opt) {
+      PrintF("[found optimized code for ");
+      function->ShortPrint();
+      if (!osr_ast_id.IsNone()) {
+        PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+      }
+      PrintF("]\n");
+    }
+    return cached_code;
+  }
+
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  if (shared->is_compiled()) {
+    shared->code()->set_profiler_ticks(0);
+  }
+
+  // TODO(mstarzinger): We cannot properly deserialize a scope chain containing
+  // an eval scope and hence would fail at parsing the eval source again.
+  if (shared->disable_optimization_reason() == kEval) {
+    return MaybeHandle<Code>();
+  }
+
+  // TODO(mstarzinger): We cannot properly deserialize a scope chain for the
+  // builtin context, hence Genesis::InstallExperimentalNatives would fail.
+  if (shared->is_toplevel() && isolate->bootstrapper()->IsActive()) {
+    return MaybeHandle<Code>();
+  }
+
+  base::SmartPointer<CompilationInfo> info(
+      new CompilationInfoWithZone(function));
+  VMState<COMPILER> state(isolate);
+  DCHECK(!isolate->has_pending_exception());
+  PostponeInterruptsScope postpone(isolate);
+
+  info->SetOptimizingForOsr(osr_ast_id);
+
+  if (mode == Compiler::CONCURRENT) {
+    if (GetOptimizedCodeLater(info.get())) {
+      info.Detach();  // The background recompile job owns this now.
+      return isolate->builtins()->InOptimizationQueue();
+    }
+  } else {
+    info->set_osr_frame(osr_frame);
+    if (GetOptimizedCodeNow(info.get())) return info->code();
+  }
+
+  if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+  return MaybeHandle<Code>();
+}
+
+MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
   Isolate* isolate = function->GetIsolate();
   DCHECK(!isolate->has_pending_exception());
   DCHECK(!function->is_compiled());
@@ -1058,7 +1161,7 @@
 
   if (FLAG_always_opt) {
     Handle<Code> opt_code;
-    if (Compiler::GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+    if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
             .ToHandle(&opt_code)) {
       result = opt_code;
     }
@@ -1068,73 +1171,12 @@
 }
 
 
-bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
-  if (function->is_compiled()) return true;
-  MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
-  Handle<Code> code;
-  if (!maybe_code.ToHandle(&code)) {
-    if (flag == CLEAR_EXCEPTION) {
-      function->GetIsolate()->clear_pending_exception();
-    }
-    return false;
-  }
-  function->ReplaceCode(*code);
-  DCHECK(function->is_compiled());
-  return true;
-}
-
-
-// TODO(turbofan): In the future, unoptimized code with deopt support could
-// be generated lazily once deopt is triggered.
-bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
-  DCHECK_NOT_NULL(info->literal());
-  DCHECK(info->has_scope());
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  if (!shared->has_deoptimization_support()) {
-    // TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
-    CompilationInfoWithZone unoptimized(info->closure());
-    // Note that we use the same AST that we will use for generating the
-    // optimized code.
-    ParseInfo* parse_info = unoptimized.parse_info();
-    parse_info->set_literal(info->literal());
-    parse_info->set_scope(info->scope());
-    parse_info->set_context(info->context());
-    unoptimized.EnableDeoptimizationSupport();
-    // If the current code has reloc info for serialization, also include
-    // reloc info for serialization for the new code, so that deopt support
-    // can be added without losing IC state.
-    if (shared->code()->kind() == Code::FUNCTION &&
-        shared->code()->has_reloc_info_for_serialization()) {
-      unoptimized.PrepareForSerializing();
-    }
-    if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
-
-    shared->EnableDeoptimizationSupport(*unoptimized.code());
-    shared->set_feedback_vector(*unoptimized.feedback_vector());
-
-    info->MarkAsCompiled();
-
-    // The scope info might not have been set if a lazily compiled
-    // function is inlined before being called for the first time.
-    if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
-      Handle<ScopeInfo> target_scope_info =
-          ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
-      shared->set_scope_info(*target_scope_info);
-    }
-
-    // The existing unoptimized code was replaced with the new one.
-    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
-  }
-  return true;
-}
-
-
 bool CompileEvalForDebugging(Handle<JSFunction> function,
                              Handle<SharedFunctionInfo> shared) {
   Handle<Script> script(Script::cast(shared->script()));
   Handle<Context> context(function->context());
 
-  Zone zone;
+  Zone zone(function->GetIsolate()->allocator());
   ParseInfo parse_info(&zone, script);
   CompilationInfo info(&parse_info);
   Isolate* isolate = info.isolate();
@@ -1176,61 +1218,25 @@
   return true;
 }
 
-
-static inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
+inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
   return shared->is_toplevel() && shared->script()->IsScript() &&
          Script::cast(shared->script())->compilation_type() ==
              Script::COMPILATION_TYPE_EVAL;
 }
 
-
-bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
-  Handle<SharedFunctionInfo> shared(function->shared());
-  if (IsEvalToplevel(shared)) {
-    return CompileEvalForDebugging(function, shared);
-  } else {
-    CompilationInfoWithZone info(function);
-    return CompileForDebugging(&info);
-  }
+Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
+    Isolate* isolate, FunctionLiteral* literal, Handle<Script> script) {
+  Handle<Code> code = isolate->builtins()->CompileLazy();
+  Handle<ScopeInfo> scope_info = handle(ScopeInfo::Empty(isolate));
+  Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
+      literal->name(), literal->materialized_literal_count(), literal->kind(),
+      code, scope_info);
+  SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
+  SharedFunctionInfo::SetScript(result, script);
+  return result;
 }
 
-
-bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
-  DCHECK(shared->allows_lazy_compilation_without_context());
-  DCHECK(!IsEvalToplevel(shared));
-  Zone zone;
-  ParseInfo parse_info(&zone, shared);
-  CompilationInfo info(&parse_info);
-  return CompileForDebugging(&info);
-}
-
-
-void Compiler::CompileForLiveEdit(Handle<Script> script) {
-  // TODO(635): support extensions.
-  Zone zone;
-  ParseInfo parse_info(&zone, script);
-  CompilationInfo info(&parse_info);
-  PostponeInterruptsScope postpone(info.isolate());
-  VMState<COMPILER> state(info.isolate());
-
-  // Get rid of old list of shared function infos.
-  info.MarkAsFirstCompile();
-  info.MarkAsDebug();
-  info.parse_info()->set_global();
-  if (!Parser::ParseStatic(info.parse_info())) return;
-
-  LiveEditFunctionTracker tracker(info.isolate(), parse_info.literal());
-  if (!CompileUnoptimizedCode(&info)) return;
-  if (info.has_shared_info()) {
-    Handle<ScopeInfo> scope_info =
-        ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
-    info.shared_info()->set_scope_info(*scope_info);
-  }
-  tracker.RecordRootFunctionInfo(info.code());
-}
-
-
-static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
+Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   TimerEventScope<TimerEventCompileCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.CompileCode");
@@ -1263,6 +1269,13 @@
                                    FLAG_min_preparse_length) &&
                               !info->is_debug();
 
+      // Consider parsing eagerly when targeting the code cache.
+      parse_allow_lazy &= !(FLAG_serialize_eager && info->will_serialize());
+
+      // Consider parsing eagerly when targeting Ignition.
+      parse_allow_lazy &= !(FLAG_ignition && FLAG_ignition_eager &&
+                            !isolate->serializer_enabled());
+
       parse_info->set_allow_lazy_parsing(parse_allow_lazy);
       if (!parse_allow_lazy &&
           (options == ScriptCompiler::kProduceParserCache ||
@@ -1295,31 +1308,25 @@
     HistogramTimerScope timer(rate);
     TRACE_EVENT0("v8", info->is_eval() ? "V8.CompileEval" : "V8.Compile");
 
-    // Compile the code.
-    if (!CompileBaselineCode(info)) {
-      return Handle<SharedFunctionInfo>::null();
-    }
-
-    // Allocate function.
-    DCHECK(!info->code().is_null());
-    result = isolate->factory()->NewSharedFunctionInfo(
-        lit->name(), lit->materialized_literal_count(), lit->kind(),
-        info->code(),
-        ScopeInfo::Create(info->isolate(), info->zone(), info->scope()),
-        info->feedback_vector());
-    if (info->has_bytecode_array()) {
-      DCHECK(result->function_data()->IsUndefined());
-      result->set_function_data(*info->bytecode_array());
-    }
-
+    // Allocate a shared function info object.
     DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-    SharedFunctionInfo::InitFromFunctionLiteral(result, lit);
-    SharedFunctionInfo::SetScript(result, script);
+    result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
     result->set_is_toplevel(true);
     if (info->is_eval()) {
       // Eval scripts cannot be (re-)compiled without context.
       result->set_allows_lazy_compilation_without_context(false);
     }
+    parse_info->set_shared_info(result);
+
+    // Compile the code.
+    if (!CompileBaselineCode(info)) {
+      return Handle<SharedFunctionInfo>::null();
+    }
+
+    // Install compilation result on the shared function info
+    Handle<ScopeInfo> scope_info =
+        ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
+    InstallBaselineCompilationResult(info, result, scope_info);
 
     Handle<String> script_name =
         script->name()->IsString()
@@ -1329,8 +1336,8 @@
         ? Logger::EVAL_TAG
         : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
 
-    PROFILE(isolate, CodeCreateEvent(
-                log_tag, *info->code(), *result, info, *script_name));
+    PROFILE(isolate, CodeCreateEvent(log_tag, *info->abstract_code(), *result,
+                                     info, *script_name));
 
     // Hint to the runtime system used when allocating space for initial
     // property space by setting the expected number of properties for
@@ -1347,6 +1354,152 @@
   return result;
 }
 
+}  // namespace
+
+// ----------------------------------------------------------------------------
+// Implementation of Compiler
+
+bool Compiler::Analyze(ParseInfo* info) {
+  DCHECK_NOT_NULL(info->literal());
+  if (!Rewriter::Rewrite(info)) return false;
+  if (!Scope::Analyze(info)) return false;
+  if (!Renumber(info)) return false;
+  DCHECK_NOT_NULL(info->scope());
+  return true;
+}
+
+bool Compiler::ParseAndAnalyze(ParseInfo* info) {
+  if (!Parser::ParseStatic(info)) return false;
+  return Compiler::Analyze(info);
+}
+
+bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
+  if (function->is_compiled()) return true;
+  MaybeHandle<Code> maybe_code = GetLazyCode(function);
+  Handle<Code> code;
+  if (!maybe_code.ToHandle(&code)) {
+    if (flag == CLEAR_EXCEPTION) {
+      function->GetIsolate()->clear_pending_exception();
+    }
+    return false;
+  }
+  DCHECK(code->IsJavaScriptCode());
+  function->ReplaceCode(*code);
+  DCHECK(function->is_compiled());
+  return true;
+}
+
+bool Compiler::CompileOptimized(Handle<JSFunction> function,
+                                ConcurrencyMode mode) {
+  Handle<Code> code;
+  if (GetOptimizedCode(function, mode).ToHandle(&code)) {
+    // Optimization succeeded, return optimized code.
+    function->ReplaceCode(*code);
+  } else {
+    // Optimization failed, get unoptimized code.
+    Isolate* isolate = function->GetIsolate();
+    if (isolate->has_pending_exception()) {  // Possible stack overflow.
+      return false;
+    }
+    code = Handle<Code>(function->shared()->code(), isolate);
+    if (code->kind() != Code::FUNCTION &&
+        code->kind() != Code::OPTIMIZED_FUNCTION) {
+      if (!GetUnoptimizedCode(function).ToHandle(&code)) {
+        return false;
+      }
+    }
+    function->ReplaceCode(*code);
+  }
+
+  DCHECK(function->code()->kind() == Code::FUNCTION ||
+         function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+         (function->code()->is_interpreter_entry_trampoline() &&
+          function->shared()->HasBytecodeArray()) ||
+         function->IsInOptimizationQueue());
+  return true;
+}
+
+bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
+  Handle<SharedFunctionInfo> shared(function->shared());
+  if (IsEvalToplevel(shared)) {
+    return CompileEvalForDebugging(function, shared);
+  } else {
+    CompilationInfoWithZone info(function);
+    return CompileForDebugging(&info);
+  }
+}
+
+bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
+  DCHECK(shared->allows_lazy_compilation_without_context());
+  DCHECK(!IsEvalToplevel(shared));
+  Zone zone(shared->GetIsolate()->allocator());
+  ParseInfo parse_info(&zone, shared);
+  CompilationInfo info(&parse_info);
+  return CompileForDebugging(&info);
+}
+
+// TODO(turbofan): In the future, unoptimized code with deopt support could
+// be generated lazily once deopt is triggered.
+bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
+  DCHECK_NOT_NULL(info->literal());
+  DCHECK(info->has_scope());
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  if (!shared->has_deoptimization_support()) {
+    // TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
+    CompilationInfoWithZone unoptimized(info->closure());
+    // Note that we use the same AST that we will use for generating the
+    // optimized code.
+    ParseInfo* parse_info = unoptimized.parse_info();
+    parse_info->set_literal(info->literal());
+    parse_info->set_scope(info->scope());
+    parse_info->set_context(info->context());
+    unoptimized.EnableDeoptimizationSupport();
+    // If the current code has reloc info for serialization, also include
+    // reloc info for serialization for the new code, so that deopt support
+    // can be added without losing IC state.
+    if (shared->code()->kind() == Code::FUNCTION &&
+        shared->code()->has_reloc_info_for_serialization()) {
+      unoptimized.PrepareForSerializing();
+    }
+    EnsureFeedbackVector(&unoptimized);
+    if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
+
+    shared->EnableDeoptimizationSupport(*unoptimized.code());
+
+    info->MarkAsCompiled();
+
+    // The scope info might not have been set if a lazily compiled
+    // function is inlined before being called for the first time.
+    if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
+      Handle<ScopeInfo> target_scope_info =
+          ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
+      shared->set_scope_info(*target_scope_info);
+    }
+
+    // The existing unoptimized code was replaced with the new one.
+    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+  }
+  return true;
+}
+
+void Compiler::CompileForLiveEdit(Handle<Script> script) {
+  // TODO(635): support extensions.
+  Zone zone(script->GetIsolate()->allocator());
+  ParseInfo parse_info(&zone, script);
+  CompilationInfo info(&parse_info);
+  PostponeInterruptsScope postpone(info.isolate());
+  VMState<COMPILER> state(info.isolate());
+
+  // Get rid of old list of shared function infos.
+  info.MarkAsFirstCompile();
+  info.MarkAsDebug();
+  info.parse_info()->set_global();
+  if (!Parser::ParseStatic(info.parse_info())) return;
+
+  LiveEditFunctionTracker tracker(info.isolate(), parse_info.literal());
+  if (!CompileUnoptimizedCode(&info)) return;
+  tracker.RecordRootFunctionInfo(info.code());
+}
 
 MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
     Handle<String> source, Handle<SharedFunctionInfo> outer_info,
@@ -1373,7 +1526,7 @@
       script->set_column_offset(column_offset);
     }
     script->set_origin_options(options);
-    Zone zone;
+    Zone zone(isolate->allocator());
     ParseInfo parse_info(&zone, script);
     CompilationInfo info(&parse_info);
     parse_info.set_eval();
@@ -1401,8 +1554,6 @@
       compilation_cache->PutEval(source, outer_info, context, shared_info,
                                  line_offset);
     }
-  } else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
-    shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
   }
 
   Handle<JSFunction> result =
@@ -1417,8 +1568,7 @@
   return result;
 }
 
-
-Handle<SharedFunctionInfo> Compiler::CompileScript(
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
     Handle<String> source, Handle<Object> script_name, int line_offset,
     int column_offset, ScriptOriginOptions resource_options,
     Handle<Object> source_map_url, Handle<Context> context,
@@ -1443,12 +1593,7 @@
   isolate->counters()->total_load_size()->Increment(source_length);
   isolate->counters()->total_compile_size()->Increment(source_length);
 
-  // TODO(rossberg): The natives do not yet obey strong mode rules
-  // (for example, some macros use '==').
-  bool use_strong = FLAG_use_strong && !isolate->bootstrapper()->IsActive();
-  LanguageMode language_mode =
-      construct_language_mode(FLAG_use_strict, use_strong);
-
+  LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
   CompilationCache* compilation_cache = isolate->compilation_cache();
 
   // Do a lookup in the compilation cache but not for extensions.
@@ -1505,10 +1650,10 @@
     }
 
     // Compile the function and add it to the cache.
-    Zone zone;
+    Zone zone(isolate->allocator());
     ParseInfo parse_info(&zone, script);
     CompilationInfo info(&parse_info);
-    if (FLAG_harmony_modules && is_module) {
+    if (is_module) {
       parse_info.set_module();
     } else {
       parse_info.set_global();
@@ -1553,16 +1698,14 @@
   return result;
 }
 
-
-Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
     Handle<Script> script, ParseInfo* parse_info, int source_length) {
   Isolate* isolate = script->GetIsolate();
   // TODO(titzer): increment the counters in caller.
   isolate->counters()->total_load_size()->Increment(source_length);
   isolate->counters()->total_compile_size()->Increment(source_length);
 
-  LanguageMode language_mode =
-      construct_language_mode(FLAG_use_strict, FLAG_use_strong);
+  LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
   parse_info->set_language_mode(
       static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
 
@@ -1587,8 +1730,8 @@
     // On the first compile, there are no existing shared function info for
     // inner functions yet, so do not try to find them. All bets are off for
     // live edit though.
-    DCHECK(script->FindSharedFunctionInfo(literal).is_null() ||
-           isolate->debug()->live_edit_enabled());
+    SLOW_DCHECK(script->FindSharedFunctionInfo(literal).is_null() ||
+                isolate->debug()->live_edit_enabled());
   } else {
     maybe_existing = script->FindSharedFunctionInfo(literal);
   }
@@ -1604,10 +1747,18 @@
     }
   }
 
-  Zone zone;
+  // Allocate a shared function info object.
+  Handle<SharedFunctionInfo> result;
+  if (!maybe_existing.ToHandle(&result)) {
+    result = NewSharedFunctionInfoForLiteral(isolate, literal, script);
+    result->set_is_toplevel(false);
+  }
+
+  Zone zone(isolate->allocator());
   ParseInfo parse_info(&zone, script);
   CompilationInfo info(&parse_info);
   parse_info.set_literal(literal);
+  parse_info.set_shared_info(result);
   parse_info.set_scope(literal->scope());
   parse_info.set_language_mode(literal->scope()->language_mode());
   if (outer_info->will_serialize()) info.PrepareForSerializing();
@@ -1633,49 +1784,35 @@
 
   bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
 
+  // Consider compiling eagerly when targeting the code cache.
+  lazy &= !(FLAG_serialize_eager && info.will_serialize());
+
+  // Consider compiling eagerly when compiling bytecode for Ignition.
+  lazy &=
+      !(FLAG_ignition && FLAG_ignition_eager && !isolate->serializer_enabled());
+
   // Generate code
   TimerEventScope<TimerEventCompileCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.CompileCode");
-  Handle<ScopeInfo> scope_info;
   if (lazy) {
-    Handle<Code> code = isolate->builtins()->CompileLazy();
-    info.SetCode(code);
-    // There's no need in theory for a lazy-compiled function to have a type
-    // feedback vector, but some parts of the system expect all
-    // SharedFunctionInfo instances to have one.  The size of the vector depends
-    // on how many feedback-needing nodes are in the tree, and when lazily
-    // parsing we might not know that, if this function was never parsed before.
-    // In that case the vector will be replaced the next time MakeCode is
-    // called.
-    info.EnsureFeedbackVector();
-    scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
+    info.SetCode(isolate->builtins()->CompileLazy());
   } else if (Renumber(info.parse_info()) && GenerateBaselineCode(&info)) {
     // Code generation will ensure that the feedback vector is present and
     // appropriately sized.
     DCHECK(!info.code().is_null());
-    scope_info = ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
+    Handle<ScopeInfo> scope_info =
+        ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
     if (literal->should_eager_compile() &&
         literal->should_be_used_once_hint()) {
       info.code()->MarkToBeExecutedOnce(isolate);
     }
+    // Install compilation result on the shared function info.
+    InstallBaselineCompilationResult(&info, result, scope_info);
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
 
   if (maybe_existing.is_null()) {
-    // Create a shared function info object.
-    Handle<SharedFunctionInfo> result =
-        isolate->factory()->NewSharedFunctionInfo(
-            literal->name(), literal->materialized_literal_count(),
-            literal->kind(), info.code(), scope_info, info.feedback_vector());
-    if (info.has_bytecode_array()) {
-      DCHECK(result->function_data()->IsUndefined());
-      result->set_function_data(*info.bytecode_array());
-    }
-
-    SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
-    SharedFunctionInfo::SetScript(result, script);
-    result->set_is_toplevel(false);
     // If the outer function has been compiled before, we cannot be sure that
     // shared function info for this function literal has been created for the
     // first time. It may have already been compiled previously.
@@ -1690,15 +1827,9 @@
     SetExpectedNofPropertiesFromEstimate(result,
                                          literal->expected_property_count());
     live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
-    return result;
-  } else if (!lazy) {
-    // Assert that we are not overwriting (possibly patched) debug code.
-    DCHECK(!existing->HasDebugCode());
-    existing->ReplaceCode(*info.code());
-    existing->set_scope_info(*scope_info);
-    existing->set_feedback_vector(*info.feedback_vector());
   }
-  return existing;
+
+  return result;
 }
 
 Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
@@ -1721,9 +1852,9 @@
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
       name, literals, FunctionKind::kNormalFunction, code,
-      Handle<ScopeInfo>(fun->shared()->scope_info()),
-      Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
+      Handle<ScopeInfo>(fun->shared()->scope_info()));
   shared->set_construct_stub(*construct_stub);
+  shared->set_feedback_vector(fun->shared()->feedback_vector());
 
   // Copy the function data to the shared function info.
   shared->set_function_data(fun->shared()->function_data());
@@ -1733,81 +1864,15 @@
   return shared;
 }
 
-MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
-                                             ConcurrencyMode mode,
-                                             BailoutId osr_ast_id,
-                                             JavaScriptFrame* osr_frame) {
-  Isolate* isolate = function->GetIsolate();
-  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
-  if (shared->HasDebugInfo()) return MaybeHandle<Code>();
-
-  Handle<Code> cached_code;
-  if (GetCodeFromOptimizedCodeMap(
-          function, osr_ast_id).ToHandle(&cached_code)) {
-    if (FLAG_trace_opt) {
-      PrintF("[found optimized code for ");
-      function->ShortPrint();
-      if (!osr_ast_id.IsNone()) {
-        PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
-      }
-      PrintF("]\n");
-    }
-    return cached_code;
-  }
-
-  DCHECK(AllowCompilation::IsAllowed(isolate));
-
-  Handle<Code> current_code(shared->code());
-  if (!shared->is_compiled() ||
-      shared->scope_info() == ScopeInfo::Empty(isolate)) {
-    // The function was never compiled. Compile it unoptimized first.
-    // TODO(titzer): reuse the AST and scope info from this compile.
-    CompilationInfoWithZone unoptimized(function);
-    unoptimized.EnableDeoptimizationSupport();
-    if (!GetUnoptimizedCodeCommon(&unoptimized).ToHandle(&current_code)) {
-      return MaybeHandle<Code>();
-    }
-    shared->ReplaceCode(*current_code);
-  }
-
-  current_code->set_profiler_ticks(0);
-
-  // TODO(mstarzinger): We cannot properly deserialize a scope chain containing
-  // an eval scope and hence would fail at parsing the eval source again.
-  if (shared->disable_optimization_reason() == kEval) {
-    return MaybeHandle<Code>();
-  }
-
-  // TODO(mstarzinger): We cannot properly deserialize a scope chain for the
-  // builtin context, hence Genesis::InstallExperimentalNatives would fail.
-  if (shared->is_toplevel() && isolate->bootstrapper()->IsActive()) {
-    return MaybeHandle<Code>();
-  }
-
-  base::SmartPointer<CompilationInfo> info(
-      new CompilationInfoWithZone(function));
-  VMState<COMPILER> state(isolate);
-  DCHECK(!isolate->has_pending_exception());
-  PostponeInterruptsScope postpone(isolate);
-
-  info->SetOptimizingForOsr(osr_ast_id, current_code);
-
-  if (mode == CONCURRENT) {
-    if (GetOptimizedCodeLater(info.get())) {
-      info.Detach();  // The background recompile job owns this now.
-      return isolate->builtins()->InOptimizationQueue();
-    }
-  } else {
-    info->set_osr_frame(osr_frame);
-    if (GetOptimizedCodeNow(info.get())) return info->code();
-  }
-
-  if (isolate->has_pending_exception()) isolate->clear_pending_exception();
-  return MaybeHandle<Code>();
+MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
+                                                   BailoutId osr_ast_id,
+                                                   JavaScriptFrame* osr_frame) {
+  DCHECK(!osr_ast_id.IsNone());
+  DCHECK_NOT_NULL(osr_frame);
+  return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
 }
 
-MaybeHandle<Code> Compiler::GetConcurrentlyOptimizedCode(
-    OptimizedCompileJob* job) {
+void Compiler::FinalizeOptimizedCompileJob(OptimizedCompileJob* job) {
   // Take ownership of compilation info.  Deleting compilation info
   // also tears down the zone and the recompile job.
   base::SmartPointer<CompilationInfo> info(job->info());
@@ -1843,7 +1908,8 @@
         info->closure()->ShortPrint();
         PrintF("]\n");
       }
-      return Handle<Code>(*info->code());
+      info->closure()->ReplaceCode(*info->code());
+      return;
     }
   }
 
@@ -1853,45 +1919,44 @@
     info->closure()->ShortPrint();
     PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
   }
-  return MaybeHandle<Code>();
+  info->closure()->ReplaceCode(shared->code());
 }
 
+void Compiler::PostInstantiation(Handle<JSFunction> function,
+                                 PretenureFlag pretenure) {
+  Handle<SharedFunctionInfo> shared(function->shared());
 
-CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
-    : name_(name), info_(info) {
-  if (FLAG_hydrogen_stats) {
-    info_zone_start_allocation_size_ = info->zone()->allocation_size();
-    timer_.Start();
+  if (FLAG_always_opt && shared->allows_lazy_compilation()) {
+    function->MarkForOptimization();
+  }
+
+  CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
+      function->context()->native_context(), BailoutId::None());
+  if (cached.code != nullptr) {
+    // Caching of optimized code enabled and optimized code found.
+    DCHECK(!cached.code->marked_for_deoptimization());
+    DCHECK(function->shared()->is_compiled());
+    function->ReplaceCode(cached.code);
+  }
+
+  if (cached.literals != nullptr) {
+    function->set_literals(cached.literals);
+  } else {
+    Isolate* isolate = function->GetIsolate();
+    int number_of_literals = shared->num_literals();
+    Handle<LiteralsArray> literals =
+        LiteralsArray::New(isolate, handle(shared->feedback_vector()),
+                           number_of_literals, pretenure);
+    function->set_literals(*literals);
+
+    // Cache context-specific literals.
+    MaybeHandle<Code> code;
+    if (cached.code != nullptr) code = handle(cached.code);
+    Handle<Context> native_context(function->context()->native_context());
+    SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
+                                              literals, BailoutId::None());
   }
 }
 
-
-CompilationPhase::~CompilationPhase() {
-  if (FLAG_hydrogen_stats) {
-    size_t size = zone()->allocation_size();
-    size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
-    isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
-  }
-}
-
-
-bool CompilationPhase::ShouldProduceTraceOutput() const {
-  // Trace if the appropriate trace flag is set and the phase name's first
-  // character is in the FLAG_trace_phase command line parameter.
-  AllowHandleDereference allow_deref;
-  bool tracing_on = info()->IsStub()
-      ? FLAG_trace_hydrogen_stubs
-      : (FLAG_trace_hydrogen &&
-         info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
-  return (tracing_on &&
-      base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
-}
-
-#if DEBUG
-void CompilationInfo::PrintAstForTesting() {
-  PrintF("--- Source from AST ---\n%s\n",
-         PrettyPrinter(isolate()).PrintProgram(literal()));
-}
-#endif
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler.h b/src/compiler.h
index a56fa13..fa04399 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -9,7 +9,6 @@
 #include "src/ast/ast.h"
 #include "src/bailout-reason.h"
 #include "src/compilation-dependencies.h"
-#include "src/signature.h"
 #include "src/source-position.h"
 #include "src/zone.h"
 
@@ -17,10 +16,107 @@
 namespace internal {
 
 // Forward declarations.
+class CompilationInfo;
 class JavaScriptFrame;
+class OptimizedCompileJob;
 class ParseInfo;
 class ScriptData;
 
+// The V8 compiler API.
+//
+// This is the central hub for dispatching to the various compilers within V8.
+// Logic for which compiler to choose and how to wire compilation results into
+// the object heap should be kept inside this class.
+//
+// General strategy: Scripts are translated into anonymous functions w/o
+// parameters which then can be executed. If the source code contains other
+// functions, they might be compiled and allocated as part of the compilation
+// of the source code or deferred for lazy compilation at a later point.
+class Compiler : public AllStatic {
+ public:
+  enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
+  enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
+
+  // ===========================================================================
+  // The following family of methods ensures a given function is compiled. The
+  // general contract is that failures will be reported by returning {false},
+  // whereas successful compilation ensures the {is_compiled} predicate on the
+  // given function holds (except for live-edit, which compiles the world).
+
+  static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
+  static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
+  static bool CompileDebugCode(Handle<JSFunction> function);
+  static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
+  static void CompileForLiveEdit(Handle<Script> script);
+
+  // Generate and install code from previously queued optimization job.
+  static void FinalizeOptimizedCompileJob(OptimizedCompileJob* job);
+
+  // Give the compiler a chance to perform low-latency initialization tasks of
+  // the given {function} on its instantiation. Note that only the runtime will
+  // offer this chance; optimized closure instantiation will not call this.
+  static void PostInstantiation(Handle<JSFunction> function, PretenureFlag);
+
+  // Parser::Parse, then Compiler::Analyze.
+  static bool ParseAndAnalyze(ParseInfo* info);
+  // Rewrite, analyze scopes, and renumber.
+  static bool Analyze(ParseInfo* info);
+  // Adds deoptimization support, requires ParseAndAnalyze.
+  static bool EnsureDeoptimizationSupport(CompilationInfo* info);
+
+  // ===========================================================================
+  // The following family of methods instantiates new functions for scripts or
+  // function literals. The decision whether those functions will be compiled,
+  // is left to the discretion of the compiler.
+  //
+  // Please note this interface returns shared function infos.  This means you
+  // need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
+  // real function with a context.
+
+  // Create a (bound) function for a String source within a context for eval.
+  MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
+      Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+      Handle<Context> context, LanguageMode language_mode,
+      ParseRestriction restriction, int line_offset, int column_offset = 0,
+      Handle<Object> script_name = Handle<Object>(),
+      ScriptOriginOptions options = ScriptOriginOptions());
+
+  // Create a shared function info object for a String source within a context.
+  static Handle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
+      Handle<String> source, Handle<Object> script_name, int line_offset,
+      int column_offset, ScriptOriginOptions resource_options,
+      Handle<Object> source_map_url, Handle<Context> context,
+      v8::Extension* extension, ScriptData** cached_data,
+      ScriptCompiler::CompileOptions compile_options,
+      NativesFlag is_natives_code, bool is_module);
+
+  // Create a shared function info object for a Script that has already been
+  // parsed while the script was being loaded from a streamed source.
+  static Handle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
+      Handle<Script> script, ParseInfo* info, int source_length);
+
+  // Create a shared function info object (the code may be lazily compiled).
+  static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
+      FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
+
+  // Create a shared function info object for a native function literal.
+  static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
+      v8::Extension* extension, Handle<String> name);
+
+  // ===========================================================================
+  // The following family of methods provides support for OSR. Code generated
+  // for entry via OSR might not be suitable for normal entry, hence will be
+  // returned directly to the caller.
+  //
+  // Please note this interface is the only part dealing with {Code} objects
+  // directly. Other methods are agnostic to {Code} and can use an interpreter
+  // instead of generating JIT code for a function at all.
+
+  // Generate and return optimized code for OSR, or empty handle on failure.
+  MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
+      Handle<JSFunction> function, BailoutId osr_ast_id,
+      JavaScriptFrame* osr_frame);
+};
 
 struct InlinedFunctionInfo {
   InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
@@ -101,7 +197,6 @@
   Handle<Code> code() const { return code_; }
   Code::Flags code_flags() const { return code_flags_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
-  Handle<Code> unoptimized_code() const { return unoptimized_code_; }
   int opt_count() const { return opt_count_; }
   int num_parameters() const;
   int num_parameters_including_this() const;
@@ -116,6 +211,11 @@
   bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
   Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
 
+  Handle<AbstractCode> abstract_code() const {
+    return has_bytecode_array() ? Handle<AbstractCode>::cast(bytecode_array())
+                                : Handle<AbstractCode>::cast(code());
+  }
+
   bool is_tracking_positions() const { return track_positions_; }
 
   bool is_calling() const {
@@ -218,14 +318,10 @@
     // Generate a pre-aged prologue if we are optimizing for size, which
     // will make code flushing more aggressive. Only apply to Code::FUNCTION,
     // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
-    return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
-           !is_debug() && output_code_kind() == Code::FUNCTION;
+    return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
+           output_code_kind() == Code::FUNCTION;
   }
 
-  void EnsureFeedbackVector();
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
   void SetCode(Handle<Code> code) { code_ = code; }
 
   void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
@@ -261,10 +357,9 @@
     code_flags_ =
         Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
   }
-  void SetOptimizingForOsr(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+  void SetOptimizingForOsr(BailoutId osr_ast_id) {
     SetOptimizing();
     osr_ast_id_ = osr_ast_id;
-    unoptimized_code_ = unoptimized;
   }
 
   // Deoptimization support.
@@ -288,7 +383,7 @@
   }
 
   void ReopenHandlesInNewHandleScope() {
-    unoptimized_code_ = Handle<Code>(*unoptimized_code_);
+    // Empty for now but will be needed once fields move from ParseInfo.
   }
 
   void AbortOptimization(BailoutReason reason) {
@@ -377,12 +472,26 @@
     return Code::ExtractKindFromFlags(code_flags_);
   }
 
+  StackFrame::Type GetOutputStackFrameType() const;
+
  protected:
   ParseInfo* parse_info_;
 
   void DisableFutureOptimization() {
     if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
-      shared_info()->DisableOptimization(bailout_reason());
+      // If Crankshaft tried to optimize this function, bailed out, and
+      // doesn't want to try again, then use TurboFan next time.
+      if (!shared_info()->dont_crankshaft() &&
+          bailout_reason() != kOptimizedTooManyTimes) {
+        shared_info()->set_dont_crankshaft(true);
+        if (FLAG_trace_opt) {
+          PrintF("[disabled Crankshaft for ");
+          shared_info()->ShortPrint();
+          PrintF(", reason: %s]\n", GetBailoutReason(bailout_reason()));
+        }
+      } else {
+        shared_info()->DisableOptimization(bailout_reason());
+      }
     }
   }
 
@@ -421,16 +530,9 @@
   // The compiled code.
   Handle<Code> code_;
 
-  // Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
-  Handle<TypeFeedbackVector> feedback_vector_;
-
   // Compilation mode flag and whether deoptimization is allowed.
   Mode mode_;
   BailoutId osr_ast_id_;
-  // The unoptimized code we patched for OSR may not be the shared code
-  // afterwards, since we may need to compile it again to include deoptimization
-  // data.  Keep track which code we patched.
-  Handle<Code> unoptimized_code_;
 
   // Holds the bytecode array generated by the interpreter.
   // TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
@@ -475,25 +577,7 @@
 };
 
 
-// A wrapper around a CompilationInfo that detaches the Handles from
-// the underlying DeferredHandleScope and stores them in info_ on
-// destruction.
-class CompilationHandleScope BASE_EMBEDDED {
- public:
-  explicit CompilationHandleScope(CompilationInfo* info)
-      : deferred_(info->isolate()), info_(info) {}
-  ~CompilationHandleScope() {
-    info_->set_deferred_handles(deferred_.Detach());
-  }
-
- private:
-  DeferredHandleScope deferred_;
-  CompilationInfo* info_;
-};
-
-
 class HGraph;
-class HOptimizedGraphBuilder;
 class LChunk;
 
 // A helper class that calls the three compilation phases in
@@ -505,12 +589,7 @@
 class OptimizedCompileJob: public ZoneObject {
  public:
   explicit OptimizedCompileJob(CompilationInfo* info)
-      : info_(info),
-        graph_builder_(NULL),
-        graph_(NULL),
-        chunk_(NULL),
-        last_status_(FAILED),
-        awaiting_install_(false) { }
+      : info_(info), graph_(NULL), chunk_(NULL), last_status_(FAILED) {}
 
   enum Status {
     FAILED, BAILED_OUT, SUCCEEDED
@@ -534,23 +613,14 @@
     return SetLastStatus(BAILED_OUT);
   }
 
-  void WaitForInstall() {
-    DCHECK(info_->is_osr());
-    awaiting_install_ = true;
-  }
-
-  bool IsWaitingForInstall() { return awaiting_install_; }
-
  private:
   CompilationInfo* info_;
-  HOptimizedGraphBuilder* graph_builder_;
   HGraph* graph_;
   LChunk* chunk_;
   base::TimeDelta time_taken_to_create_graph_;
   base::TimeDelta time_taken_to_optimize_;
   base::TimeDelta time_taken_to_codegen_;
   Status last_status_;
-  bool awaiting_install_;
 
   MUST_USE_RESULT Status SetLastStatus(Status status) {
     last_status_ = status;
@@ -575,106 +645,6 @@
   };
 };
 
-
-// The V8 compiler
-//
-// General strategy: Source code is translated into an anonymous function w/o
-// parameters which then can be executed. If the source code contains other
-// functions, they will be compiled and allocated as part of the compilation
-// of the source code.
-
-// Please note this interface returns shared function infos.  This means you
-// need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
-// real function with a context.
-
-class Compiler : public AllStatic {
- public:
-  MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
-      Handle<JSFunction> function);
-  MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
-      Handle<JSFunction> function);
-
-  static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
-  static bool CompileDebugCode(Handle<JSFunction> function);
-  static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
-  static void CompileForLiveEdit(Handle<Script> script);
-
-  // Parser::Parse, then Compiler::Analyze.
-  static bool ParseAndAnalyze(ParseInfo* info);
-  // Rewrite, analyze scopes, and renumber.
-  static bool Analyze(ParseInfo* info);
-  // Adds deoptimization support, requires ParseAndAnalyze.
-  static bool EnsureDeoptimizationSupport(CompilationInfo* info);
-
-  // Compile a String source within a context for eval.
-  MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
-      Handle<String> source, Handle<SharedFunctionInfo> outer_info,
-      Handle<Context> context, LanguageMode language_mode,
-      ParseRestriction restriction, int line_offset, int column_offset = 0,
-      Handle<Object> script_name = Handle<Object>(),
-      ScriptOriginOptions options = ScriptOriginOptions());
-
-  // Compile a String source within a context.
-  static Handle<SharedFunctionInfo> CompileScript(
-      Handle<String> source, Handle<Object> script_name, int line_offset,
-      int column_offset, ScriptOriginOptions resource_options,
-      Handle<Object> source_map_url, Handle<Context> context,
-      v8::Extension* extension, ScriptData** cached_data,
-      ScriptCompiler::CompileOptions compile_options,
-      NativesFlag is_natives_code, bool is_module);
-
-  static Handle<SharedFunctionInfo> CompileStreamedScript(Handle<Script> script,
-                                                          ParseInfo* info,
-                                                          int source_length);
-
-  // Create a shared function info object (the code may be lazily compiled).
-  static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
-      FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
-
-  // Create a shared function info object for a native function literal.
-  static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
-      v8::Extension* extension, Handle<String> name);
-
-  enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
-
-  // Generate and return optimized code or start a concurrent optimization job.
-  // In the latter case, return the InOptimizationQueue builtin.  On failure,
-  // return the empty handle.
-  MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
-      Handle<JSFunction> function, ConcurrencyMode mode,
-      BailoutId osr_ast_id = BailoutId::None(),
-      JavaScriptFrame* osr_frame = nullptr);
-
-  // Generate and return code from previously queued optimization job.
-  // On failure, return the empty handle.
-  MUST_USE_RESULT static MaybeHandle<Code> GetConcurrentlyOptimizedCode(
-      OptimizedCompileJob* job);
-};
-
-
-class CompilationPhase BASE_EMBEDDED {
- public:
-  CompilationPhase(const char* name, CompilationInfo* info);
-  ~CompilationPhase();
-
- protected:
-  bool ShouldProduceTraceOutput() const;
-
-  const char* name() const { return name_; }
-  CompilationInfo* info() const { return info_; }
-  Isolate* isolate() const { return info()->isolate(); }
-  Zone* zone() { return &zone_; }
-
- private:
-  const char* name_;
-  CompilationInfo* info_;
-  Zone zone_;
-  size_t info_zone_start_allocation_size_;
-  base::ElapsedTimer timer_;
-
-  DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index bdf4c47..a0b5022 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -54,6 +54,7 @@
   SBit OutputSBit() const {
     switch (instr_->flags_mode()) {
       case kFlags_branch:
+      case kFlags_deoptimize:
       case kFlags_set:
         return SetCC;
       case kFlags_none:
@@ -149,8 +150,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -164,7 +168,9 @@
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
-    __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
+    // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
+    __ vmov(result_, -1.0f);
+    __ vsqrt(result_, result_);
   }
 
  private:
@@ -178,7 +184,9 @@
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
-    __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
+    // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
+    __ vmov(result_, -1.0);
+    __ vsqrt(result_, result_);
   }
 
  private:
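
The two out-of-line paths above obtain a quiet NaN by computing the square root of -1 rather than loading a NaN constant. A minimal standalone C++ sketch (illustrative only, not part of the patch) of the same identity:

    #include <cmath>
    #include <cstdio>

    int main() {
      // IEEE 754: the square root of a negative number is a quiet NaN.
      float f = std::sqrt(-1.0f);
      double d = std::sqrt(-1.0);
      std::printf("float is NaN: %d, double is NaN: %d\n",
                  std::isnan(f) ? 1 : 0, std::isnan(d) ? 1 : 0);
      return 0;
    }
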
@@ -222,7 +230,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -236,7 +245,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Push(lr);
     }
@@ -249,7 +258,7 @@
       __ add(scratch1_, object_, Operand(index_));
     }
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(lr);
     }
   }
@@ -262,6 +271,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -378,6 +388,11 @@
     DCHECK_EQ(LeaveCC, i.OutputSBit());           \
   } while (0)
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -394,7 +409,7 @@
     __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     if (FLAG_enable_embedded_constant_pool) {
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
     }
@@ -404,14 +419,39 @@
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &done);
+
+  // Load the arguments count from the current arguments adaptor frame (note
+  // that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   ArmOperandConverter i(this, instr);
 
-  masm()->MaybeCheckConstPool();
-
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  __ MaybeCheckConstPool();
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -427,9 +467,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -458,6 +504,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -468,6 +515,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -535,7 +587,7 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ ldr(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -742,6 +794,67 @@
       __ teq(i.InputRegister(0), i.InputOperand2(1));
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
+    case kArmAddPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ add(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
+             SBit::SetCC);
+      __ adc(i.OutputRegister(1), i.InputRegister(1),
+             Operand(i.InputRegister(3)));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSubPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ sub(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
+             SBit::SetCC);
+      __ sbc(i.OutputRegister(1), i.InputRegister(1),
+             Operand(i.InputRegister(3)));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmMulPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+               i.InputRegister(2));
+      __ mla(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(3),
+             i.OutputRegister(1));
+      __ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
+             i.OutputRegister(1));
+      break;
+    case kArmLslPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputInt32(2));
+      } else {
+        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
+      }
+      break;
+    case kArmLsrPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputInt32(2));
+      } else {
+        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
+      }
+      break;
+    case kArmAsrPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputInt32(2));
+      } else {
+        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
+      }
+      break;
     case kArmVcmpF32:
       if (instr->InputAt(1)->IsDoubleRegister()) {
         __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
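
The kArmAddPair and kArmSubPair cases above lower 64-bit integer arithmetic on the 32-bit target into a flag-setting operation on the low words followed by an add/subtract-with-carry on the high words. The same carry propagation in portable C++ (an illustrative sketch, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 64-bit addition split into 32-bit halves, mirroring add (SetCC) + adc.
      uint32_t a_lo = 0xFFFFFFFFu, a_hi = 0x00000001u;
      uint32_t b_lo = 0x00000002u, b_hi = 0x00000003u;
      uint32_t lo = a_lo + b_lo;
      uint32_t carry = lo < a_lo ? 1u : 0u;  // low-word addition wrapped around
      uint32_t hi = a_hi + b_hi + carry;     // the adc step
      std::printf("%08x%08x\n", hi, lo);     // prints 0000000500000001
      return 0;
    }
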
@@ -1155,29 +1268,32 @@
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  // TODO(turbofan): We should be able to generate better code by sharing the
+  // actual final call site and just bl'ing to it here, similar to what we do
+  // in the lithium backend.
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  __ CheckConstPool(false, false);
 }
 
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    if (FLAG_enable_embedded_constant_pool) {
-      __ Push(lr, fp, pp);
-      // Adjust FP to point to saved FP.
-      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      if (FLAG_enable_embedded_constant_pool) {
+        __ Push(lr, fp, pp);
+        // Adjust FP to point to saved FP.
+        __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+      } else {
+        __ Push(lr, fp);
+        __ mov(fp, sp);
+      }
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
     } else {
-      __ Push(lr, fp);
-      __ mov(fp, sp);
+      __ StubPrologue(info()->GetOutputStackFrameType());
     }
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
@@ -1247,15 +1363,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ LeaveFrame(StackFrame::MANUAL);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ b(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ LeaveFrame(StackFrame::MANUAL);
+      AssembleDeconstructFrame();
     }
   }
   __ Ret(pop_count);
@@ -1311,9 +1427,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ ldr(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ ldr(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 50fa555..5e6f5c9 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -46,6 +46,12 @@
   V(ArmUxtab)                      \
   V(ArmRbit)                       \
   V(ArmUxtah)                      \
+  V(ArmAddPair)                    \
+  V(ArmSubPair)                    \
+  V(ArmMulPair)                    \
+  V(ArmLslPair)                    \
+  V(ArmLsrPair)                    \
+  V(ArmAsrPair)                    \
   V(ArmVcmpF32)                    \
   V(ArmVaddF32)                    \
   V(ArmVsubF32)                    \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index d950e8c..466765e 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -48,6 +48,12 @@
     case kArmUxtab:
     case kArmUxtah:
     case kArmRbit:
+    case kArmAddPair:
+    case kArmSubPair:
+    case kArmMulPair:
+    case kArmLslPair:
+    case kArmLsrPair:
+    case kArmAsrPair:
     case kArmVcmpF32:
     case kArmVaddF32:
     case kArmVsubF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 14b30b1..76d9e3c 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -237,8 +237,13 @@
   DCHECK_GE(arraysize(outputs), output_count);
   DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -369,9 +374,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_Offset_RR;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -691,8 +694,13 @@
   DCHECK_GE(arraysize(outputs), output_count);
   DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -759,6 +767,120 @@
   VisitShift(this, node, TryMatchASR);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  ArmOperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmAddPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  ArmOperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmSubPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  ArmOperandGenerator g(this);
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmMulPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  ArmOperandGenerator g(this);
+  // We use g.UseUniqueRegister here for InputAt(0) to guarantee that there is
+  // no register aliasing with output registers.
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmLslPair, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  ArmOperandGenerator g(this);
+  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
+  // guarantee that there is no register aliasing with the output registers.
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmLsrPair, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  ArmOperandGenerator g(this);
+  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
+  // guarantee that there is no register aliasing with the output registers.
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmAsrPair, 2, outputs, 3, inputs);
+}
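
The word-pair shift selectors above feed the kArmLslPair/kArmLsrPair/kArmAsrPair instructions, which shift a 64-bit value held in two 32-bit registers, so bits leaving one word must be merged into the other. An illustrative C++ sketch of the logical-left case (hypothetical helper, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    // Shift a 64-bit value held as {hi, lo} left by 0..63 bits.
    static void ShiftLeftPair(uint32_t lo, uint32_t hi, unsigned shift,
                              uint32_t* out_lo, uint32_t* out_hi) {
      if (shift == 0) {
        *out_lo = lo;
        *out_hi = hi;
      } else if (shift < 32) {
        *out_lo = lo << shift;
        *out_hi = (hi << shift) | (lo >> (32 - shift));  // bits carried across
      } else {
        *out_lo = 0;
        *out_hi = lo << (shift - 32);
      }
    }

    int main() {
      uint32_t lo = 0, hi = 0;
      ShiftLeftPair(0x80000001u, 0x00000000u, 1, &lo, &hi);
      std::printf("%08x%08x\n", hi, lo);  // prints 0000000100000002
      return 0;
    }
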
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitShift(this, node, TryMatchROR);
@@ -1013,7 +1135,9 @@
   VisitRR(this, kArmVcvtU32F64, node);
 }
 
-
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kArmVcvtU32F64, node);
+}
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   VisitRR(this, kArmVcvtF32F64, node);
 }
@@ -1272,6 +1396,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 namespace {
 
@@ -1284,6 +1409,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1357,8 +1485,7 @@
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
-  } else {
-    DCHECK(cont->IsSet());
+  } else if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
 
@@ -1366,8 +1493,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -1482,7 +1614,11 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
+                             cont->frame_state());
   } else {
+    DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    value_operand);
   }
@@ -1490,13 +1626,23 @@
 
 }  // namespace
 
-
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   ArmOperandGenerator g(this);
@@ -1527,7 +1673,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1537,32 +1683,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
   }
   FlagsContinuation cont;
@@ -1572,7 +1720,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
   }
   FlagsContinuation cont;
@@ -1581,37 +1729,39 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index e45c677..456e7e7 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -207,13 +207,13 @@
   MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
+  }
+
+  MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     if (offset.from_frame_pointer()) {
-      int from_sp =
-          offset.offset() +
-          ((frame()->GetSpToFpSlotCount() + frame_access_state()->sp_delta()) *
-           kPointerSize);
+      int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
       // Convert FP-offsets to SP-offsets if it results in better code.
       if (Assembler::IsImmLSUnscaled(from_sp) ||
           Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
@@ -279,7 +279,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -293,7 +294,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Push(lr);
     }
@@ -301,7 +302,7 @@
                          remembered_set_action, save_fp_mode);
     __ Add(scratch1_, object_, index_);
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(lr);
     }
   }
@@ -313,6 +314,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -466,6 +468,15 @@
     }                                                                       \
   } while (0)
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
+    __ Mov(csp, fp);
+  } else {
+    __ Mov(jssp, fp);
+  }
+  __ Pop(fp, lr);
+}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -482,13 +493,37 @@
     __ Claim(-sp_slot_delta);
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ B(ne, &done);
+
+  // Load arguments count from current arguments adaptor frame (note that it
+  // does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ Ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
@@ -506,21 +541,31 @@
         __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
         __ Call(target);
       }
+      RecordCallPosition(instr);
       // TODO(titzer): this is ugly. JSSP should be a caller-save register
       // in this case, but it is not possible to express in the register
       // allocator.
-      CallDescriptor::Flags flags =
-          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      CallDescriptor::Flags flags(MiscField::decode(opcode));
       if (flags & CallDescriptor::kRestoreJSSP) {
-        __ mov(jssp, csp);
+        __ Ldr(jssp, MemOperand(csp));
+        __ Mov(csp, jssp);
+      }
+      if (flags & CallDescriptor::kRestoreCSP) {
+        __ Mov(csp, jssp);
+        __ AssertCspAligned();
       }
       frame_access_state()->ClearSPDelta();
-      RecordCallPosition(instr);
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -545,18 +590,23 @@
       }
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Call(x10);
+      RecordCallPosition(instr);
       // TODO(titzer): this is ugly. JSSP should be a caller-save register
       // in this case, but it is not possible to express in the register
       // allocator.
-      CallDescriptor::Flags flags =
-          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      CallDescriptor::Flags flags(MiscField::decode(opcode));
       if (flags & CallDescriptor::kRestoreJSSP) {
-        __ mov(jssp, csp);
+        __ Ldr(jssp, MemOperand(csp));
+        __ Mov(csp, jssp);
+      }
+      if (flags & CallDescriptor::kRestoreCSP) {
+        __ Mov(csp, jssp);
+        __ AssertCspAligned();
       }
       frame_access_state()->ClearSPDelta();
-      RecordCallPosition(instr);
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -569,6 +619,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(x10);
       frame_access_state()->ClearSPDelta();
@@ -628,7 +683,7 @@
       __ mov(i.OutputRegister(), fp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ ldr(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -931,22 +986,46 @@
       // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
       break;
     case kArm64ClaimCSP: {
-      int count = i.InputInt32(0);
+      int count = RoundUp(i.InputInt32(0), 2);
       Register prev = __ StackPointer();
-      __ SetStackPointer(csp);
-      __ Claim(count);
-      __ SetStackPointer(prev);
-      frame_access_state()->IncreaseSPDelta(count);
+      if (prev.Is(jssp)) {
+        // TODO(titzer): make this a macro-assembler method.
+        // Align the CSP and store the previous JSSP on the stack.
+        UseScratchRegisterScope scope(masm());
+        Register tmp = scope.AcquireX();
+
+        int sp_alignment = __ ActivationFrameAlignment();
+        __ Sub(tmp, jssp, kPointerSize);
+        __ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
+        __ Mov(csp, tmp);
+        __ Str(jssp, MemOperand(csp));
+        if (count > 0) {
+          __ SetStackPointer(csp);
+          __ Claim(count);
+          __ SetStackPointer(prev);
+        }
+      } else {
+        __ AssertCspAligned();
+        if (count > 0) {
+          __ Claim(count);
+          frame_access_state()->IncreaseSPDelta(count);
+        }
+      }
       break;
     }
     case kArm64ClaimJSSP: {
       int count = i.InputInt32(0);
       if (csp.Is(__ StackPointer())) {
-        // No JSP is set up. Compute it from the CSP.
-        int even = RoundUp(count, 2);
-        __ Sub(jssp, csp, count * kPointerSize);
-        __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
-        frame_access_state()->IncreaseSPDelta(even);
+        // No JSSP is set up. Compute it from the CSP.
+        __ AssertCspAligned();
+        if (count > 0) {
+          int even = RoundUp(count, 2);
+          __ Sub(jssp, csp, count * kPointerSize);
+          __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
+          frame_access_state()->IncreaseSPDelta(even);
+        } else {
+          __ Mov(jssp, csp);
+        }
       } else {
         // JSSP is the current stack pointer, just use regular Claim().
         __ Claim(count);
@@ -1424,34 +1503,38 @@
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
 }
 
+void CodeGenerator::AssembleSetupStackPointer() {
+  const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
+    __ SetStackPointer(csp);
+  } else {
+    __ SetStackPointer(jssp);
+  }
+}
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ SetStackPointer(csp);
-    __ Push(lr, fp);
-    __ Mov(fp, csp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ SetStackPointer(jssp);
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    if (descriptor->UseNativeStack()) {
-      __ SetStackPointer(csp);
-    } else {
-      __ SetStackPointer(jssp);
-    }
-    __ StubPrologue();
-  } else {
-    if (descriptor->UseNativeStack()) {
-      __ SetStackPointer(csp);
-    } else {
-      __ SetStackPointer(jssp);
-    }
-    frame()->SetElidedFrameSizeInSlots(0);
+  if (descriptor->UseNativeStack()) {
+    __ AssertCspAligned();
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsJSFunctionCall()) {
+      DCHECK(!descriptor->UseNativeStack());
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      if (descriptor->IsCFunctionCall()) {
+        __ Push(lr, fp);
+        __ Mov(fp, masm_.StackPointer());
+        __ Claim(stack_shrink_slots);
+      } else {
+        __ StubPrologue(info()->GetOutputStackFrameType(),
+                        frame()->GetTotalFrameSlotCount());
+      }
+    }
+  }
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1465,15 +1548,9 @@
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
-  // If frame()->needs_frame() is false, then
-  // frame()->AlignSavedCalleeRegisterSlots() is guaranteed to return 0.
-  if (csp.Is(masm()->StackPointer()) && frame()->needs_frame()) {
-    // The system stack pointer requires 16-byte alignment at function call
-    // boundaries.
-
-    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+  if (descriptor->IsJSFunctionCall()) {
+    __ Claim(stack_shrink_slots);
   }
-  __ Claim(stack_shrink_slots);
 
   // Save FP registers.
   CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
@@ -1518,27 +1595,27 @@
 
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->IsCFunctionCall()) {
-    __ Mov(csp, fp);
-    __ Pop(fp, lr);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ B(&return_label_);
       return;
     } else {
       __ Bind(&return_label_);
+      AssembleDeconstructFrame();
       if (descriptor->UseNativeStack()) {
-        __ Mov(csp, fp);
         pop_count += (pop_count & 1);  // align
-      } else {
-        __ Mov(jssp, fp);
       }
-      __ Pop(fp, lr);
     }
   } else if (descriptor->UseNativeStack()) {
     pop_count += (pop_count & 1);  // align
   }
   __ Drop(pop_count);
+
+  if (descriptor->UseNativeStack()) {
+    __ AssertCspAligned();
+  }
   __ Ret();
 }
 
@@ -1576,9 +1653,9 @@
       if (src.type() == Constant::kHeapObject) {
         Handle<HeapObject> src_object = src.ToHeapObject();
         Heap::RootListIndex index;
-        int offset;
-        if (IsMaterializableFromFrame(src_object, &offset)) {
-          __ Ldr(dst, MemOperand(fp, offset));
+        int slot;
+        if (IsMaterializableFromFrame(src_object, &slot)) {
+          __ Ldr(dst, g.SlotToMemOperand(slot, masm()));
         } else if (IsMaterializableFromRoot(src_object, &index)) {
           __ LoadRoot(dst, index);
         } else {
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 26a2896..d90deae 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -289,8 +289,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -413,9 +418,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MRR;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -1283,6 +1286,9 @@
   VisitRR(this, kArm64Float64ToUint32, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kArm64Float64ToUint32, node);
+}
 
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   Arm64OperandGenerator g(this);
@@ -1628,20 +1634,20 @@
     Node* node) {
   Arm64OperandGenerator g(this);
 
+  bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
   bool to_native_stack = descriptor->UseNativeStack();
 
+  bool always_claim = to_native_stack != from_native_stack;
+
   int claim_count = static_cast<int>(arguments->size());
   int slot = claim_count - 1;
-  if (to_native_stack) {
-    // Native stack must always be aligned to 16 (2 words).
-    claim_count = RoundUp(claim_count, 2);
-  }
-  // TODO(titzer): claim and poke probably take small immediates.
   // Bump the stack pointer(s).
-  if (claim_count > 0) {
+  if (claim_count > 0 || always_claim) {
+    // TODO(titzer): claim and poke probably take small immediates.
     // TODO(titzer): it would be better to bump the csp here only
     //                and emit paired stores with increment for non c frames.
     ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
+    // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
     Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
   }
 
@@ -1662,6 +1668,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 namespace {
 
@@ -1674,6 +1681,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1789,85 +1799,72 @@
   }
 }
 
-}  // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
-                                      BasicBlock* fbranch) {
-  OperandGenerator g(this);
-  Node* user = branch;
-  Node* value = branch->InputAt(0);
-
-  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
-  // Try to combine with comparisons against 0 by simply inverting the branch.
-  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
-    Int32BinopMatcher m(value);
-    if (m.right().Is(0)) {
-      user = value;
-      value = m.left().node();
-      cont.Negate();
-    } else {
-      break;
-    }
-  }
-
-  // Try to combine the branch with a comparison.
-  if (CanCover(user, value)) {
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  while (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWord32Compare(this, value, &cont);
+      case IrOpcode::kWord32Equal: {
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(selector, value, cont);
+      }
       case IrOpcode::kInt32LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kInt32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kUint32LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kUint32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kWord64Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kInt64LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kInt64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kUint64LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kUint64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kFloat32Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThan:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat64Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThan:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kProjection:
         // Check if this is the overflow output projection of an
         // <Operation>WithOverflow node.
@@ -1879,24 +1876,24 @@
           // *AFTER* this branch).
           Node* const node = value->InputAt(0);
           Node* const result = NodeProperties::FindProjection(node, 0);
-          if (result == nullptr || IsDefined(result)) {
+          if (result == nullptr || selector->IsDefined(result)) {
             switch (node->opcode()) {
               case IrOpcode::kInt32AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kArm64Add32, kArithmeticImm, cont);
               case IrOpcode::kInt32SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kArm64Sub32, kArithmeticImm, cont);
               case IrOpcode::kInt64AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
+                                                     kArithmeticImm, cont);
               case IrOpcode::kInt64SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
+                                                     kArithmeticImm, cont);
               default:
                 break;
             }
@@ -1904,55 +1901,84 @@
         }
         break;
       case IrOpcode::kInt32Add:
-        return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
+        return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
                                 kArithmeticImm);
       case IrOpcode::kInt32Sub:
-        return VisitWord32Compare(this, value, &cont);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kWord32And: {
         Int32BinopMatcher m(value);
-        if (m.right().HasValue() &&
+        if (cont->IsBranch() && m.right().HasValue() &&
             (base::bits::CountPopulation32(m.right().Value()) == 1)) {
           // If the mask has only one bit set, we can use tbz/tbnz.
-          DCHECK((cont.condition() == kEqual) ||
-                 (cont.condition() == kNotEqual));
-          Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
-               g.UseRegister(m.left().node()),
-               g.TempImmediate(
-                   base::bits::CountTrailingZeros32(m.right().Value())),
-               g.Label(cont.true_block()), g.Label(cont.false_block()));
+          DCHECK((cont->condition() == kEqual) ||
+                 (cont->condition() == kNotEqual));
+          selector->Emit(
+              cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+              g.UseRegister(m.left().node()),
+              g.TempImmediate(
+                  base::bits::CountTrailingZeros32(m.right().Value())),
+              g.Label(cont->true_block()), g.Label(cont->false_block()));
           return;
         }
-        return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
+        return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
                                 kLogical32Imm);
       }
       case IrOpcode::kWord64And: {
         Int64BinopMatcher m(value);
-        if (m.right().HasValue() &&
+        if (cont->IsBranch() && m.right().HasValue() &&
             (base::bits::CountPopulation64(m.right().Value()) == 1)) {
           // If the mask has only one bit set, we can use tbz/tbnz.
-          DCHECK((cont.condition() == kEqual) ||
-                 (cont.condition() == kNotEqual));
-          Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
-               g.UseRegister(m.left().node()),
-               g.TempImmediate(
-                   base::bits::CountTrailingZeros64(m.right().Value())),
-               g.Label(cont.true_block()), g.Label(cont.false_block()));
+          DCHECK((cont->condition() == kEqual) ||
+                 (cont->condition() == kNotEqual));
+          selector->Emit(
+              cont->Encode(kArm64TestAndBranch), g.NoOutput(),
+              g.UseRegister(m.left().node()),
+              g.TempImmediate(
+                  base::bits::CountTrailingZeros64(m.right().Value())),
+              g.Label(cont->true_block()), g.Label(cont->false_block()));
           return;
         }
-        return VisitWordCompare(this, value, kArm64Tst, &cont, true,
+        return VisitWordCompare(selector, value, kArm64Tst, cont, true,
                                 kLogical64Imm);
       }
       default:
         break;
     }
+    break;
   }
 
   // Branch could not be combined with a compare, compare against 0 and branch.
-  Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
-       g.UseRegister(value), g.Label(cont.true_block()),
-       g.Label(cont.false_block()));
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+                   g.UseRegister(value), g.Label(cont->true_block()),
+                   g.Label(cont->false_block()));
+  } else {
+    DCHECK(cont->IsDeoptimize());
+    selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
+                             g.UseRegister(value), g.UseRegister(value),
+                             cont->frame_state());
+  }
 }
 
+}  // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   Arm64OperandGenerator g(this);
@@ -1984,7 +2010,7 @@
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
   Node* const user = node;
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(user);
   if (m.right().Is(0)) {
     Node* const value = m.left().node();
@@ -2018,32 +2044,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitWord64Equal(Node* const node) {
   Node* const user = node;
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(user);
   if (m.right().Is(0)) {
     Node* const value = m.left().node();
@@ -2064,7 +2092,7 @@
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                          kArithmeticImm, &cont);
   }
@@ -2075,7 +2103,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                          kArithmeticImm, &cont);
   }
@@ -2086,7 +2114,7 @@
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                          &cont);
   }
@@ -2097,7 +2125,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                          &cont);
   }
@@ -2107,61 +2135,65 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index abcf828..89bb619 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -616,12 +616,6 @@
     NewNode(javascript()->CallRuntime(Runtime::kTraceEnter));
   }
 
-  // Visit illegal re-declaration and bail out if it exists.
-  if (scope->HasIllegalRedeclaration()) {
-    VisitForEffect(scope->GetIllegalRedeclaration());
-    return;
-  }
-
   // Visit declarations within the function scope.
   VisitDeclarations(scope->declarations());
 
@@ -646,7 +640,7 @@
   }
 
   NonLiveFrameStateSlotReplacer replacer(
-      &state_values_cache_, jsgraph()->UndefinedConstant(),
+      &state_values_cache_, jsgraph()->OptimizedOutConstant(),
       liveness_analyzer()->local_count(), local_zone());
   Variable* arguments = info()->scope()->arguments();
   if (arguments != nullptr && arguments->IsStackAllocated()) {
@@ -1448,9 +1442,11 @@
   }
   try_control.EndTry();
 
-  // Clear message object as we enter the catch block.
-  Node* the_hole = jsgraph()->TheHoleConstant();
-  NewNode(javascript()->StoreMessage(), the_hole);
+  // If requested, clear message object as we enter the catch block.
+  if (stmt->clear_pending_message()) {
+    Node* the_hole = jsgraph()->TheHoleConstant();
+    NewNode(javascript()->StoreMessage(), the_hole);
+  }
 
   // Create a catch scope that binds the exception.
   Node* exception = try_control.GetExceptionNode();
@@ -1644,8 +1640,7 @@
     }
   }
 
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
+  // Set both the prototype and constructor to have fast properties.
   prototype = environment()->Pop();
   literal = environment()->Pop();
   const Operator* op =
@@ -1725,7 +1720,7 @@
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralObject(
       expr->constant_properties(), expr->ComputeFlags(true),
-      expr->literal_index());
+      expr->literal_index(), expr->properties_count());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -1900,13 +1895,6 @@
     }
   }
 
-  // Transform literals that contain functions to fast properties.
-  literal = environment()->Top();  // Reload from operand stack.
-  if (expr->has_function()) {
-    const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
-    NewNode(op, literal);
-  }
-
   ast_context()->ProduceValue(environment()->Pop());
 }
 
@@ -1928,7 +1916,7 @@
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralArray(
       expr->constant_elements(), expr->ComputeFlags(true),
-      expr->literal_index());
+      expr->literal_index(), expr->values()->length());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -2576,22 +2564,12 @@
     return VisitCallJSRuntime(expr);
   }
 
-  const Runtime::Function* function = expr->function();
-
-  // TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
-  if (function->function_id == Runtime::kInlineGeneratorNext ||
-      function->function_id == Runtime::kInlineGeneratorReturn ||
-      function->function_id == Runtime::kInlineGeneratorThrow) {
-    ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
-    return SetStackOverflow();
-  }
-
   // Evaluate all arguments to the runtime call.
   ZoneList<Expression*>* args = expr->arguments();
   VisitForValues(args);
 
   // Create node to perform the runtime call.
-  Runtime::FunctionId functionId = function->function_id;
+  Runtime::FunctionId functionId = expr->function()->function_id;
   const Operator* call = javascript()->CallRuntime(functionId, args->length());
   FrameStateBeforeAndAfter states(this, expr->CallId());
   Node* value = ProcessArguments(call, args->length());
@@ -2704,11 +2682,9 @@
   }
 
   // Convert old value into a number.
-  if (!is_strong(language_mode())) {
-    old_value = NewNode(javascript()->ToNumber(), old_value);
-    PrepareFrameState(old_value, expr->ToNumberId(),
-                      OutputFrameStateCombine::Push());
-  }
+  old_value = NewNode(javascript()->ToNumber(), old_value);
+  PrepareFrameState(old_value, expr->ToNumberId(),
+                    OutputFrameStateCombine::Push());
 
   // Create a proper eager frame state for the stores.
   environment()->Push(old_value);
@@ -2731,10 +2707,8 @@
     FrameStateBeforeAndAfter states(this, BailoutId::None());
     value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
                           expr->binary_op(), expr->CountBinOpFeedbackId());
-    // This should never deoptimize outside strong mode because otherwise we
-    // have converted to number before.
-    states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
-                                                       : BailoutId::None(),
+    // This should never deoptimize because we have converted to number before.
+    states.AddToNode(value, BailoutId::None(),
                      OutputFrameStateCombine::Ignore());
   }
 
@@ -2821,8 +2795,57 @@
   }
 }
 
+void AstGraphBuilder::VisitLiteralCompareNil(CompareOperation* expr,
+                                             Expression* sub_expr,
+                                             Node* nil_value) {
+  const Operator* op = nullptr;
+  switch (expr->op()) {
+    case Token::EQ:
+      op = javascript()->Equal();
+      break;
+    case Token::EQ_STRICT:
+      op = javascript()->StrictEqual();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  VisitForValue(sub_expr);
+  FrameStateBeforeAndAfter states(this, sub_expr->id());
+  Node* value_to_compare = environment()->Pop();
+  Node* value = NewNode(op, value_to_compare, nil_value);
+  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  return ast_context()->ProduceValue(value);
+}
+
+void AstGraphBuilder::VisitLiteralCompareTypeof(CompareOperation* expr,
+                                                Expression* sub_expr,
+                                                Handle<String> check) {
+  VisitTypeofExpression(sub_expr);
+  FrameStateBeforeAndAfter states(this, sub_expr->id());
+  Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
+  Node* value = NewNode(javascript()->StrictEqual(), typeof_arg,
+                        jsgraph()->Constant(check));
+  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  return ast_context()->ProduceValue(value);
+}
 
 void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  // Check for a few fast cases. The AST visiting behavior must be in sync
+  // with the full codegen: We don't push both left and right values onto
+  // the expression stack when one side is a special-case literal.
+  Expression* sub_expr = nullptr;
+  Handle<String> check;
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    return VisitLiteralCompareTypeof(expr, sub_expr, check);
+  }
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    return VisitLiteralCompareNil(expr, sub_expr,
+                                  jsgraph()->UndefinedConstant());
+  }
+  if (expr->IsLiteralCompareNull(&sub_expr)) {
+    return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
+  }
+
   const Operator* op;
   switch (expr->op()) {
     case Token::EQ:
@@ -2850,6 +2873,7 @@
       op = javascript()->GreaterThanOrEqual();
       break;
     case Token::INSTANCEOF:
+      DCHECK(!FLAG_harmony_instanceof);
       op = javascript()->InstanceOf();
       break;
     case Token::IN:
@@ -2984,23 +3008,25 @@
   ast_context()->ProduceValue(value);
 }
 
-
-void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
-  Node* operand;
-  if (expr->expression()->IsVariableProxy()) {
+void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
+  if (expr->IsVariableProxy()) {
     // Typeof does not throw a reference error on global variables, hence we
     // perform a non-contextual load in case the operand is a variable proxy.
-    VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    VariableProxy* proxy = expr->AsVariableProxy();
     VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
     FrameStateBeforeAndAfter states(this, BeforeId(proxy));
-    operand =
-        BuildVariableLoad(proxy->var(), expr->expression()->id(), states, pair,
+    Node* load =
+        BuildVariableLoad(proxy->var(), expr->id(), states, pair,
                           OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
+    environment()->Push(load);
   } else {
-    VisitForValue(expr->expression());
-    operand = environment()->Pop();
+    VisitForValue(expr);
   }
-  Node* value = NewNode(javascript()->TypeOf(), operand);
+}
+
+void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  VisitTypeofExpression(expr->expression());
+  Node* value = NewNode(javascript()->TypeOf(), environment()->Pop());
   ast_context()->ProduceValue(value);
 }
 
@@ -3052,7 +3078,7 @@
 
 
 LanguageMode AstGraphBuilder::language_mode() const {
-  return info()->language_mode();
+  return current_scope()->language_mode();
 }
 
 
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 6cff237..e206db0 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -419,11 +419,20 @@
   void VisitTypeof(UnaryOperation* expr);
   void VisitNot(UnaryOperation* expr);
 
+  // Dispatched from VisitTypeof, VisitLiteralCompareTypeof.
+  void VisitTypeofExpression(Expression* expr);
+
   // Dispatched from VisitBinaryOperation.
   void VisitComma(BinaryOperation* expr);
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
 
+  // Dispatched from VisitCompareOperation.
+  void VisitLiteralCompareNil(CompareOperation* expr, Expression* sub_expr,
+                              Node* nil_value);
+  void VisitLiteralCompareTypeof(CompareOperation* expr, Expression* sub_expr,
+                                 Handle<String> check);
+
   // Dispatched from VisitForInStatement.
   void VisitForInAssignment(Expression* expr, Node* value,
                             const VectorSlotPair& feedback,
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index bc56e73..427612c 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -15,11 +15,11 @@
 BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
                                      Zone* zone)
     : AdvancedReducer(editor),
+      jsgraph_(js_graph),
       node_conditions_(zone, js_graph->graph()->NodeCount()),
       zone_(zone),
       dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
 
-
 BranchElimination::~BranchElimination() {}
 
 
@@ -27,6 +27,9 @@
   switch (node->opcode()) {
     case IrOpcode::kDead:
       return NoChange();
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
+      return ReduceDeoptimizeConditional(node);
     case IrOpcode::kMerge:
       return ReduceMerge(node);
     case IrOpcode::kLoop:
@@ -76,6 +79,41 @@
   return TakeConditionsFromFirstControl(node);
 }
 
+Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
+         node->opcode() == IrOpcode::kDeoptimizeUnless);
+  bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+  Node* condition = NodeProperties::GetValueInput(node, 0);
+  Node* frame_state = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  ControlPathConditions const* conditions = node_conditions_.Get(control);
+  // If we do not know anything about the predecessor, do not propagate just
+  // yet because we will have to recompute anyway once we compute the
+  // predecessor.
+  if (conditions == nullptr) {
+    DCHECK_NULL(node_conditions_.Get(node));
+    return NoChange();
+  }
+  Maybe<bool> condition_value = conditions->LookupCondition(condition);
+  if (condition_value.IsJust()) {
+    // If we know the condition we can discard the branch.
+    if (condition_is_true == condition_value.FromJust()) {
+      // We don't need to update the conditions here, because we're replacing
+      // with the {control} node that already contains the right information.
+      return Replace(control);
+    } else {
+      control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+                                 frame_state, effect, control);
+      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+      NodeProperties::MergeControlToEnd(graph(), common(), control);
+      Revisit(graph()->end());
+      return Replace(dead());
+    }
+  }
+  return UpdateConditions(
+      node, conditions->AddCondition(zone_, condition, condition_is_true));
+}
 
 Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
   // Add the condition to the list arriving from the input branch.
@@ -264,6 +302,12 @@
   return false;
 }
 
+Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
+
+CommonOperatorBuilder* BranchElimination::common() const {
+  return jsgraph()->common();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/branch-elimination.h b/src/compiler/branch-elimination.h
index a7ac926..7abeeca 100644
--- a/src/compiler/branch-elimination.h
+++ b/src/compiler/branch-elimination.h
@@ -11,6 +11,8 @@
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class CommonOperatorBuilder;
 class JSGraph;
 
 
@@ -73,6 +75,7 @@
   };
 
   Reduction ReduceBranch(Node* node);
+  Reduction ReduceDeoptimizeConditional(Node* node);
   Reduction ReduceIf(Node* node, bool is_true_branch);
   Reduction ReduceLoop(Node* node);
   Reduction ReduceMerge(Node* node);
@@ -84,7 +87,11 @@
                              const ControlPathConditions* conditions);
 
   Node* dead() const { return dead_; }
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  CommonOperatorBuilder* common() const;
 
+  JSGraph* const jsgraph_;
   PathConditionsForControlNodes node_conditions_;
   Zone* zone_;
   Node* dead_;
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index e28c19d..2249cbc 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -444,7 +444,7 @@
       bytecode_array_(handle(info->shared_info()->bytecode_array())),
       exception_handler_table_(
           handle(HandlerTable::cast(bytecode_array()->handler_table()))),
-      feedback_vector_(info->feedback_vector()),
+      feedback_vector_(handle(info->shared_info()->feedback_vector())),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
@@ -570,17 +570,11 @@
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdaSmi8() {
+void BytecodeGraphBuilder::VisitLdaSmi() {
   Node* node = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdaConstantWide() {
-  Node* node =
-      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
-  environment()->BindAccumulator(node);
-}
-
 void BytecodeGraphBuilder::VisitLdaConstant() {
   Node* node =
       jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -629,8 +623,6 @@
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-void BytecodeGraphBuilder::VisitMovWide() { VisitMov(); }
-
 void BytecodeGraphBuilder::BuildLoadGlobal(
     TypeofMode typeof_mode) {
   FrameStateBeforeAndAfter states(this);
@@ -652,14 +644,6 @@
   BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
 }
 
-void BytecodeGraphBuilder::VisitLdaGlobalWide() {
-  BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofWide() {
-  BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
-}
-
 void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
   Handle<Name> name =
@@ -681,14 +665,6 @@
   BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStaGlobalSloppyWide() {
-  BuildStoreGlobal(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrictWide() {
-  BuildStoreGlobal(LanguageMode::STRICT);
-}
-
 void BytecodeGraphBuilder::VisitLdaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
@@ -704,8 +680,6 @@
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdaContextSlotWide() { VisitLdaContextSlot(); }
-
 void BytecodeGraphBuilder::VisitStaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
@@ -718,8 +692,6 @@
   NewNode(op, context, value);
 }
 
-void BytecodeGraphBuilder::VisitStaContextSlotWide() { VisitStaContextSlot(); }
-
 void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
   FrameStateBeforeAndAfter states(this);
   Node* name =
@@ -752,12 +724,6 @@
   environment()->BindAccumulator(store, &states);
 }
 
-void BytecodeGraphBuilder::VisitLdaLookupSlotWide() { VisitLdaLookupSlot(); }
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide() {
-  VisitLdaLookupSlotInsideTypeof();
-}
-
 void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
   BuildStaLookupSlot(LanguageMode::SLOPPY);
 }
@@ -766,14 +732,6 @@
   BuildStaLookupSlot(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide() {
-  VisitStaLookupSlotSloppy();
-}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide() {
-  VisitStaLookupSlotStrict();
-}
-
 void BytecodeGraphBuilder::BuildNamedLoad() {
   FrameStateBeforeAndAfter states(this);
   Node* object =
@@ -790,8 +748,6 @@
 
 void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
 
-void BytecodeGraphBuilder::VisitLoadICWide() { BuildNamedLoad(); }
-
 void BytecodeGraphBuilder::BuildKeyedLoad() {
   FrameStateBeforeAndAfter states(this);
   Node* key = environment()->LookupAccumulator();
@@ -807,8 +763,6 @@
 
 void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
 
-void BytecodeGraphBuilder::VisitKeyedLoadICWide() { BuildKeyedLoad(); }
-
 void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
@@ -832,14 +786,6 @@
   BuildNamedStore(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStoreICSloppyWide() {
-  BuildNamedStore(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStoreICStrictWide() {
-  BuildNamedStore(LanguageMode::STRICT);
-}
-
 void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
@@ -863,14 +809,6 @@
   BuildKeyedStore(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide() {
-  BuildKeyedStore(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide() {
-  BuildKeyedStore(LanguageMode::STRICT);
-}
-
 void BytecodeGraphBuilder::VisitPushContext() {
   Node* new_context = environment()->LookupAccumulator();
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
@@ -888,14 +826,12 @@
   Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   PretenureFlag tenured =
-      bytecode_iterator().GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+      bytecode_iterator().GetFlagOperand(1) ? TENURED : NOT_TENURED;
   const Operator* op = javascript()->CreateClosure(shared_info, tenured);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
 }
 
-void BytecodeGraphBuilder::VisitCreateClosureWide() { VisitCreateClosure(); }
-
 void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
   FrameStateBeforeAndAfter states(this);
   const Operator* op = javascript()->CreateArguments(type);
@@ -921,61 +857,39 @@
   environment()->BindAccumulator(literal, &states);
 }
 
-void BytecodeGraphBuilder::BuildCreateRegExpLiteral() {
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
   Handle<String> constant_pattern =
       Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
+  int literal_flags = bytecode_iterator().GetFlagOperand(2);
   const Operator* op = javascript()->CreateLiteralRegExp(
       constant_pattern, literal_flags, literal_index);
   BuildCreateLiteral(op);
 }
 
-void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
-  BuildCreateRegExpLiteral();
-}
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide() {
-  BuildCreateRegExpLiteral();
-}
-
-void BytecodeGraphBuilder::BuildCreateArrayLiteral() {
+void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
   Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
+  int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  int number_of_elements = constant_elements->length();
   const Operator* op = javascript()->CreateLiteralArray(
-      constant_elements, literal_flags, literal_index);
-  BuildCreateLiteral(op);
-}
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
-  BuildCreateArrayLiteral();
-}
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteralWide() {
-  BuildCreateArrayLiteral();
-}
-
-void BytecodeGraphBuilder::BuildCreateObjectLiteral() {
-  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0));
-  int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
-  const Operator* op = javascript()->CreateLiteralObject(
-      constant_properties, literal_flags, literal_index);
+      constant_elements, literal_flags, literal_index, number_of_elements);
   BuildCreateLiteral(op);
 }
 
 void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
-  BuildCreateObjectLiteral();
+  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  // TODO(mstarzinger): Thread through number of properties.
+  int number_of_properties = constant_properties->length() / 2;
+  const Operator* op = javascript()->CreateLiteralObject(
+      constant_properties, literal_flags, literal_index, number_of_properties);
+  BuildCreateLiteral(op);
 }
 
-void BytecodeGraphBuilder::VisitCreateObjectLiteralWide() {
-  BuildCreateObjectLiteral();
-}
-
-
 Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
                                                  Node* callee,
                                                  interpreter::Register receiver,
@@ -1013,17 +927,15 @@
 
 void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
 
-void BytecodeGraphBuilder::VisitCallWide() {
-  BuildCall(TailCallMode::kDisallow);
+void BytecodeGraphBuilder::VisitTailCall() {
+  TailCallMode tail_call_mode =
+      bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
+          ? TailCallMode::kAllow
+          : TailCallMode::kDisallow;
+  BuildCall(tail_call_mode);
 }
 
-void BytecodeGraphBuilder::VisitTailCall() { BuildCall(TailCallMode::kAllow); }
-
-void BytecodeGraphBuilder::VisitTailCallWide() {
-  BuildCall(TailCallMode::kAllow);
-}
-
-void BytecodeGraphBuilder::BuildCallJSRuntime() {
+void BytecodeGraphBuilder::VisitCallJSRuntime() {
   FrameStateBeforeAndAfter states(this);
   Node* callee =
       BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
@@ -1036,10 +948,6 @@
   environment()->BindAccumulator(value, &states);
 }
 
-void BytecodeGraphBuilder::VisitCallJSRuntime() { BuildCallJSRuntime(); }
-
-void BytecodeGraphBuilder::VisitCallJSRuntimeWide() { BuildCallJSRuntime(); }
-
 Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
     const Operator* call_runtime_op, interpreter::Register first_arg,
     size_t arity) {
@@ -1053,10 +961,10 @@
   return value;
 }
 
-void BytecodeGraphBuilder::BuildCallRuntime() {
+void BytecodeGraphBuilder::VisitCallRuntime() {
   FrameStateBeforeAndAfter states(this);
-  Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+      bytecode_iterator().GetRuntimeIdOperand(0));
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
@@ -1066,14 +974,10 @@
   environment()->BindAccumulator(value, &states);
 }
 
-void BytecodeGraphBuilder::VisitCallRuntime() { BuildCallRuntime(); }
-
-void BytecodeGraphBuilder::VisitCallRuntimeWide() { BuildCallRuntime(); }
-
-void BytecodeGraphBuilder::BuildCallRuntimeForPair() {
+void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
   FrameStateBeforeAndAfter states(this);
-  Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+      bytecode_iterator().GetRuntimeIdOperand(0));
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
   interpreter::Register first_return =
@@ -1085,12 +989,18 @@
   environment()->BindRegistersToProjections(first_return, return_pair, &states);
 }
 
-void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
-  BuildCallRuntimeForPair();
-}
+void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
+  FrameStateBeforeAndAfter states(this);
+  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+      bytecode_iterator().GetRuntimeIdOperand(0));
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
-void BytecodeGraphBuilder::VisitCallRuntimeForPairWide() {
-  BuildCallRuntimeForPair();
+  // Create node to perform the runtime call. Turbofan will take care of the
+  // lowering.
+  const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+  Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+  environment()->BindAccumulator(value, &states);
 }
 
 Node* BytecodeGraphBuilder::ProcessCallNewArguments(
@@ -1108,7 +1018,7 @@
   return value;
 }
 
-void BytecodeGraphBuilder::BuildCallConstruct() {
+void BytecodeGraphBuilder::VisitNew() {
   FrameStateBeforeAndAfter states(this);
   interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
@@ -1124,10 +1034,6 @@
   environment()->BindAccumulator(value, &states);
 }
 
-void BytecodeGraphBuilder::VisitNew() { BuildCallConstruct(); }
-
-void BytecodeGraphBuilder::VisitNewWide() { BuildCallConstruct(); }
-
 void BytecodeGraphBuilder::BuildThrow() {
   FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
@@ -1282,10 +1188,6 @@
   BuildCompareOp(javascript()->StrictEqual());
 }
 
-void BytecodeGraphBuilder::VisitTestNotEqualStrict() {
-  BuildCompareOp(javascript()->StrictNotEqual());
-}
-
 void BytecodeGraphBuilder::VisitTestLessThan() {
   BuildCompareOp(javascript()->LessThan());
 }
@@ -1307,6 +1209,7 @@
 }
 
 void BytecodeGraphBuilder::VisitTestInstanceOf() {
+  DCHECK(!FLAG_harmony_instanceof);
   BuildCompareOp(javascript()->InstanceOf());
 }
 
@@ -1332,7 +1235,6 @@
 
 void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
 
-void BytecodeGraphBuilder::VisitJumpConstantWide() { BuildJump(); }
 
 void BytecodeGraphBuilder::VisitJumpIfTrue() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
@@ -1342,10 +1244,6 @@
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide() {
-  BuildJumpIfEqual(jsgraph()->TrueConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfFalse() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
@@ -1354,10 +1252,6 @@
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide() {
-  BuildJumpIfEqual(jsgraph()->FalseConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
@@ -1366,10 +1260,6 @@
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide() {
-  BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
@@ -1378,20 +1268,12 @@
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide() {
-  BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
 
 void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
   BuildJumpIfNotHole();
 }
 
-void BytecodeGraphBuilder::VisitJumpIfNotHoleConstantWide() {
-  BuildJumpIfNotHole();
-}
-
 void BytecodeGraphBuilder::VisitJumpIfNull() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
@@ -1400,10 +1282,6 @@
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfNullConstantWide() {
-  BuildJumpIfEqual(jsgraph()->NullConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfUndefined() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
@@ -1412,10 +1290,6 @@
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide() {
-  BuildJumpIfEqual(jsgraph()->UndefinedConstant());
-}
-
 void BytecodeGraphBuilder::VisitStackCheck() {
   FrameStateBeforeAndAfter states(this);
   Node* node = NewNode(javascript()->StackCheck());
@@ -1451,8 +1325,6 @@
 
 void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
 
-void BytecodeGraphBuilder::VisitForInPrepareWide() { BuildForInPrepare(); }
-
 void BytecodeGraphBuilder::VisitForInDone() {
   FrameStateBeforeAndAfter states(this);
   Node* index =
@@ -1482,8 +1354,6 @@
 
 void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
 
-void BytecodeGraphBuilder::VisitForInNextWide() { BuildForInNext(); }
-
 void BytecodeGraphBuilder::VisitForInStep() {
   FrameStateBeforeAndAfter states(this);
   Node* index =
@@ -1492,6 +1362,21 @@
   environment()->BindAccumulator(index, &states);
 }
 
+void BytecodeGraphBuilder::VisitWide() {
+  // Consumed by the BytecodeArrayIterator.
+  UNREACHABLE();
+}
+
+void BytecodeGraphBuilder::VisitExtraWide() {
+  // Consumed by the BytecodeArrayIterator.
+  UNREACHABLE();
+}
+
+void BytecodeGraphBuilder::VisitIllegal() {
+  // Never present in valid bytecode.
+  UNREACHABLE();
+}
+
 void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
   if (merge_environments_[current_offset] != nullptr) {
     if (environment() != nullptr) {
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 2fa5967..c842c24 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -111,9 +111,6 @@
                                     size_t arity);
 
   void BuildCreateLiteral(const Operator* op);
-  void BuildCreateRegExpLiteral();
-  void BuildCreateArrayLiteral();
-  void BuildCreateObjectLiteral();
   void BuildCreateArguments(CreateArgumentsType type);
   void BuildLoadGlobal(TypeofMode typeof_mode);
   void BuildStoreGlobal(LanguageMode language_mode);
@@ -124,10 +121,6 @@
   void BuildLdaLookupSlot(TypeofMode typeof_mode);
   void BuildStaLookupSlot(LanguageMode language_mode);
   void BuildCall(TailCallMode tail_call_mode);
-  void BuildCallJSRuntime();
-  void BuildCallRuntime();
-  void BuildCallRuntimeForPair();
-  void BuildCallConstruct();
   void BuildThrow();
   void BuildBinaryOp(const Operator* op);
   void BuildCompareOp(const Operator* op);
@@ -135,6 +128,7 @@
   void BuildCastOperator(const Operator* op);
   void BuildForInPrepare();
   void BuildForInNext();
+  void BuildInvokeIntrinsic();
 
   // Control flow plumbing.
   void BuildJump();
diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc
index 783d9d6..b38e529 100644
--- a/src/compiler/c-linkage.cc
+++ b/src/compiler/c-linkage.cc
@@ -123,6 +123,26 @@
       d20.bit() | d21.bit() | d22.bit() | d23.bit() | d24.bit() | d25.bit() | \
       d26.bit() | d27.bit() | d28.bit() | d29.bit() | d30.bit() | d31.bit()
 
+#elif V8_TARGET_ARCH_S390X
+// ===========================================================================
+// == s390x ==================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r2, r3, r4, r5, r6
+#define CALLEE_SAVE_REGISTERS \
+  r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
+#define CALLEE_SAVE_FP_REGISTERS                                        \
+  d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
+      d14.bit() | d15.bit()
+
+#elif V8_TARGET_ARCH_S390
+// ===========================================================================
+// == s390 ===================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r2, r3, r4, r5, r6
+#define CALLEE_SAVE_REGISTERS \
+  r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
+#define CALLEE_SAVE_FP_REGISTERS (d4.bit() | d6.bit())
+
 #else
 // ===========================================================================
 // == unknown ================================================================
@@ -210,6 +230,11 @@
   // The target for C calls is always an address (i.e. machine pointer).
   MachineType target_type = MachineType::Pointer();
   LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
+  if (set_initialize_root_flag) {
+    flags |= CallDescriptor::kInitializeRootRegister;
+  }
+
   return new (zone) CallDescriptor(  // --
       CallDescriptor::kCallAddress,  // kind
       target_type,                   // target MachineType
@@ -220,10 +245,7 @@
       Operator::kNoProperties,       // properties
       kCalleeSaveRegisters,          // callee-saved registers
       kCalleeSaveFPRegisters,        // callee-saved fp regs
-      set_initialize_root_flag ?     // flags
-          CallDescriptor::kInitializeRootRegister
-                               : CallDescriptor::kNoFlags,
-      "c-call");
+      flags, "c-call");
 }
 
 }  // namespace compiler
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index e217f37..907b36a 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -55,6 +55,8 @@
       return ObjectIsSmi(node);
     case IrOpcode::kObjectIsNumber:
       return ObjectIsNumber(node);
+    case IrOpcode::kObjectIsUndetectable:
+      return ObjectIsUndetectable(node);
     default:
       return NoChange();
   }
@@ -603,6 +605,13 @@
       graph()->start(), control);
 }
 
+Node* ChangeLowering::LoadMapBitField(Node* map) {
+  return graph()->NewNode(
+      machine()->Load(MachineType::Uint8()), map,
+      jsgraph()->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag),
+      graph()->start(), graph()->start());
+}
+
 Node* ChangeLowering::LoadMapInstanceType(Node* map) {
   return graph()->NewNode(
       machine()->Load(MachineType::Uint8()), map,
@@ -650,6 +659,31 @@
   return Changed(node);
 }
 
+Reduction ChangeLowering::ObjectIsUndetectable(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  // TODO(bmeurer): Optimize somewhat based on input type.
+  Node* check = IsSmi(input);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = graph()->NewNode(
+      machine()->Word32Equal(),
+      graph()->NewNode(
+          machine()->Word32Equal(),
+          graph()->NewNode(machine()->Word32And(),
+                           jsgraph()->Uint32Constant(1 << Map::kIsUndetectable),
+                           LoadMapBitField(LoadHeapObjectMap(input, if_false))),
+          jsgraph()->Int32Constant(0)),
+      jsgraph()->Int32Constant(0));
+  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  node->ReplaceInput(0, vtrue);
+  node->AppendInput(graph()->zone(), vfalse);
+  node->AppendInput(graph()->zone(), control);
+  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+  return Changed(node);
+}
+
 Reduction ChangeLowering::ObjectIsSmi(Node* node) {
   node->ReplaceInput(0,
                      graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
index defadd9..7e5078b 100644
--- a/src/compiler/change-lowering.h
+++ b/src/compiler/change-lowering.h
@@ -58,11 +58,13 @@
 
   Node* IsSmi(Node* value);
   Node* LoadHeapObjectMap(Node* object, Node* control);
+  Node* LoadMapBitField(Node* map);
   Node* LoadMapInstanceType(Node* map);
 
   Reduction ObjectIsNumber(Node* node);
   Reduction ObjectIsReceiver(Node* node);
   Reduction ObjectIsSmi(Node* node);
+  Reduction ObjectIsUndetectable(Node* node);
 
   Node* ComputeIndex(const ElementAccess& access, Node* const key);
   Graph* graph() const;
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 7295948..7de32c5 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -139,6 +139,19 @@
   Instruction* instr_;
 };
 
+// Eager deoptimization exit.
+class DeoptimizationExit : public ZoneObject {
+ public:
+  explicit DeoptimizationExit(int deoptimization_id)
+      : deoptimization_id_(deoptimization_id) {}
+
+  int deoptimization_id() const { return deoptimization_id_; }
+  Label* label() { return &label_; }
+
+ private:
+  int const deoptimization_id_;
+  Label label_;
+};
 
 // Generator for out-of-line code that is emitted after the main code is done.
 class OutOfLineCode : public ZoneObject {
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 712cfe0..086da56 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -31,7 +31,6 @@
   size_t const target_count_;
 };
 
-
 CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
                              InstructionSequence* code, CompilationInfo* info)
     : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
@@ -45,6 +44,7 @@
       resolver_(this),
       safepoints_(code->zone()),
       handlers_(code->zone()),
+      deoptimization_exits_(code->zone()),
       deoptimization_states_(code->zone()),
       deoptimization_literals_(code->zone()),
       inlined_function_count_(0),
@@ -56,12 +56,8 @@
   for (int i = 0; i < code->InstructionBlockCount(); ++i) {
     new (&labels_[i]) Label;
   }
-  if (code->ContainsCall()) {
-    frame->MarkNeedsFrame();
-  }
 }
 
-
 Handle<Code> CodeGenerator::GenerateCode() {
   CompilationInfo* info = this->info();
 
@@ -80,14 +76,11 @@
   }
   // Architecture-specific, linkage-specific prologue.
   info->set_prologue_offset(masm()->pc_offset());
-  AssemblePrologue();
-  if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
-    masm()->InitializeRootRegister();
-  }
 
   // Define deoptimization literals for all inlined functions.
   DCHECK_EQ(0u, deoptimization_literals_.size());
-  for (auto& inlined : info->inlined_functions()) {
+  for (const CompilationInfo::InlinedFunctionHolder& inlined :
+       info->inlined_functions()) {
     if (!inlined.shared_info.is_identical_to(info->shared_info())) {
       DefineDeoptimizationLiteral(inlined.shared_info);
     }
@@ -96,15 +89,19 @@
 
   // Define deoptimization literals for all unoptimized code objects of inlined
   // functions. This ensures unoptimized code is kept alive by optimized code.
-  for (auto& inlined : info->inlined_functions()) {
+  for (const CompilationInfo::InlinedFunctionHolder& inlined :
+       info->inlined_functions()) {
     if (!inlined.shared_info.is_identical_to(info->shared_info())) {
       DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
     }
   }
 
+  // Finish the frame.
+  frame()->AlignFrame(kFrameAlignmentInBytes);
+  AssembleSetupStackPointer();
   // Assemble all non-deferred blocks, followed by deferred ones.
   for (int deferred = 0; deferred < 2; ++deferred) {
-    for (auto const block : code()->instruction_blocks()) {
+    for (const InstructionBlock* block : code()->instruction_blocks()) {
       if (block->IsDeferred() == (deferred == 0)) {
         continue;
       }
@@ -141,9 +138,26 @@
         SNPrintF(buffer, " --");
         masm()->RecordComment(buffer_start);
       }
+
+      frame_access_state()->MarkHasFrame(block->needs_frame());
+
       masm()->bind(GetLabel(current_block_));
-      for (int i = block->code_start(); i < block->code_end(); ++i) {
-        AssembleInstruction(code()->InstructionAt(i));
+      if (block->must_construct_frame()) {
+        AssemblePrologue();
+        // We need to set up the root register after we assemble the prologue, to
+        // avoid clobbering callee-saved registers in case of C linkage and
+        // using the roots.
+        // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
+        if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
+          masm()->InitializeRootRegister();
+        }
+      }
+
+      if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+        AssembleBlock(block);
+      } else {
+        AssembleBlock(block);
       }
     }
   }
@@ -158,6 +172,12 @@
     }
   }
 
+  // Assemble all eager deoptimization exits.
+  for (DeoptimizationExit* exit : deoptimization_exits_) {
+    masm()->bind(exit->label());
+    AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER);
+  }
+
   // Ensure there is space for lazy deoptimization in the code.
   if (info->ShouldEnsureSpaceForLazyDeopt()) {
     int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
@@ -211,7 +231,8 @@
 
   // Emit a code line info recording stop event.
   void* line_info = recorder->DetachJITHandlerData();
-  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(
+                                AbstractCode::cast(*result), line_info));
 
   return result;
 }
@@ -232,7 +253,7 @@
       safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
   int stackSlotToSpillSlotDelta =
       frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
-  for (auto& operand : references->reference_operands()) {
+  for (const InstructionOperand& operand : references->reference_operands()) {
     if (operand.IsStackSlot()) {
       int index = LocationOperand::cast(operand).index();
       DCHECK(index >= 0);
@@ -250,16 +271,15 @@
   }
 }
 
-
 bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
-                                              int* offset_return) {
+                                              int* slot_return) {
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     if (info()->has_context() && object.is_identical_to(info()->context()) &&
         !info()->is_osr()) {
-      *offset_return = StandardFrameConstants::kContextOffset;
+      *slot_return = Frame::kContextSlot;
       return true;
     } else if (object.is_identical_to(info()->closure())) {
-      *offset_return = JavaScriptFrameConstants::kFunctionOffset;
+      *slot_return = Frame::kJSFunctionSlot;
       return true;
     }
   }
@@ -282,43 +302,82 @@
   return false;
 }
 
+void CodeGenerator::AssembleBlock(const InstructionBlock* block) {
+  for (int i = block->code_start(); i < block->code_end(); ++i) {
+    Instruction* instr = code()->InstructionAt(i);
+    AssembleInstruction(instr, block);
+  }
+}
 
-void CodeGenerator::AssembleInstruction(Instruction* instr) {
+void CodeGenerator::AssembleInstruction(Instruction* instr,
+                                        const InstructionBlock* block) {
   AssembleGaps(instr);
+  DCHECK_IMPLIES(
+      block->must_deconstruct_frame(),
+      instr != code()->InstructionAt(block->last_instruction_index()) ||
+          instr->IsRet() || instr->IsJump());
+  if (instr->IsJump() && block->must_deconstruct_frame()) {
+    AssembleDeconstructFrame();
+  }
   AssembleSourcePosition(instr);
   // Assemble architecture-specific code for the instruction.
   AssembleArchInstruction(instr);
 
   FlagsMode mode = FlagsModeField::decode(instr->opcode());
   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
-  if (mode == kFlags_branch) {
-    // Assemble a branch after this instruction.
-    InstructionOperandConverter i(this, instr);
-    RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
-    RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
+  switch (mode) {
+    case kFlags_branch: {
+      // Assemble a branch after this instruction.
+      InstructionOperandConverter i(this, instr);
+      RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
+      RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
 
-    if (true_rpo == false_rpo) {
-      // redundant branch.
-      if (!IsNextInAssemblyOrder(true_rpo)) {
-        AssembleArchJump(true_rpo);
+      if (true_rpo == false_rpo) {
+        // redundant branch.
+        if (!IsNextInAssemblyOrder(true_rpo)) {
+          AssembleArchJump(true_rpo);
+        }
+        return;
       }
-      return;
+      if (IsNextInAssemblyOrder(true_rpo)) {
+        // true block is next, can fall through if condition negated.
+        std::swap(true_rpo, false_rpo);
+        condition = NegateFlagsCondition(condition);
+      }
+      BranchInfo branch;
+      branch.condition = condition;
+      branch.true_label = GetLabel(true_rpo);
+      branch.false_label = GetLabel(false_rpo);
+      branch.fallthru = IsNextInAssemblyOrder(false_rpo);
+      // Assemble architecture-specific branch.
+      AssembleArchBranch(instr, &branch);
+      break;
     }
-    if (IsNextInAssemblyOrder(true_rpo)) {
-      // true block is next, can fall through if condition negated.
-      std::swap(true_rpo, false_rpo);
-      condition = NegateFlagsCondition(condition);
+    case kFlags_deoptimize: {
+      // Assemble a conditional eager deoptimization after this instruction.
+      InstructionOperandConverter i(this, instr);
+      size_t frame_state_offset = MiscField::decode(instr->opcode());
+      DeoptimizationExit* const exit =
+          AddDeoptimizationExit(instr, frame_state_offset);
+      Label continue_label;
+      BranchInfo branch;
+      branch.condition = condition;
+      branch.true_label = exit->label();
+      branch.false_label = &continue_label;
+      branch.fallthru = true;
+      // Assemble architecture-specific branch.
+      AssembleArchBranch(instr, &branch);
+      masm()->bind(&continue_label);
+      break;
     }
-    BranchInfo branch;
-    branch.condition = condition;
-    branch.true_label = GetLabel(true_rpo);
-    branch.false_label = GetLabel(false_rpo);
-    branch.fallthru = IsNextInAssemblyOrder(false_rpo);
-    // Assemble architecture-specific branch.
-    AssembleArchBranch(instr, &branch);
-  } else if (mode == kFlags_set) {
-    // Assemble a boolean materialization after this instruction.
-    AssembleArchBoolean(instr, condition);
+    case kFlags_set: {
+      // Assemble a boolean materialization after this instruction.
+      AssembleArchBoolean(instr, condition);
+      break;
+    }
+    case kFlags_none: {
+      break;
+    }
   }
 }
 
@@ -595,6 +654,9 @@
           shared_info_id,
           static_cast<unsigned int>(descriptor->parameters_count()));
       break;
+    case FrameStateType::kTailCallerFunction:
+      translation->BeginTailCallerFrame(shared_info_id);
+      break;
     case FrameStateType::kConstructStub:
       translation->BeginConstructStubFrame(
           shared_info_id,
@@ -714,15 +776,22 @@
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
 
+DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
+    Instruction* instr, size_t frame_state_offset) {
+  int const deoptimization_id = BuildTranslation(
+      instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
+  DeoptimizationExit* const exit =
+      new (zone()) DeoptimizationExit(deoptimization_id);
+  deoptimization_exits_.push_back(exit);
+  return exit;
+}
 
 int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
-  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  int spill_slots = frame()->GetSpillSlotCount();
-  bool has_frame = descriptor->IsJSFunctionCall() || spill_slots > 0;
   // Leave the PC on the stack on platforms that have that as part of their ABI
   int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
-  int sp_slot_delta =
-      has_frame ? (frame()->GetTotalFrameSlotCount() - pc_slots) : 0;
+  int sp_slot_delta = frame_access_state()->has_frame()
+                          ? (frame()->GetTotalFrameSlotCount() - pc_slots)
+                          : 0;
   // Discard only slots that won't be used by new parameters.
   sp_slot_delta += stack_param_delta;
   return sp_slot_delta;
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 70bf81f..b82181c 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -16,6 +16,7 @@
 namespace compiler {
 
 // Forward declarations.
+class DeoptimizationExit;
 class FrameAccessState;
 class Linkage;
 class OutOfLineCode;
@@ -76,15 +77,18 @@
 
   // Check if a heap object can be materialized by loading from the frame, which
   // is usually way cheaper than materializing the actual heap object constant.
-  bool IsMaterializableFromFrame(Handle<HeapObject> object, int* offset_return);
+  bool IsMaterializableFromFrame(Handle<HeapObject> object, int* slot_return);
   // Check if a heap object can be materialized by loading from a heap root,
   // which is cheaper on some platforms than materializing the actual heap
   // object constant.
   bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                 Heap::RootListIndex* index_return);
 
+  // Assemble instructions for the specified block.
+  void AssembleBlock(const InstructionBlock* block);
+
   // Assemble code for the specified instruction.
-  void AssembleInstruction(Instruction* instr);
+  void AssembleInstruction(Instruction* instr, const InstructionBlock* block);
   void AssembleSourcePosition(Instruction* instr);
   void AssembleGaps(Instruction* instr);
 
@@ -105,6 +109,9 @@
   // Generates an architecture-specific, descriptor-specific prologue
   // to set up a stack frame.
   void AssemblePrologue();
+
+  void AssembleSetupStackPointer();
+
   // Generates an architecture-specific, descriptor-specific return sequence
   // to tear down a stack frame.
   void AssembleReturn();
@@ -112,9 +119,15 @@
   // Generates code to deconstruct the caller's frame, including arguments.
   void AssembleDeconstructActivationRecord(int stack_param_delta);
 
+  void AssembleDeconstructFrame();
+
   // Generates code to manipulate the stack in preparation for a tail call.
   void AssemblePrepareTailCall(int stack_param_delta);
 
+  // Generates code to pop current frame if it is an arguments adaptor frame.
+  void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
+                                        Register scratch2, Register scratch3);
+
   // ===========================================================================
   // ============== Architecture-specific gap resolver methods. ================
   // ===========================================================================
@@ -144,10 +157,10 @@
   void RecordCallPosition(Instruction* instr);
   void PopulateDeoptimizationData(Handle<Code> code);
   int DefineDeoptimizationLiteral(Handle<Object> literal);
-  FrameStateDescriptor* GetFrameStateDescriptor(
-      Instruction* instr, size_t frame_access_state_offset);
+  FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
+                                                size_t frame_state_offset);
   int BuildTranslation(Instruction* instr, int pc_offset,
-                       size_t frame_access_state_offset,
+                       size_t frame_state_offset,
                        OutputFrameStateCombine state_combine);
   void BuildTranslationForFrameStateDescriptor(
       FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
@@ -165,6 +178,9 @@
   void EnsureSpaceForLazyDeopt();
   void MarkLazyDeoptSite();
 
+  DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
+                                            size_t frame_state_offset);
+
   // Converts the delta in the number of stack parameter passed from a tail
   // caller to the callee into the distance (in pointers) the SP must be
   // adjusted, taking frame elision and other relevant factors into
@@ -210,6 +226,7 @@
   GapResolver resolver_;
   SafepointTableBuilder safepoints_;
   ZoneVector<HandlerInfo> handlers_;
+  ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
   ZoneDeque<DeoptimizationState*> deoptimization_states_;
   ZoneDeque<Handle<Object>> deoptimization_literals_;
   size_t inlined_function_count_;
diff --git a/src/compiler/code-stub-assembler.cc b/src/compiler/code-stub-assembler.cc
index 45f47d3..bbb4d63 100644
--- a/src/compiler/code-stub-assembler.cc
+++ b/src/compiler/code-stub-assembler.cc
@@ -28,12 +28,29 @@
                                      const CallInterfaceDescriptor& descriptor,
                                      Code::Flags flags, const char* name,
                                      size_t result_size)
-    : raw_assembler_(new RawMachineAssembler(
-          isolate, new (zone) Graph(zone),
+    : CodeStubAssembler(
+          isolate, zone,
           Linkage::GetStubCallDescriptor(
               isolate, zone, descriptor, descriptor.GetStackParameterCount(),
               CallDescriptor::kNoFlags, Operator::kNoProperties,
-              MachineType::AnyTagged(), result_size))),
+              MachineType::AnyTagged(), result_size),
+          flags, name) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     int parameter_count, Code::Flags flags,
+                                     const char* name)
+    : CodeStubAssembler(isolate, zone, Linkage::GetJSCallDescriptor(
+                                           zone, false, parameter_count,
+                                           CallDescriptor::kNoFlags),
+                        flags, name) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     CallDescriptor* call_descriptor,
+                                     Code::Flags flags, const char* name)
+    : raw_assembler_(new RawMachineAssembler(
+          isolate, new (zone) Graph(zone), call_descriptor,
+          MachineType::PointerRepresentation(),
+          InstructionSelector::SupportedMachineOperatorFlags())),
       flags_(flags),
       name_(name),
       code_generated_(false),
@@ -72,6 +89,9 @@
   return raw_assembler_->NumberConstant(value);
 }
 
+Node* CodeStubAssembler::SmiConstant(Smi* value) {
+  return IntPtrConstant(bit_cast<intptr_t>(value));
+}
 
 Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
   return raw_assembler_->HeapConstant(object);
@@ -86,11 +106,30 @@
   return raw_assembler_->ExternalConstant(address);
 }
 
+Node* CodeStubAssembler::Float64Constant(double value) {
+  return raw_assembler_->Float64Constant(value);
+}
+
+Node* CodeStubAssembler::BooleanMapConstant() {
+  return HeapConstant(isolate()->factory()->boolean_map());
+}
+
+Node* CodeStubAssembler::HeapNumberMapConstant() {
+  return HeapConstant(isolate()->factory()->heap_number_map());
+}
+
+Node* CodeStubAssembler::NullConstant() {
+  return LoadRoot(Heap::kNullValueRootIndex);
+}
+
+Node* CodeStubAssembler::UndefinedConstant() {
+  return LoadRoot(Heap::kUndefinedValueRootIndex);
+}
+
 Node* CodeStubAssembler::Parameter(int value) {
   return raw_assembler_->Parameter(value);
 }
 
-
 void CodeStubAssembler::Return(Node* value) {
   return raw_assembler_->Return(value);
 }
@@ -112,19 +151,253 @@
 }
 
 Node* CodeStubAssembler::SmiShiftBitsConstant() {
-  return Int32Constant(kSmiShiftSize + kSmiTagSize);
+  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
 }
 
+Node* CodeStubAssembler::Float64Round(Node* x) {
+  Node* one = Float64Constant(1.0);
+  Node* one_half = Float64Constant(0.5);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this);
+
+  // Round up {x} towards Infinity.
+  var_x.Bind(Float64Ceil(x));
+
+  GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
+         &return_x);
+  var_x.Bind(Float64Sub(var_x.value(), one));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Ceil(Node* x) {
+  if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
+    return raw_assembler_->Float64RoundUp(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round negated {x} towards Infinity and return the result negated.
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
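The 2^52 constant above is the usual double-rounding trick: for 0 < x < 2^52, doubles of magnitude 2^52 are spaced exactly 1 apart, so (2^52 + x) - 2^52 yields x rounded to the nearest integer, and one compare-and-add turns that into a ceiling. A worked instance (values chosen only for illustration; requires default IEEE semantics, so no -ffast-math):

#include <cstdio>

int main() {
  const double two_52 = 4503599627370496.0;  // 2^52
  double x = 3.25;
  double rounded = (two_52 + x) - two_52;    // 3.0: nearest integer to 3.25
  if (rounded < x) rounded += 1.0;           // 4.0 == ceil(3.25)
  std::printf("%f\n", rounded);
  return 0;
}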
+
+Node* CodeStubAssembler::Float64Floor(Node* x) {
+  if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
+    return raw_assembler_->Float64RoundDown(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards -Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round negated {x} towards -Infinity and return the result negated.
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Trunc(Node* x) {
+  if (raw_assembler_->machine()->Float64RoundTruncate().IsSupported()) {
+    return raw_assembler_->Float64RoundTruncate(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than 0.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
+      var_x.Bind(raw_assembler_->Float64RoundDown(x));
+    } else {
+      // Just return {x} unless it's in the range ]0,2^52[.
+      GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+      // Round positive {x} towards -Infinity.
+      var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+    }
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
+      var_x.Bind(raw_assembler_->Float64RoundUp(x));
+      Goto(&return_x);
+    } else {
+      // Just return {x} unless it's in the range ]-2^52,0[.
+      GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+      GotoUnless(Float64LessThan(x, zero), &return_x);
+
+      // Round negated {x} towards -Infinity and return the result negated.
+      Node* minus_x = Float64Neg(x);
+      var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+      Goto(&return_minus_x);
+    }
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
 
 Node* CodeStubAssembler::SmiTag(Node* value) {
   return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
 }
 
-
 Node* CodeStubAssembler::SmiUntag(Node* value) {
   return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
 }
 
+Node* CodeStubAssembler::SmiToWord32(Node* value) {
+  Node* result = raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+  if (raw_assembler_->machine()->Is64()) {
+    result = raw_assembler_->TruncateInt64ToInt32(result);
+  }
+  return result;
+}
+
+Node* CodeStubAssembler::SmiToFloat64(Node* value) {
+  return ChangeInt32ToFloat64(SmiUntag(value));
+}
+
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
+
+Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
+  return IntPtrAddWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
+
+Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
+  return IntPtrSubWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+
+Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
+  return IntPtrLessThan(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
+  return IntPtrLessThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
+  // TODO(bmeurer): Consider using Select once available.
+  Variable min(this, MachineRepresentation::kTagged);
+  Label if_a(this), if_b(this), join(this);
+  BranchIfSmiLessThan(a, b, &if_a, &if_b);
+  Bind(&if_a);
+  min.Bind(a);
+  Goto(&join);
+  Bind(&if_b);
+  min.Bind(b);
+  Goto(&join);
+  Bind(&join);
+  return min.value();
+}
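The Smi helpers above operate directly on tagged words: a Smi is the integer shifted left by the tag size with zero tag bits, so adding, subtracting, or comparing two tagged Smis gives the same result as doing it on the untagged values and re-tagging. A small illustration, assuming the 32-bit tagging scheme with a 1-bit shift (non-negative values only, to keep the shifts well defined):

#include <cstdint>
#include <cstdio>

constexpr int kShift = 1;  // toy 32-bit Smi tag: value << 1, low bit 0
int32_t SmiTag(int32_t value) { return value << kShift; }
int32_t SmiUntag(int32_t smi) { return smi >> kShift; }

int main() {
  int32_t a = SmiTag(3);  // 6
  int32_t b = SmiTag(4);  // 8
  int32_t sum = a + b;    // 14 == SmiTag(7): no untag/retag needed
  std::printf("%d %d\n", SmiUntag(sum), (a < b));  // 7 1
  return 0;
}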
+
 #define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name)   \
   Node* CodeStubAssembler::name(Node* a, Node* b) { \
     return raw_assembler_->name(a, b);              \
@@ -132,56 +405,129 @@
 CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
 #undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
 
-Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
-  return raw_assembler_->ChangeInt32ToInt64(value);
+Node* CodeStubAssembler::WordShl(Node* value, int shift) {
+  return raw_assembler_->WordShl(value, IntPtrConstant(shift));
 }
 
-Node* CodeStubAssembler::WordShl(Node* value, int shift) {
-  return raw_assembler_->WordShl(value, Int32Constant(shift));
-}
+#define DEFINE_CODE_STUB_ASSEMBER_UNARY_OP(name) \
+  Node* CodeStubAssembler::name(Node* a) { return raw_assembler_->name(a); }
+CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_UNARY_OP)
+#undef DEFINE_CODE_STUB_ASSEMBER_UNARY_OP
 
 Node* CodeStubAssembler::WordIsSmi(Node* a) {
-  return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)),
-                   Int32Constant(0));
+  return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
+                   IntPtrConstant(0));
 }
 
-Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), buffer,
-                              IntPtrConstant(offset));
+Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
+  return WordEqual(
+      raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+      IntPtrConstant(0));
 }
 
-Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), object,
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
+                                          MachineType rep) {
+  return raw_assembler_->Load(rep, buffer, IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
+                                         MachineType rep) {
+  return raw_assembler_->Load(rep, object,
                               IntPtrConstant(offset - kHeapObjectTag));
 }
 
+Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
+  return Load(MachineType::Float64(), object,
+              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kFloat64, object,
+      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+  Node* value = LoadHeapNumberValue(object);
+  return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript,
+                                                value);
+}
+
+Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+  return Load(MachineType::Uint32(), map,
+              IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+  return LoadObjectField(map, Map::kDescriptorsOffset);
+}
+
+Node* CodeStubAssembler::LoadNameHash(Node* name) {
+  return Load(MachineType::Uint32(), name,
+              IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElementInt32Index(
+    Node* object, Node* int32_index, int additional_offset) {
+  Node* header_size = IntPtrConstant(additional_offset +
+                                     FixedArray::kHeaderSize - kHeapObjectTag);
+  Node* scaled_index = WordShl(int32_index, IntPtrConstant(kPointerSizeLog2));
+  Node* offset = IntPtrAdd(scaled_index, header_size);
+  return Load(MachineType::AnyTagged(), object, offset);
+}
+
 Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
                                                        Node* smi_index,
                                                        int additional_offset) {
-  Node* header_size = raw_assembler_->Int32Constant(
-      additional_offset + FixedArray::kHeaderSize - kHeapObjectTag);
+  int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+  Node* header_size = IntPtrConstant(additional_offset +
+                                     FixedArray::kHeaderSize - kHeapObjectTag);
   Node* scaled_index =
-      (kSmiShiftSize == 0)
-          ? raw_assembler_->Word32Shl(
-                smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize))
-          : raw_assembler_->Word32Shl(SmiUntag(smi_index),
-                                      Int32Constant(kPointerSizeLog2));
-  Node* offset = raw_assembler_->Int32Add(scaled_index, header_size);
-  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+      (kSmiShiftBits > kPointerSizeLog2)
+          ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
+          : WordShl(smi_index,
+                    IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
+  Node* offset = IntPtrAdd(scaled_index, header_size);
+  return Load(MachineType::AnyTagged(), object, offset);
 }
 
 Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
                                                             int index) {
-  Node* offset = raw_assembler_->Int32Constant(
-      FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize);
+  Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
+                                index * kPointerSize);
   return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
 }
 
+Node* CodeStubAssembler::StoreFixedArrayElementNoWriteBarrier(Node* object,
+                                                              Node* index,
+                                                              Node* value) {
+  Node* offset =
+      IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
+                IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
+                             value);
+}
+
 Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
   if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
     Handle<Object> root = isolate()->heap()->root_handle(root_index);
     if (root->IsSmi()) {
-      return Int32Constant(Handle<Smi>::cast(root)->value());
+      return SmiConstant(Smi::cast(*root));
     } else {
       return HeapConstant(Handle<HeapObject>::cast(root));
     }
@@ -197,6 +543,135 @@
   return nullptr;
 }
 
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+                                              AllocationFlags flags,
+                                              Node* top_address,
+                                              Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+
+  // If there's not enough space, call the runtime.
+  RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
+      merge_runtime;
+  raw_assembler_->Branch(
+      raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
+      &runtime_call, &no_runtime_call);
+
+  raw_assembler_->Bind(&runtime_call);
+  // AllocateInTargetSpace does not use the context.
+  Node* context = IntPtrConstant(0);
+  Node* runtime_flags = SmiTag(Int32Constant(
+      AllocateDoubleAlignFlag::encode(false) |
+      AllocateTargetSpace::encode(flags & kPretenured
+                                      ? AllocationSpace::OLD_SPACE
+                                      : AllocationSpace::NEW_SPACE)));
+  Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
+                                     SmiTag(size_in_bytes), runtime_flags);
+  raw_assembler_->Goto(&merge_runtime);
+
+  // When there is enough space, return `top' and bump it up.
+  raw_assembler_->Bind(&no_runtime_call);
+  Node* no_runtime_result = top;
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      IntPtrAdd(top, size_in_bytes));
+  no_runtime_result =
+      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
+  raw_assembler_->Goto(&merge_runtime);
+
+  raw_assembler_->Bind(&merge_runtime);
+  return raw_assembler_->Phi(MachineType::PointerRepresentation(),
+                             runtime_result, no_runtime_result);
+}
+
+Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
+                                            AllocationFlags flags,
+                                            Node* top_address,
+                                            Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+  Node* adjusted_size = size_in_bytes;
+  if (flags & kDoubleAlignment) {
+    // TODO(epertoso): Simd128 alignment.
+    RawMachineLabel aligned, not_aligned, merge;
+    raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
+                           &not_aligned, &aligned);
+
+    raw_assembler_->Bind(&not_aligned);
+    Node* not_aligned_size =
+        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
+    raw_assembler_->Goto(&merge);
+
+    raw_assembler_->Bind(&aligned);
+    raw_assembler_->Goto(&merge);
+
+    raw_assembler_->Bind(&merge);
+    adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
+                                        not_aligned_size, adjusted_size);
+  }
+
+  Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
+
+  RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
+  raw_assembler_->Branch(
+      raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
+      &doesnt_need_filler, &needs_filler);
+
+  raw_assembler_->Bind(&needs_filler);
+  // Store a filler and increase the address by kPointerSize.
+  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
+  // it when Simd128 alignment is supported.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
+                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+  Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
+  raw_assembler_->Goto(&merge_address);
+
+  raw_assembler_->Bind(&doesnt_need_filler);
+  Node* address_without_filler = address;
+  raw_assembler_->Goto(&merge_address);
+
+  raw_assembler_->Bind(&merge_address);
+  address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
+                                address_with_filler, address_without_filler);
+  // Update the top.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      IntPtrAdd(top, adjusted_size));
+  return address;
+}
+
+Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
+  bool const new_space = !(flags & kPretenured);
+  Node* top_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_top_address(isolate())
+          : ExternalReference::old_space_allocation_top_address(isolate()));
+  Node* limit_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_limit_address(isolate())
+          : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+#ifdef V8_HOST_ARCH_32_BIT
+  if (flags & kDoubleAlignment) {
+    return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
+                              limit_address);
+  }
+#endif
+
+  return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
+                              limit_address);
+}
+
+Node* CodeStubAssembler::AllocateHeapNumber() {
+  Node* result = Allocate(HeapNumber::kSize, kNone);
+  StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
+  Node* result = AllocateHeapNumber();
+  StoreHeapNumberValue(result, value);
+  return result;
+}
+
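Aside: the AllocateRawUnaligned/AllocateRawAligned helpers above emit V8's standard bump-pointer fast path. As a rough standalone sketch of that logic (plain C++, illustration only; LinearAllocationArea and AllocateRaw are invented names, and the tag value assumes V8's kHeapObjectTag == 1):

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // heap pointers carry a tag in bit 0

struct LinearAllocationArea {
  uintptr_t top;    // next free address
  uintptr_t limit;  // end of the linear allocation buffer
};

// Mirrors the generated fast path: if the request fits, bump `top` and return
// the old value as a tagged pointer; otherwise report failure (the generated
// code calls Runtime::kAllocateInTargetSpace at that point).
uintptr_t AllocateRaw(LinearAllocationArea* area, size_t size_in_bytes) {
  if (area->limit - area->top < size_in_bytes) return 0;
  uintptr_t result = area->top;
  area->top += size_in_bytes;
  return result + kHeapObjectTag;
}

int main() {
  alignas(8) static char buffer[64];
  LinearAllocationArea area{reinterpret_cast<uintptr_t>(buffer),
                            reinterpret_cast<uintptr_t>(buffer) + sizeof(buffer)};
  uintptr_t obj = AllocateRaw(&area, 16);    // fits: returns a tagged pointer
  uintptr_t fail = AllocateRaw(&area, 128);  // too big: would fall back to the runtime
  return (obj != 0 && fail == 0) ? 0 : 1;
}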
 Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
   return raw_assembler_->Load(rep, base);
 }
@@ -230,6 +705,232 @@
   return raw_assembler_->Projection(index, value);
 }
 
+Node* CodeStubAssembler::LoadMap(Node* object) {
+  return LoadObjectField(object, HeapObject::kMapOffset);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kTagged, object,
+      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::LoadInstanceType(Node* object) {
+  return LoadMapInstanceType(LoadMap(object));
+}
+
+Node* CodeStubAssembler::LoadElements(Node* object) {
+  return LoadObjectField(object, JSObject::kElementsOffset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
+  return LoadObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
+Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
+                                        uint32_t mask) {
+  return raw_assembler_->Word32Shr(
+      raw_assembler_->Word32And(word32, raw_assembler_->Int32Constant(mask)),
+      raw_assembler_->Int32Constant(shift));
+}
+
+Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
+  Node* value32 = raw_assembler_->TruncateFloat64ToInt32(
+      TruncationMode::kRoundToZero, value);
+  Node* value64 = ChangeInt32ToFloat64(value32);
+
+  Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
+
+  Label if_valueisequal(this), if_valueisnotequal(this);
+  Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
+  Bind(&if_valueisequal);
+  {
+    Label if_valueiszero(this), if_valueisnotzero(this);
+    Branch(Float64Equal(value, Float64Constant(0.0)), &if_valueiszero,
+           &if_valueisnotzero);
+
+    Bind(&if_valueiszero);
+    BranchIfInt32LessThan(raw_assembler_->Float64ExtractHighWord32(value),
+                          Int32Constant(0), &if_valueisheapnumber,
+                          &if_valueisint32);
+
+    Bind(&if_valueisnotzero);
+    Goto(&if_valueisint32);
+  }
+  Bind(&if_valueisnotequal);
+  Goto(&if_valueisheapnumber);
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Bind(&if_valueisint32);
+  {
+    if (raw_assembler_->machine()->Is64()) {
+      Node* result = SmiTag(ChangeInt32ToInt64(value32));
+      var_result.Bind(result);
+      Goto(&if_join);
+    } else {
+      Node* pair = Int32AddWithOverflow(value32, value32);
+      Node* overflow = Projection(1, pair);
+      Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+      Branch(overflow, &if_overflow, &if_notoverflow);
+      Bind(&if_overflow);
+      Goto(&if_valueisheapnumber);
+      Bind(&if_notoverflow);
+      {
+        Node* result = Projection(0, pair);
+        var_result.Bind(result);
+        Goto(&if_join);
+      }
+    }
+  }
+  Bind(&if_valueisheapnumber);
+  {
+    Node* result = AllocateHeapNumberWithValue(value);
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+  Bind(&if_join);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
+  if (raw_assembler_->machine()->Is64()) {
+    return SmiTag(ChangeInt32ToInt64(value));
+  }
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Node* pair = Int32AddWithOverflow(value, value);
+  Node* overflow = Projection(1, pair);
+  Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
+      if_join(this);
+  Branch(overflow, &if_overflow, &if_notoverflow);
+  Bind(&if_overflow);
+  {
+    Node* value64 = ChangeInt32ToFloat64(value);
+    Node* result = AllocateHeapNumberWithValue(value64);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_notoverflow);
+  {
+    Node* result = Projection(0, pair);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_join);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kFloat64);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToFloat64(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Load the floating point value.
+        var_result.Bind(LoadHeapNumberValue(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kWord32);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToWord32(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Truncate the floating point value.
+        var_result.Bind(TruncateHeapNumberValueToWord32(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
+
+void CodeStubAssembler::BranchIf(Node* condition, Label* if_true,
+                                 Label* if_false) {
+  Label if_condition_is_true(this), if_condition_is_false(this);
+  Branch(condition, &if_condition_is_true, &if_condition_is_false);
+  Bind(&if_condition_is_true);
+  Goto(if_true);
+  Bind(&if_condition_is_false);
+  Goto(if_false);
+}
+
 Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
                                Node** args) {
   CallPrologue();
@@ -290,6 +991,11 @@
 }
 
 Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                         Node* context) {
+  return raw_assembler_->TailCallRuntime0(function_id, context);
+}
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                          Node* context, Node* arg1) {
   return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
 }
@@ -314,6 +1020,12 @@
                                           context);
 }
 
+Node* CodeStubAssembler::CallStub(Callable const& callable, Node* context,
+                                  Node* arg1, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), target, context, arg1, result_size);
+}
+
 Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                                   Node* target, Node* context, Node* arg1,
                                   size_t result_size) {
@@ -401,12 +1113,28 @@
   return CallN(call_descriptor, target, args);
 }
 
-Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
-  Node* code_target = HeapConstant(stub.GetCode());
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), stub.GetCallInterfaceDescriptor(),
-      stub.GetStackParameterCount(), CallDescriptor::kSupportsTailCalls);
-  return raw_assembler_->TailCallN(descriptor, code_target, args);
+Node* CodeStubAssembler::TailCallStub(Callable const& callable, Node* context,
+                                      Node* arg1, Node* arg2,
+                                      size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
+                      result_size);
+}
+
+Node* CodeStubAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                      Node* target, Node* context, Node* arg1,
+                                      Node* arg2, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(3);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
 }
 
 Node* CodeStubAssembler::TailCall(
@@ -425,6 +1153,18 @@
   raw_assembler_->Goto(label->label_);
 }
 
+void CodeStubAssembler::GotoIf(Node* condition, Label* true_label) {
+  Label false_label(this);
+  Branch(condition, true_label, &false_label);
+  Bind(&false_label);
+}
+
+void CodeStubAssembler::GotoUnless(Node* condition, Label* false_label) {
+  Label true_label(this);
+  Branch(condition, &true_label, false_label);
+  Bind(&true_label);
+}
+
 void CodeStubAssembler::Branch(Node* condition,
                                CodeStubAssembler::Label* true_label,
                                CodeStubAssembler::Label* false_label) {
@@ -450,11 +1190,15 @@
 }
 
 // RawMachineAssembler delegate helpers:
-Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
+Isolate* CodeStubAssembler::isolate() const {
+  return raw_assembler_->isolate();
+}
 
-Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
+Factory* CodeStubAssembler::factory() const { return isolate()->factory(); }
 
-Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
+Graph* CodeStubAssembler::graph() const { return raw_assembler_->graph(); }
+
+Zone* CodeStubAssembler::zone() const { return raw_assembler_->zone(); }
 
 // The core implementation of Variable is stored through an indirection so
 // that it can outlive the often block-scoped Variable declarations. This is
@@ -488,27 +1232,20 @@
   return impl_->value_ != nullptr;
 }
 
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler)
-    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
-  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
-  label_ = new (buffer) RawMachineLabel();
-}
-
 CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
                                 int merged_value_count,
-                                CodeStubAssembler::Variable** merged_variables)
+                                CodeStubAssembler::Variable** merged_variables,
+                                CodeStubAssembler::Label::Type type)
     : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
   void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
-  label_ = new (buffer) RawMachineLabel();
+  label_ = new (buffer)
+      RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
+                                        : RawMachineLabel::kNonDeferred);
   for (int i = 0; i < merged_value_count; ++i) {
     variable_phis_[merged_variables[i]->impl_] = nullptr;
   }
 }
 
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
-                                CodeStubAssembler::Variable* merged_variable)
-    : CodeStubAssembler::Label(assembler, 1, &merged_variable) {}
-
 void CodeStubAssembler::Label::MergeVariables() {
   ++merge_count_;
   for (auto var : assembler_->variables_) {
@@ -539,16 +1276,17 @@
         assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
       } else {
         auto i = variable_merges_.find(var);
-        USE(i);
-        // If the following assert fires, then you've declared a variable that
-        // has the same bound value along all paths up until the point you bound
-        // this label, but then later merged a path with a new value for the
-        // variable after the label bind (it's not possible to add phis to the
-        // bound label after the fact, just make sure to list the variable in
-        // the label's constructor's list of merged variables).
-        DCHECK(find_if(i->second.begin(), i->second.end(),
-                       [node](Node* e) -> bool { return node != e; }) ==
-               i->second.end());
+        if (i != variable_merges_.end()) {
+          // If the following assert fires, then you've declared a variable that
+          // has the same bound value along all paths up until the point you
+          // bound this label, but then later merged a path with a new value for
+          // the variable after the label bind (it's not possible to add phis to
+          // the bound label after the fact, just make sure to list the variable
+          // in the label's constructor's list of merged variables).
+          DCHECK(find_if(i->second.begin(), i->second.end(),
+                         [node](Node* e) -> bool { return node != e; }) ==
+                 i->second.end());
+        }
       }
     }
   }
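One subtle bit in the hunk above is the index scaling in LoadFixedArrayElementSmiIndex: a Smi already carries the index shifted left by kSmiShiftSize + kSmiTagSize, so converting it to a byte offset is a single shift whose direction depends on whether that Smi shift exceeds kPointerSizeLog2. A minimal sketch of the arithmetic, assuming V8's default layouts (Smi = value << 1 with 4-byte pointers on 32-bit targets, Smi = value << 32 with 8-byte pointers on 64-bit targets):

#include <cassert>
#include <cstdint>

// Same arithmetic as the assembler code: the Smi carries the index shifted
// left by `smi_shift_bits`, and the byte offset needs the index shifted left
// by `pointer_size_log2`, so one shift converts between the two encodings.
int64_t SmiIndexToByteOffset(int64_t smi, int smi_shift_bits,
                             int pointer_size_log2) {
  return (smi_shift_bits > pointer_size_log2)
             ? smi >> (smi_shift_bits - pointer_size_log2)
             : smi << (pointer_size_log2 - smi_shift_bits);
}

int main() {
  // 64-bit layout: Smi = value << 32, kPointerSizeLog2 == 3.
  assert(SmiIndexToByteOffset(int64_t{5} << 32, 32, 3) == 5 * 8);
  // 32-bit layout: Smi = value << 1, kPointerSizeLog2 == 2.
  assert(SmiIndexToByteOffset(5 << 1, 1, 2) == 5 * 4);
  return 0;
}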
diff --git a/src/compiler/code-stub-assembler.h b/src/compiler/code-stub-assembler.h
index 2ab1376..9fcb890 100644
--- a/src/compiler/code-stub-assembler.h
+++ b/src/compiler/code-stub-assembler.h
@@ -19,8 +19,10 @@
 namespace v8 {
 namespace internal {
 
+class Callable;
 class CallInterfaceDescriptor;
 class Isolate;
+class Factory;
 class Zone;
 
 namespace compiler {
@@ -33,49 +35,91 @@
 class RawMachineLabel;
 class Schedule;
 
-#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
-  V(IntPtrAdd)                                \
-  V(IntPtrSub)                                \
-  V(Int32Add)                                 \
-  V(Int32Sub)                                 \
-  V(Int32Mul)                                 \
-  V(Int32GreaterThanOrEqual)                  \
-  V(WordEqual)                                \
-  V(WordNotEqual)                             \
-  V(WordOr)                                   \
-  V(WordAnd)                                  \
-  V(WordXor)                                  \
-  V(WordShl)                                  \
-  V(WordShr)                                  \
-  V(WordSar)                                  \
-  V(WordRor)                                  \
-  V(Word32Equal)                              \
-  V(Word32NotEqual)                           \
-  V(Word32Or)                                 \
-  V(Word32And)                                \
-  V(Word32Xor)                                \
-  V(Word32Shl)                                \
-  V(Word32Shr)                                \
-  V(Word32Sar)                                \
-  V(Word32Ror)                                \
-  V(Word64Equal)                              \
-  V(Word64NotEqual)                           \
-  V(Word64Or)                                 \
-  V(Word64And)                                \
-  V(Word64Xor)                                \
-  V(Word64Shr)                                \
-  V(Word64Sar)                                \
-  V(Word64Ror)                                \
-  V(UintPtrGreaterThanOrEqual)
+#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+  V(Float32Equal)                                     \
+  V(Float32LessThan)                                  \
+  V(Float32LessThanOrEqual)                           \
+  V(Float32GreaterThan)                               \
+  V(Float32GreaterThanOrEqual)                        \
+  V(Float64Equal)                                     \
+  V(Float64LessThan)                                  \
+  V(Float64LessThanOrEqual)                           \
+  V(Float64GreaterThan)                               \
+  V(Float64GreaterThanOrEqual)                        \
+  V(Int32GreaterThan)                                 \
+  V(Int32GreaterThanOrEqual)                          \
+  V(Int32LessThan)                                    \
+  V(Int32LessThanOrEqual)                             \
+  V(IntPtrLessThan)                                   \
+  V(IntPtrLessThanOrEqual)                            \
+  V(Uint32LessThan)                                   \
+  V(UintPtrGreaterThanOrEqual)                        \
+  V(WordEqual)                                        \
+  V(WordNotEqual)                                     \
+  V(Word32Equal)                                      \
+  V(Word32NotEqual)                                   \
+  V(Word64Equal)                                      \
+  V(Word64NotEqual)
+
+#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V)   \
+  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+  V(Float64Add)                                 \
+  V(Float64Sub)                                 \
+  V(Float64InsertLowWord32)                     \
+  V(Float64InsertHighWord32)                    \
+  V(IntPtrAdd)                                  \
+  V(IntPtrAddWithOverflow)                      \
+  V(IntPtrSub)                                  \
+  V(IntPtrSubWithOverflow)                      \
+  V(Int32Add)                                   \
+  V(Int32AddWithOverflow)                       \
+  V(Int32Sub)                                   \
+  V(Int32Mul)                                   \
+  V(WordOr)                                     \
+  V(WordAnd)                                    \
+  V(WordXor)                                    \
+  V(WordShl)                                    \
+  V(WordShr)                                    \
+  V(WordSar)                                    \
+  V(WordRor)                                    \
+  V(Word32Or)                                   \
+  V(Word32And)                                  \
+  V(Word32Xor)                                  \
+  V(Word32Shl)                                  \
+  V(Word32Shr)                                  \
+  V(Word32Sar)                                  \
+  V(Word32Ror)                                  \
+  V(Word64Or)                                   \
+  V(Word64And)                                  \
+  V(Word64Xor)                                  \
+  V(Word64Shr)                                  \
+  V(Word64Sar)                                  \
+  V(Word64Ror)
+
+#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
+  V(Float64Neg)                              \
+  V(Float64Sqrt)                             \
+  V(ChangeFloat64ToUint32)                   \
+  V(ChangeInt32ToFloat64)                    \
+  V(ChangeInt32ToInt64)                      \
+  V(ChangeUint32ToFloat64)                   \
+  V(ChangeUint32ToUint64)                    \
+  V(Word32Clz)
 
 class CodeStubAssembler {
  public:
+  // Create with CallStub linkage.
   // |result_size| specifies the number of results returned by the stub.
   // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
   CodeStubAssembler(Isolate* isolate, Zone* zone,
                     const CallInterfaceDescriptor& descriptor,
                     Code::Flags flags, const char* name,
                     size_t result_size = 1);
+
+  // Create with JSCall linkage.
+  CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+                    Code::Flags flags, const char* name);
+
   virtual ~CodeStubAssembler();
 
   Handle<Code> GenerateCode();
@@ -95,6 +139,14 @@
     Impl* impl_;
   };
 
+  enum AllocationFlag : uint8_t {
+    kNone = 0,
+    kDoubleAlignment = 1,
+    kPretenured = 1 << 1
+  };
+
+  typedef base::Flags<AllocationFlag> AllocationFlags;
+
   // ===========================================================================
   // Base Assembler
   // ===========================================================================
@@ -103,15 +155,23 @@
   Node* Int32Constant(int value);
   Node* IntPtrConstant(intptr_t value);
   Node* NumberConstant(double value);
+  Node* SmiConstant(Smi* value);
   Node* HeapConstant(Handle<HeapObject> object);
   Node* BooleanConstant(bool value);
   Node* ExternalConstant(ExternalReference address);
+  Node* Float64Constant(double value);
+  Node* BooleanMapConstant();
+  Node* HeapNumberMapConstant();
+  Node* NullConstant();
+  Node* UndefinedConstant();
 
   Node* Parameter(int value);
   void Return(Node* value);
 
   void Bind(Label* label);
   void Goto(Label* label);
+  void GotoIf(Node* condition, Label* true_label);
+  void GotoUnless(Node* condition, Label* false_label);
   void Branch(Node* condition, Label* true_label, Label* false_label);
 
   void Switch(Node* index, Label* default_label, int32_t* case_values,
@@ -142,8 +202,10 @@
 
   Node* WordShl(Node* value, int shift);
 
-  // Conversions
-  Node* ChangeInt32ToInt64(Node* value);
+// Unary
+#define DECLARE_CODE_STUB_ASSEMBLER_UNARY_OP(name) Node* name(Node* a);
+  CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBLER_UNARY_OP)
+#undef DECLARE_CODE_STUB_ASSEMBLER_UNARY_OP
 
   // Projections
   Node* Projection(int index, Node* value);
@@ -160,6 +222,7 @@
   Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
                     Node* arg2, Node* arg3, Node* arg4, Node* arg5);
 
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1);
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
@@ -169,6 +232,9 @@
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1, Node* arg2, Node* arg3, Node* arg4);
 
+  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+                 size_t result_size = 1);
+
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node* context, Node* arg1, size_t result_size = 1);
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
@@ -183,7 +249,13 @@
                  Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                  Node* arg5, size_t result_size = 1);
 
-  Node* TailCallStub(CodeStub& stub, Node** args);
+  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+                     Node* arg2, size_t result_size = 1);
+
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, Node* arg2,
+                     size_t result_size = 1);
+
   Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node** args, size_t result_size = 1);
 
@@ -191,31 +263,141 @@
   // Macros
   // ===========================================================================
 
-  // Tag and untag Smi values.
+  // Float64 operations.
+  Node* Float64Ceil(Node* x);
+  Node* Float64Floor(Node* x);
+  Node* Float64Round(Node* x);
+  Node* Float64Trunc(Node* x);
+
+  // Tag a Word as a Smi value.
   Node* SmiTag(Node* value);
+  // Untag a Smi value as a Word.
   Node* SmiUntag(Node* value);
 
+  // Smi conversions.
+  Node* SmiToFloat64(Node* value);
+  Node* SmiToWord32(Node* value);
+
+  // Smi operations.
+  Node* SmiAdd(Node* a, Node* b);
+  Node* SmiAddWithOverflow(Node* a, Node* b);
+  Node* SmiSub(Node* a, Node* b);
+  Node* SmiSubWithOverflow(Node* a, Node* b);
+  Node* SmiEqual(Node* a, Node* b);
+  Node* SmiLessThan(Node* a, Node* b);
+  Node* SmiLessThanOrEqual(Node* a, Node* b);
+  Node* SmiMin(Node* a, Node* b);
+
   // Load a value from the root array.
   Node* LoadRoot(Heap::RootListIndex root_index);
 
   // Check a value for smi-ness
   Node* WordIsSmi(Node* a);
 
+  // Check that the value is a positive smi.
+  Node* WordIsPositiveSmi(Node* a);
+
   // Load an object pointer from a buffer that isn't in the heap.
-  Node* LoadBufferObject(Node* buffer, int offset);
+  Node* LoadBufferObject(Node* buffer, int offset,
+                         MachineType rep = MachineType::AnyTagged());
   // Load a field from an object on the heap.
-  Node* LoadObjectField(Node* object, int offset);
+  Node* LoadObjectField(Node* object, int offset,
+                        MachineType rep = MachineType::AnyTagged());
+  // Load the floating point value of a HeapNumber.
+  Node* LoadHeapNumberValue(Node* object);
+  // Store the floating point value of a HeapNumber.
+  Node* StoreHeapNumberValue(Node* object, Node* value);
+  // Truncate the floating point value of a HeapNumber to an Int32.
+  Node* TruncateHeapNumberValueToWord32(Node* object);
+  // Load the bit field of a Map.
+  Node* LoadMapBitField(Node* map);
+  // Load bit field 2 of a map.
+  Node* LoadMapBitField2(Node* map);
+  // Load bit field 3 of a map.
+  Node* LoadMapBitField3(Node* map);
+  // Load the instance type of a map.
+  Node* LoadMapInstanceType(Node* map);
+  // Load the instance descriptors of a map.
+  Node* LoadMapDescriptors(Node* map);
+
+  // Load the hash field of a name.
+  Node* LoadNameHash(Node* name);
 
   // Load an array element from a FixedArray.
+  Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
+                                        int additional_offset = 0);
   Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
                                       int additional_offset = 0);
   Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
 
+  // Allocate an object of the given size.
+  Node* Allocate(int size, AllocationFlags flags = kNone);
+  // Allocate a HeapNumber without initializing its value.
+  Node* AllocateHeapNumber();
+  // Allocate a HeapNumber with a specific value.
+  Node* AllocateHeapNumberWithValue(Node* value);
+
+  // Store an array element to a FixedArray.
+  Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
+                                             Node* value);
+  // Load the Map of a HeapObject.
+  Node* LoadMap(Node* object);
+  // Store the Map of a HeapObject.
+  Node* StoreMapNoWriteBarrier(Node* object, Node* map);
+  // Load the instance type of a HeapObject.
+  Node* LoadInstanceType(Node* object);
+
+  // Load the elements backing store of a JSObject.
+  Node* LoadElements(Node* object);
+  // Load the length of a fixed array base instance.
+  Node* LoadFixedArrayBaseLength(Node* array);
+
+  // Decode the bit field |T| from |word32| and return it as a Word32 node.
+  template <typename T>
+  Node* BitFieldDecode(Node* word32) {
+    return BitFieldDecode(word32, T::kShift, T::kMask);
+  }
+
+  Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
+
+  // Conversions.
+  Node* ChangeFloat64ToTagged(Node* value);
+  Node* ChangeInt32ToTagged(Node* value);
+  Node* TruncateTaggedToFloat64(Node* context, Node* value);
+  Node* TruncateTaggedToWord32(Node* context, Node* value);
+
+  // Branching helpers.
+  // TODO(danno): Can we be more cleverish wrt. edge-split?
+  void BranchIf(Node* condition, Label* if_true, Label* if_false);
+
+#define BRANCH_HELPER(name)                                                \
+  void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
+    BranchIf(name(a, b), if_true, if_false);                               \
+  }
+  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
+#undef BRANCH_HELPER
+
+  void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
+    BranchIf(SmiLessThan(a, b), if_true, if_false);
+  }
+
+  void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
+                                  Label* if_false) {
+    BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
+  }
+
+  void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
+    BranchIfFloat64Equal(value, value, if_false, if_true);
+  }
+
+  // Helpers which delegate to RawMachineAssembler.
+  Factory* factory() const;
+  Isolate* isolate() const;
+  Zone* zone() const;
+
  protected:
   // Protected helpers which delegate to RawMachineAssembler.
-  Graph* graph();
-  Isolate* isolate();
-  Zone* zone();
+  Graph* graph() const;
 
   // Enables subclasses to perform operations before and after a call.
   virtual void CallPrologue();
@@ -224,11 +406,20 @@
  private:
   friend class CodeStubAssemblerTester;
 
+  CodeStubAssembler(Isolate* isolate, Zone* zone,
+                    CallDescriptor* call_descriptor, Code::Flags flags,
+                    const char* name);
+
   Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
   Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
 
   Node* SmiShiftBitsConstant();
 
+  Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
+                           Node* top_address, Node* limit_address);
+  Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
+                             Node* top_address, Node* limit_address);
+
   base::SmartPointer<RawMachineAssembler> raw_assembler_;
   Code::Flags flags_;
   const char* name_;
@@ -238,13 +429,25 @@
   DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
+
 class CodeStubAssembler::Label {
  public:
-  explicit Label(CodeStubAssembler* assembler);
-  Label(CodeStubAssembler* assembler, int merged_variable_count,
-        CodeStubAssembler::Variable** merged_variables);
+  enum Type { kDeferred, kNonDeferred };
+
+  explicit Label(CodeStubAssembler* assembler,
+                 CodeStubAssembler::Label::Type type =
+                     CodeStubAssembler::Label::kNonDeferred)
+      : CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
   Label(CodeStubAssembler* assembler,
-        CodeStubAssembler::Variable* merged_variable);
+        CodeStubAssembler::Variable* merged_variable,
+        CodeStubAssembler::Label::Type type =
+            CodeStubAssembler::Label::kNonDeferred)
+      : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
+  Label(CodeStubAssembler* assembler, int merged_variable_count,
+        CodeStubAssembler::Variable** merged_variables,
+        CodeStubAssembler::Label::Type type =
+            CodeStubAssembler::Label::kNonDeferred);
   ~Label() {}
 
  private:
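Among the conversions declared above, ChangeInt32ToTagged (implemented earlier in code-stub-assembler.cc) relies on a small trick on 32-bit targets: adding the value to itself produces the same bits as the Smi tagging shift and sets the overflow flag exactly when the value does not fit in a 31-bit Smi, in which case a HeapNumber is allocated instead. A hedged sketch of that check, using an invented helper name:

#include <cstdint>
#include <iostream>
#include <limits>

// Returns true and the Smi bit pattern when `value` fits in a 31-bit Smi;
// returns false when the addition overflows, which is exactly the case where
// the stub boxes the value in a HeapNumber instead.
bool TryTagInt32AsSmi(int32_t value, int32_t* tagged_out) {
  int64_t doubled = int64_t{value} + int64_t{value};  // value << 1, tag bit 0
  if (doubled < std::numeric_limits<int32_t>::min() ||
      doubled > std::numeric_limits<int32_t>::max()) {
    return false;
  }
  *tagged_out = static_cast<int32_t>(doubled);
  return true;
}

int main() {
  int32_t tagged;
  std::cout << TryTagInt32AsSmi(1000, &tagged) << "\n";        // 1
  std::cout << TryTagInt32AsSmi(0x40000000, &tagged) << "\n";  // 0: needs a HeapNumber
  return 0;
}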
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 2334541..22e16a2 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -57,6 +57,9 @@
   switch (node->opcode()) {
     case IrOpcode::kBranch:
       return ReduceBranch(node);
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
+      return ReduceDeoptimizeConditional(node);
     case IrOpcode::kMerge:
       return ReduceMerge(node);
     case IrOpcode::kEffectPhi:
@@ -123,6 +126,37 @@
   return Replace(dead());
 }
 
+Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
+         node->opcode() == IrOpcode::kDeoptimizeUnless);
+  bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+  Node* condition = NodeProperties::GetValueInput(node, 0);
+  Node* frame_state = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  // Swap DeoptimizeIf/DeoptimizeUnless on {node} if {condition} is a
+  // BooleanNot and use the input to the BooleanNot as the new condition for
+  // {node}. Note that we assume {condition} was already properly optimized
+  // before we get here (as guaranteed by the graph reduction logic).
+  if (condition->opcode() == IrOpcode::kBooleanNot) {
+    NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
+    NodeProperties::ChangeOp(node, condition_is_true
+                                       ? common()->DeoptimizeIf()
+                                       : common()->DeoptimizeUnless());
+    return Changed(node);
+  }
+  Decision const decision = DecideCondition(condition);
+  if (decision == Decision::kUnknown) return NoChange();
+  if (condition_is_true == (decision == Decision::kTrue)) {
+    return Replace(control);
+  }
+  control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+                             frame_state, effect, control);
+  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+  NodeProperties::MergeControlToEnd(graph(), common(), control);
+  Revisit(graph()->end());
+  return Replace(dead());
+}
 
 Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
   DCHECK_EQ(IrOpcode::kMerge, node->opcode());
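Once DecideCondition resolves the condition to a constant, ReduceDeoptimizeConditional above either drops the check entirely or replaces it with an unconditional Deoptimize. A compact sketch of that decision, with assumed enum names standing in for the real compiler types:

#include <cassert>

enum class Op { kDeoptimizeIf, kDeoptimizeUnless };
enum class Decision { kTrue, kFalse, kUnknown };

// True when the check can simply be removed (control falls through); false
// when the check always fires and becomes an unconditional Deoptimize.
// Decision::kUnknown corresponds to NoChange in the reducer and is not
// handled here.
bool DeoptCheckCanBeDropped(Op op, Decision condition) {
  bool deopts_when_true = (op == Op::kDeoptimizeIf);
  bool condition_is_true = (condition == Decision::kTrue);
  return deopts_when_true != condition_is_true;
}

int main() {
  assert(DeoptCheckCanBeDropped(Op::kDeoptimizeIf, Decision::kFalse));
  assert(!DeoptCheckCanBeDropped(Op::kDeoptimizeIf, Decision::kTrue));
  assert(DeoptCheckCanBeDropped(Op::kDeoptimizeUnless, Decision::kTrue));
  assert(!DeoptCheckCanBeDropped(Op::kDeoptimizeUnless, Decision::kFalse));
  return 0;
}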
diff --git a/src/compiler/common-operator-reducer.h b/src/compiler/common-operator-reducer.h
index 7184755..49d9f1d 100644
--- a/src/compiler/common-operator-reducer.h
+++ b/src/compiler/common-operator-reducer.h
@@ -30,6 +30,7 @@
 
  private:
   Reduction ReduceBranch(Node* node);
+  Reduction ReduceDeoptimizeConditional(Node* node);
   Reduction ReduceMerge(Node* node);
   Reduction ReduceEffectPhi(Node* node);
   Reduction ReducePhi(Node* node);
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index c92bae9..3bb1b34 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -142,21 +142,21 @@
   return os;
 }
 
-
-#define CACHED_OP_LIST(V)                                  \
-  V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)           \
-  V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)          \
-  V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
-  V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1)       \
-  V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1)       \
-  V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1)           \
-  V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)       \
-  V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
-  V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)   \
-  V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0)     \
+#define CACHED_OP_LIST(V)                                    \
+  V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)             \
+  V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1)     \
+  V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
+  V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)            \
+  V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1)           \
+  V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
+  V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
+  V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1)             \
+  V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)         \
+  V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)   \
+  V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)     \
+  V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0)       \
   V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
 
-
 #define CACHED_RETURN_LIST(V) \
   V(1)                        \
   V(2)                        \
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 7c3f3da..7c59f47 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -133,6 +133,8 @@
   const Operator* IfDefault();
   const Operator* Throw();
   const Operator* Deoptimize(DeoptimizeKind kind);
+  const Operator* DeoptimizeIf();
+  const Operator* DeoptimizeUnless();
   const Operator* Return(int value_input_count = 1);
   const Operator* Terminate();
 
diff --git a/src/compiler/fast-accessor-assembler.cc b/src/compiler/fast-accessor-assembler.cc
deleted file mode 100644
index 518003b..0000000
--- a/src/compiler/fast-accessor-assembler.cc
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/fast-accessor-assembler.h"
-
-#include "src/base/logging.h"
-#include "src/code-stubs.h"  // For CallApiFunctionStub.
-#include "src/compiler/graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/compiler/verifier.h"
-#include "src/handles-inl.h"
-#include "src/objects.h"  // For FAA::GetInternalField impl.
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
-    : zone_(),
-      assembler_(new RawMachineAssembler(
-          isolate, new (zone()) Graph(zone()),
-          Linkage::GetJSCallDescriptor(&zone_, false, 1,
-                                       CallDescriptor::kNoFlags))),
-      state_(kBuilding) {}
-
-
-FastAccessorAssembler::~FastAccessorAssembler() {}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
-    int const_value) {
-  CHECK_EQ(kBuilding, state_);
-  return FromRaw(assembler_->NumberConstant(const_value));
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
-  CHECK_EQ(kBuilding, state_);
-
-  // For JS call descriptor, the receiver is parameter 0. If we use other
-  // call descriptors, this may or may not hold. So let's check.
-  CHECK(assembler_->call_descriptor()->IsJSFunctionCall());
-  return FromRaw(assembler_->Parameter(0));
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
-    ValueId value, int field_no) {
-  CHECK_EQ(kBuilding, state_);
-  // Determine the 'value' object's instance type.
-  Node* object_map =
-      assembler_->Load(MachineType::Pointer(), FromId(value),
-                       assembler_->IntPtrConstant(
-                           Internals::kHeapObjectMapOffset - kHeapObjectTag));
-  Node* instance_type = assembler_->WordAnd(
-      assembler_->Load(
-          MachineType::Uint16(), object_map,
-          assembler_->IntPtrConstant(
-              Internals::kMapInstanceTypeAndBitFieldOffset - kHeapObjectTag)),
-      assembler_->IntPtrConstant(0xff));
-
-  // Check whether we have a proper JSObject.
-  RawMachineLabel is_jsobject, is_not_jsobject, merge;
-  assembler_->Branch(
-      assembler_->WordEqual(
-          instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
-      &is_jsobject, &is_not_jsobject);
-
-  // JSObject? Then load the internal field field_no.
-  assembler_->Bind(&is_jsobject);
-  Node* internal_field = assembler_->Load(
-      MachineType::Pointer(), FromId(value),
-      assembler_->IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag +
-                                 kPointerSize * field_no));
-  assembler_->Goto(&merge);
-
-  // No JSObject? Return undefined.
-  // TODO(vogelheim): Check whether this is the appropriate action, or whether
-  //                  the method should take a label instead.
-  assembler_->Bind(&is_not_jsobject);
-  Node* fail_value = assembler_->UndefinedConstant();
-  assembler_->Goto(&merge);
-
-  // Return.
-  assembler_->Bind(&merge);
-  Node* phi = assembler_->Phi(MachineRepresentation::kTagged, internal_field,
-                              fail_value);
-  return FromRaw(phi);
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
-                                                                int offset) {
-  CHECK_EQ(kBuilding, state_);
-  return FromRaw(assembler_->Load(MachineType::IntPtr(), FromId(value),
-                                  assembler_->IntPtrConstant(offset)));
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
-                                                                 int offset) {
-  CHECK_EQ(kBuilding, state_);
-  return FromRaw(
-      assembler_->Load(MachineType::AnyTagged(),
-                       assembler_->Load(MachineType::Pointer(), FromId(value),
-                                        assembler_->IntPtrConstant(offset))));
-}
-
-
-void FastAccessorAssembler::ReturnValue(ValueId value) {
-  CHECK_EQ(kBuilding, state_);
-  assembler_->Return(FromId(value));
-}
-
-
-void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel pass, fail;
-  assembler_->Branch(
-      assembler_->Word32Equal(
-          assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
-          assembler_->Int32Constant(0)),
-      &pass, &fail);
-  assembler_->Bind(&fail);
-  assembler_->Return(assembler_->NullConstant());
-  assembler_->Bind(&pass);
-}
-
-
-void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel is_null, not_null;
-  assembler_->Branch(
-      assembler_->IntPtrEqual(FromId(value), assembler_->IntPtrConstant(0)),
-      &is_null, &not_null);
-  assembler_->Bind(&is_null);
-  assembler_->Return(assembler_->NullConstant());
-  assembler_->Bind(&not_null);
-}
-
-
-FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel* label =
-      new (zone()->New(sizeof(RawMachineLabel))) RawMachineLabel;
-  return FromRaw(label);
-}
-
-
-void FastAccessorAssembler::SetLabel(LabelId label_id) {
-  CHECK_EQ(kBuilding, state_);
-  assembler_->Bind(FromId(label_id));
-}
-
-
-void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
-                                               LabelId label_id) {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel pass;
-  assembler_->Branch(
-      assembler_->IntPtrEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
-      &pass, FromId(label_id));
-  assembler_->Bind(&pass);
-}
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
-    FunctionCallback callback_function, ValueId arg) {
-  CHECK_EQ(kBuilding, state_);
-
-  // Create API function stub.
-  CallApiFunctionStub stub(assembler_->isolate(), true);
-
-  // Wrap the FunctionCallback in an ExternalReference.
-  ApiFunction callback_api_function(FUNCTION_ADDR(callback_function));
-  ExternalReference callback(&callback_api_function,
-                             ExternalReference::DIRECT_API_CALL,
-                             assembler_->isolate());
-
-  // The stub has 5 parameters, and kJSParam (here: 1) parameters to pass
-  // through to the callback.
-  // See: ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType
-  static const int kStackParam = 1;
-  Node* args[] = {
-      // Stub/register parameters:
-      assembler_->Parameter(0),                /* receiver (use accessor's) */
-      assembler_->UndefinedConstant(),         /* call_data (undefined) */
-      assembler_->NullConstant(),              /* holder (null) */
-      assembler_->ExternalConstant(callback),  /* API callback function */
-      assembler_->IntPtrConstant(kStackParam), /* # JS arguments */
-
-      // kStackParam stack parameter(s):
-      FromId(arg),
-
-      // Context parameter. (See Linkage::GetStubCallDescriptor.)
-      assembler_->UndefinedConstant()};
-  CHECK_EQ(5 + kStackParam + 1, arraysize(args));
-
-  Node* call = assembler_->CallN(
-      Linkage::GetStubCallDescriptor(
-          assembler_->isolate(), zone(), stub.GetCallInterfaceDescriptor(),
-          kStackParam + stub.GetStackParameterCount(),
-          CallDescriptor::kNoFlags),
-      assembler_->HeapConstant(stub.GetCode()), args);
-  return FromRaw(call);
-}
-
-MaybeHandle<Code> FastAccessorAssembler::Build() {
-  CHECK_EQ(kBuilding, state_);
-
-  // Cleanup: We no longer need this.
-  nodes_.clear();
-  labels_.clear();
-
-  // Export the schedule and call the compiler.
-  Schedule* schedule = assembler_->Export();
-  Code::Flags flags = Code::ComputeFlags(Code::STUB);
-  MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
-      schedule, flags, "FastAccessorAssembler");
-
-  // Update state & return.
-  state_ = !code.is_null() ? kBuilt : kError;
-  return code;
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
-  nodes_.push_back(node);
-  ValueId value = {nodes_.size() - 1};
-  return value;
-}
-
-
-FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
-    RawMachineLabel* label) {
-  labels_.push_back(label);
-  LabelId label_id = {labels_.size() - 1};
-  return label_id;
-}
-
-
-Node* FastAccessorAssembler::FromId(ValueId value) const {
-  CHECK_LT(value.value_id, nodes_.size());
-  CHECK_NOT_NULL(nodes_.at(value.value_id));
-  return nodes_.at(value.value_id);
-}
-
-
-RawMachineLabel* FastAccessorAssembler::FromId(LabelId label) const {
-  CHECK_LT(label.label_id, labels_.size());
-  CHECK_NOT_NULL(labels_.at(label.label_id));
-  return labels_.at(label.label_id);
-}
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/frame-elider.cc b/src/compiler/frame-elider.cc
index 7c3f9b2..5ad4aad 100644
--- a/src/compiler/frame-elider.cc
+++ b/src/compiler/frame-elider.cc
@@ -19,11 +19,12 @@
 
 
 void FrameElider::MarkBlocks() {
-  for (auto block : instruction_blocks()) {
+  for (InstructionBlock* block : instruction_blocks()) {
     if (block->needs_frame()) continue;
-    for (auto i = block->code_start(); i < block->code_end(); ++i) {
-      if (InstructionAt(i)->IsCall() ||
-          InstructionAt(i)->opcode() == ArchOpcode::kArchDeoptimize) {
+    for (int i = block->code_start(); i < block->code_end(); ++i) {
+      const Instruction* instr = InstructionAt(i);
+      if (instr->IsCall() || instr->IsDeoptimizeCall() ||
+          instr->arch_opcode() == ArchOpcode::kArchStackPointer) {
         block->mark_needs_frame();
         break;
       }
@@ -33,13 +34,13 @@
 
 
 void FrameElider::PropagateMarks() {
-  while (PropagateInOrder() && PropagateReversed()) {
+  while (PropagateInOrder() || PropagateReversed()) {
   }
 }
 
 
 void FrameElider::MarkDeConstruction() {
-  for (auto block : instruction_blocks()) {
+  for (InstructionBlock* block : instruction_blocks()) {
     if (block->needs_frame()) {
       // Special case: The start block needs a frame.
       if (block->predecessors().empty()) {
@@ -47,15 +48,25 @@
       }
       // Find "frame -> no frame" transitions, inserting frame
       // deconstructions.
-      for (auto succ : block->successors()) {
+      for (RpoNumber& succ : block->successors()) {
         if (!InstructionBlockAt(succ)->needs_frame()) {
           DCHECK_EQ(1U, block->SuccessorCount());
+          const Instruction* last =
+              InstructionAt(block->last_instruction_index());
+          if (last->IsThrow() || last->IsTailCall() ||
+              last->IsDeoptimizeCall()) {
+            // We need to keep the frame if we exit the block through any
+            // of these.
+            continue;
+          }
+          // The only cases when we need to deconstruct are ret and jump.
+          DCHECK(last->IsRet() || last->IsJump());
           block->mark_must_deconstruct_frame();
         }
       }
     } else {
       // Find "no frame -> frame" transitions, inserting frame constructions.
-      for (auto succ : block->successors()) {
+      for (RpoNumber& succ : block->successors()) {
         if (InstructionBlockAt(succ)->needs_frame()) {
           DCHECK_NE(1U, block->SuccessorCount());
           InstructionBlockAt(succ)->mark_must_construct_frame();
@@ -68,7 +79,7 @@
 
 bool FrameElider::PropagateInOrder() {
   bool changed = false;
-  for (auto block : instruction_blocks()) {
+  for (InstructionBlock* block : instruction_blocks()) {
     changed |= PropagateIntoBlock(block);
   }
   return changed;
@@ -77,7 +88,7 @@
 
 bool FrameElider::PropagateReversed() {
   bool changed = false;
-  for (auto block : base::Reversed(instruction_blocks())) {
+  for (InstructionBlock* block : base::Reversed(instruction_blocks())) {
     changed |= PropagateIntoBlock(block);
   }
   return changed;
@@ -94,7 +105,7 @@
 
   // Propagate towards the end ("downwards") if there is a predecessor needing
   // a frame, but don't "bleed" from deferred code to non-deferred code.
-  for (auto pred : block->predecessors()) {
+  for (RpoNumber& pred : block->predecessors()) {
     if (InstructionBlockAt(pred)->needs_frame() &&
         (!InstructionBlockAt(pred)->IsDeferred() || block->IsDeferred())) {
       block->mark_needs_frame();
@@ -104,7 +115,7 @@
 
   // Propagate towards start ("upwards") if there are successors and all of
   // them need a frame.
-  for (auto succ : block->successors()) {
+  for (RpoNumber& succ : block->successors()) {
     if (!InstructionBlockAt(succ)->needs_frame()) return false;
   }
   block->mark_needs_frame();
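
The switch from '&&' to '||' in PropagateMarks() makes the loop run to a true fixed point: it keeps alternating the forward and reverse passes while either of them still marks a new block, instead of stopping as soon as one direction stalls. Below is a minimal standalone sketch of that pattern, with toy block types and an illustrative propagation rule rather than the real PropagateIntoBlock logic.

#include <vector>

struct Block {
  bool needs_frame = false;
  std::vector<int> succs;  // indices of successor blocks
};

// One pass over the blocks, forward or reversed; returns true if it changed
// anything. The rule used here (inherit the mark from any marked successor)
// is only a stand-in for the real propagation rules.
static bool Pass(std::vector<Block>& blocks, bool reversed) {
  bool changed = false;
  const int n = static_cast<int>(blocks.size());
  for (int k = 0; k < n; ++k) {
    Block& b = blocks[reversed ? n - 1 - k : k];
    if (b.needs_frame) continue;
    for (int s : b.succs) {
      if (blocks[s].needs_frame) {
        b.needs_frame = true;
        changed = true;
        break;
      }
    }
  }
  return changed;
}

void PropagateMarks(std::vector<Block>& blocks) {
  // '||' keeps iterating while either direction makes progress; the old '&&'
  // stopped as soon as one direction had nothing left to do.
  while (Pass(blocks, /*reversed=*/false) || Pass(blocks, /*reversed=*/true)) {
  }
}

int main() {
  // Diamond CFG: block 3 needs a frame; the mark flows back through 1 and 2
  // and finally reaches block 0.
  std::vector<Block> blocks(4);
  blocks[0].succs = {1, 2};
  blocks[1].succs = {3};
  blocks[2].succs = {3};
  blocks[3].needs_frame = true;
  PropagateMarks(blocks);
  return blocks[0].needs_frame ? 0 : 1;
}
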
diff --git a/src/compiler/frame-states.cc b/src/compiler/frame-states.cc
index 387d6a9..91827d0 100644
--- a/src/compiler/frame-states.cc
+++ b/src/compiler/frame-states.cc
@@ -58,6 +58,9 @@
     case FrameStateType::kArgumentsAdaptor:
       os << "ARGUMENTS_ADAPTOR";
       break;
+    case FrameStateType::kTailCallerFunction:
+      os << "TAIL_CALLER_FRAME";
+      break;
     case FrameStateType::kConstructStub:
       os << "CONSTRUCT_STUB";
       break;
diff --git a/src/compiler/frame-states.h b/src/compiler/frame-states.h
index 60ff9b5..2552bcb 100644
--- a/src/compiler/frame-states.h
+++ b/src/compiler/frame-states.h
@@ -79,10 +79,10 @@
   kJavaScriptFunction,   // Represents an unoptimized JavaScriptFrame.
   kInterpretedFunction,  // Represents an InterpretedFrame.
   kArgumentsAdaptor,     // Represents an ArgumentsAdaptorFrame.
+  kTailCallerFunction,   // Represents a frame removed by tail call elimination.
   kConstructStub         // Represents a ConstructStubFrame.
 };
 
-
 class FrameStateFunctionInfo {
  public:
   FrameStateFunctionInfo(FrameStateType type, int parameter_count,
diff --git a/src/compiler/frame.cc b/src/compiler/frame.cc
index b08030b..3d93e15 100644
--- a/src/compiler/frame.cc
+++ b/src/compiler/frame.cc
@@ -13,17 +13,32 @@
 namespace compiler {
 
 Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
-    : needs_frame_((descriptor != nullptr) &&
-                   descriptor->RequiresFrameAsIncoming()),
-      frame_slot_count_(fixed_frame_size_in_slots),
+    : frame_slot_count_(fixed_frame_size_in_slots),
       callee_saved_slot_count_(0),
       spill_slot_count_(0),
       allocated_registers_(nullptr),
       allocated_double_registers_(nullptr) {}
 
+int Frame::AlignFrame(int alignment) {
+  DCHECK_EQ(0, callee_saved_slot_count_);
+  int alignment_slots = alignment / kPointerSize;
+  int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
+  if (delta != alignment_slots) {
+    frame_slot_count_ += delta;
+    if (spill_slot_count_ != 0) {
+      spill_slot_count_ += delta;
+    }
+  }
+  return delta;
+}
+
+void FrameAccessState::MarkHasFrame(bool state) {
+  has_frame_ = state;
+  SetFrameAccessToDefault();
+}
 
 void FrameAccessState::SetFrameAccessToDefault() {
-  if (frame()->needs_frame() && !FLAG_turbo_sp_frame_access) {
+  if (has_frame() && !FLAG_turbo_sp_frame_access) {
     SetFrameAccessToFP();
   } else {
     SetFrameAccessToSP();
@@ -32,16 +47,12 @@
 
 
 FrameOffset FrameAccessState::GetFrameOffset(int spill_slot) const {
-  const int offset =
-      (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
-      kPointerSize;
+  const int frame_offset = FrameSlotToFPOffset(spill_slot);
   if (access_frame_with_fp()) {
-    DCHECK(frame()->needs_frame());
-    return FrameOffset::FromFramePointer(offset);
+    return FrameOffset::FromFramePointer(frame_offset);
   } else {
     // No frame. Retrieve all parameters relative to stack pointer.
-    int sp_offset =
-        offset + ((frame()->GetSpToFpSlotCount() + sp_delta()) * kPointerSize);
+    int sp_offset = frame_offset + GetSPToFPOffset();
     return FrameOffset::FromStackPointer(sp_offset);
   }
 }
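
The padding computation in AlignFrame() above (and mirrored by AlignSavedCalleeRegisterSlots() in frame.h below) assumes a power-of-two alignment; when the slot count is already aligned, the expression yields alignment_slots itself, which the callers treat as "nothing to add". A quick standalone check of that arithmetic, with illustrative values only:

#include <cassert>

// Padding slots needed to round frame_slot_count up to a multiple of
// alignment_slots (a power of two). An already-aligned count yields
// alignment_slots, the sentinel the callers test for before padding.
int PaddingSlots(int frame_slot_count, int alignment_slots) {
  return alignment_slots - (frame_slot_count & (alignment_slots - 1));
}

int main() {
  assert(PaddingSlots(5, 2) == 1);  // 5 slots: pad to 6
  assert(PaddingSlots(6, 2) == 2);  // already even: sentinel, nothing added
  assert(PaddingSlots(7, 4) == 1);  // 7 slots: pad to 8
  return 0;
}
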
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 011a0f0..d413d3e 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -59,9 +59,9 @@
 //       |- - - - - - - - -|   |                        |
 //   1   | saved frame ptr | Fixed                      |
 //       |- - - - - - - - -| Header <-- frame ptr       |
-//   2   |     Context     |   |                        |
+//   2   |Context/Frm. Type|   |                        |
 //       |- - - - - - - - -|   |                        |
-//   3   |JSFunction/Marker|   v                        |
+//   3   |   [JSFunction]  |   v                        |
 //       +-----------------+----                        |
 //   4   |    spill 1      |   ^                      Callee
 //       |- - - - - - - - -|   |                   frame slots
@@ -81,26 +81,13 @@
   explicit Frame(int fixed_frame_size_in_slots,
                  const CallDescriptor* descriptor);
 
-  inline bool needs_frame() const { return needs_frame_; }
-  inline void MarkNeedsFrame() { needs_frame_ = true; }
-
   inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
 
-  inline int GetSpToFpSlotCount() const {
-    return GetTotalFrameSlotCount() -
-           StandardFrameConstants::kFixedSlotCountAboveFp;
-  }
   inline int GetSavedCalleeRegisterSlotCount() const {
     return callee_saved_slot_count_;
   }
   inline int GetSpillSlotCount() const { return spill_slot_count_; }
 
-  inline void SetElidedFrameSizeInSlots(int slots) {
-    DCHECK_EQ(0, callee_saved_slot_count_);
-    DCHECK_EQ(0, spill_slot_count_);
-    frame_slot_count_ = slots;
-  }
-
   void SetAllocatedRegisters(BitVector* regs) {
     DCHECK(allocated_registers_ == nullptr);
     allocated_registers_ = regs;
@@ -115,33 +102,34 @@
     return !allocated_double_registers_->IsEmpty();
   }
 
-  int AlignSavedCalleeRegisterSlots() {
+  int AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
     DCHECK_EQ(0, callee_saved_slot_count_);
-    needs_frame_ = true;
-    int delta = frame_slot_count_ & 1;
-    frame_slot_count_ += delta;
+    int alignment_slots = alignment / kPointerSize;
+    int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
+    if (delta != alignment_slots) {
+      frame_slot_count_ += delta;
+    }
     return delta;
   }
 
   void AllocateSavedCalleeRegisterSlots(int count) {
-    needs_frame_ = true;
     frame_slot_count_ += count;
     callee_saved_slot_count_ += count;
   }
 
   int AllocateSpillSlot(int width) {
     DCHECK_EQ(0, callee_saved_slot_count_);
-    needs_frame_ = true;
     int frame_slot_count_before = frame_slot_count_;
     int slot = AllocateAlignedFrameSlot(width);
     spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
     return slot;
   }
 
+  int AlignFrame(int alignment = kDoubleSize);
+
   int ReserveSpillSlots(size_t slot_count) {
     DCHECK_EQ(0, callee_saved_slot_count_);
     DCHECK_EQ(0, spill_slot_count_);
-    needs_frame_ = true;
     spill_slot_count_ += static_cast<int>(slot_count);
     frame_slot_count_ += static_cast<int>(slot_count);
     return frame_slot_count_ - 1;
@@ -163,7 +151,6 @@
   }
 
  private:
-  bool needs_frame_;
   int frame_slot_count_;
   int callee_saved_slot_count_;
   int spill_slot_count_;
@@ -205,21 +192,36 @@
 class FrameAccessState : public ZoneObject {
  public:
   explicit FrameAccessState(Frame* const frame)
-      : frame_(frame), access_frame_with_fp_(false), sp_delta_(0) {
-    SetFrameAccessToDefault();
-  }
+      : frame_(frame),
+        access_frame_with_fp_(false),
+        sp_delta_(0),
+        has_frame_(false) {}
 
   Frame* frame() const { return frame_; }
+  void MarkHasFrame(bool state);
 
   int sp_delta() const { return sp_delta_; }
   void ClearSPDelta() { sp_delta_ = 0; }
   void IncreaseSPDelta(int amount) { sp_delta_ += amount; }
 
   bool access_frame_with_fp() const { return access_frame_with_fp_; }
+
+  // Whether we have a frame at the current stage of code generation,
+  // regardless of how stack slots are accessed (via sp or fp).
+  bool has_frame() const { return has_frame_; }
+
   void SetFrameAccessToDefault();
   void SetFrameAccessToFP() { access_frame_with_fp_ = true; }
   void SetFrameAccessToSP() { access_frame_with_fp_ = false; }
 
+  int GetSPToFPSlotCount() const {
+    int frame_slot_count =
+        (has_frame() ? frame()->GetTotalFrameSlotCount() : kElidedFrameSlots) -
+        StandardFrameConstants::kFixedSlotCountAboveFp;
+    return frame_slot_count + sp_delta();
+  }
+  int GetSPToFPOffset() const { return GetSPToFPSlotCount() * kPointerSize; }
+
   // Get the frame offset for a given spill slot. The location depends on the
   // calling convention and the specific frame layout, and may thus be
   // architecture-specific. Negative spill slots indicate arguments on the
@@ -230,6 +232,7 @@
   Frame* const frame_;
   bool access_frame_with_fp_;
   int sp_delta_;
+  bool has_frame_;
 };
 }  // namespace compiler
 }  // namespace internal
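
GetFrameOffset() now derives the sp-relative offset from the fp-relative one by adding GetSPToFPOffset(). A small self-contained sketch of that arithmetic; the constants are illustrative stand-ins (32-bit pointers, one fixed slot above the frame pointer), not the real StandardFrameConstants:

#include <cassert>

// Illustrative constants only.
constexpr int kPointerSize = 4;
constexpr int kFixedSlotCountAboveFp = 1;  // e.g. the saved return address

// Mirrors the removed inline computation in GetFrameOffset(): larger slot
// indices map to more negative fp-relative offsets.
int FrameSlotToFPOffset(int slot) {
  return (kFixedSlotCountAboveFp - slot - 1) * kPointerSize;
}

// sp-relative offset = fp-relative offset + distance from sp up to fp, where
// that distance is the frame slots below the fixed header plus any extra
// pushes tracked in sp_delta.
int SlotToSPOffset(int slot, int total_frame_slots, int sp_delta) {
  int sp_to_fp_slots = total_frame_slots - kFixedSlotCountAboveFp + sp_delta;
  return FrameSlotToFPOffset(slot) + sp_to_fp_slots * kPointerSize;
}

int main() {
  // A frame of 6 total slots and no pending pushes: spill slot 2 sits at
  // fp-relative -8, or equivalently sp-relative +12.
  assert(FrameSlotToFPOffset(2) == -8);
  assert(SlotToSPOffset(2, 6, 0) == 12);
  return 0;
}
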
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 4107b0f..35e91fa 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -29,7 +29,7 @@
   auto it =
       std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
   moves->erase(it, moves->end());
-  for (auto move : *moves) {
+  for (MoveOperands* move : *moves) {
     if (!move->IsEliminated()) PerformMove(moves, move);
   }
 }
@@ -53,7 +53,7 @@
   // Perform a depth-first traversal of the move graph to resolve dependencies.
   // Any unperformed, unpending move with a source the same as this one's
   // destination blocks this one so recursively perform all such moves.
-  for (auto other : *moves) {
+  for (MoveOperands* other : *moves) {
     if (other->Blocks(destination) && !other->IsPending()) {
       // Though PerformMove can change any source operand in the move graph,
       // this call cannot create a blocking move via a swap (this loop does not
@@ -103,7 +103,7 @@
   // Any unperformed (including pending) move with a source of either this
   // move's source or destination needs to have their source changed to
   // reflect the state of affairs after the swap.
-  for (auto other : *moves) {
+  for (MoveOperands* other : *moves) {
     if (other->Blocks(source)) {
       other->set_source(destination);
     } else if (other->Blocks(destination)) {
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
index 7f4cc95..cb775e9 100644
--- a/src/compiler/graph-replay.cc
+++ b/src/compiler/graph-replay.cc
@@ -20,7 +20,7 @@
 void GraphReplayPrinter::PrintReplay(Graph* graph) {
   GraphReplayPrinter replay;
   PrintF("  Node* nil = graph()->NewNode(common()->Dead());\n");
-  Zone zone;
+  Zone zone(graph->zone()->allocator());
   AllNodes nodes(&zone, graph);
 
   // Allocate the nodes first.
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 0785176..301e390 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -197,7 +197,8 @@
 
 
 std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   os << "{\n\"nodes\":[";
   JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
   os << "],\n\"edges\":[";
@@ -231,8 +232,8 @@
   void PrintInputs(InputIterator* i, int count, const char* prefix);
   void PrintType(Node* node);
 
-  void PrintLiveRange(LiveRange* range, const char* type, int vreg);
-  void PrintLiveRangeChain(TopLevelLiveRange* range, const char* type);
+  void PrintLiveRange(const LiveRange* range, const char* type, int vreg);
+  void PrintLiveRangeChain(const TopLevelLiveRange* range, const char* type);
 
   class Tag final BASE_EMBEDDED {
    public:
@@ -505,31 +506,30 @@
   Tag tag(this, "intervals");
   PrintStringProperty("name", phase);
 
-  for (auto range : data->fixed_double_live_ranges()) {
+  for (const TopLevelLiveRange* range : data->fixed_double_live_ranges()) {
     PrintLiveRangeChain(range, "fixed");
   }
 
-  for (auto range : data->fixed_live_ranges()) {
+  for (const TopLevelLiveRange* range : data->fixed_live_ranges()) {
     PrintLiveRangeChain(range, "fixed");
   }
 
-  for (auto range : data->live_ranges()) {
+  for (const TopLevelLiveRange* range : data->live_ranges()) {
     PrintLiveRangeChain(range, "object");
   }
 }
 
-
-void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
+void GraphC1Visualizer::PrintLiveRangeChain(const TopLevelLiveRange* range,
                                             const char* type) {
   if (range == nullptr || range->IsEmpty()) return;
   int vreg = range->vreg();
-  for (LiveRange* child = range; child != nullptr; child = child->next()) {
+  for (const LiveRange* child = range; child != nullptr;
+       child = child->next()) {
     PrintLiveRange(child, type, vreg);
   }
 }
 
-
-void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
+void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
                                        int vreg) {
   if (range != nullptr && !range->IsEmpty()) {
     PrintIndent();
@@ -545,7 +545,7 @@
         os_ << " \"" << assigned_reg.ToString() << "\"";
       }
     } else if (range->spilled()) {
-      auto top = range->TopLevel();
+      const TopLevelLiveRange* top = range->TopLevel();
       int index = -1;
       if (top->HasSpillRange()) {
         index = kMaxInt;  // This hasn't been set yet.
@@ -564,8 +564,8 @@
     }
 
     os_ << " " << vreg;
-    for (auto interval = range->first_interval(); interval != nullptr;
-         interval = interval->next()) {
+    for (const UseInterval* interval = range->first_interval();
+         interval != nullptr; interval = interval->next()) {
       os_ << " [" << interval->start().value() << ", "
           << interval->end().value() << "[";
     }
@@ -584,14 +584,16 @@
 
 
 std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
   return os;
 }
 
 
 std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone)
       .PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
   return os;
@@ -600,7 +602,8 @@
 
 std::ostream& operator<<(std::ostream& os,
                          const AsC1VRegisterAllocationData& ac) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
   return os;
 }
@@ -610,7 +613,8 @@
 const int kVisited = 2;
 
 std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
-  Zone local_zone;
+  base::AccountingAllocator allocator;
+  Zone local_zone(&allocator);
   ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
   ZoneStack<Node*> stack(&local_zone);
 
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index ba69617..ff1a17e 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -25,7 +25,7 @@
 
 
 void Graph::Decorate(Node* node) {
-  for (auto const decorator : decorators_) {
+  for (GraphDecorator* const decorator : decorators_) {
     decorator->Decorate(node);
   }
 }
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 1f61af8..ee05ad0 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -49,16 +49,13 @@
       return Operand(ToDoubleRegister(op));
     }
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
-    return Operand(offset.from_stack_pointer() ? esp : ebp,
-                   offset.offset() + extra);
+    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
   }
 
-  Operand ToMaterializableOperand(int materializable_offset) {
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        FPOffsetToFrameSlot(materializable_offset));
-    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+  Operand SlotToOperand(int slot, int extra = 0) {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+    return Operand(offset.from_stack_pointer() ? esp : ebp,
+                   offset.offset() + extra);
   }
 
   Operand HighOperand(InstructionOperand* op) {
@@ -333,6 +330,39 @@
     __ bind(&done);                                          \
   } while (false)
 
+#define ASSEMBLE_COMPARE(asm_instr)                                   \
+  do {                                                                \
+    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+      size_t index = 0;                                               \
+      Operand left = i.MemoryOperand(&index);                         \
+      if (HasImmediateInput(instr, index)) {                          \
+        __ asm_instr(left, i.InputImmediate(index));                  \
+      } else {                                                        \
+        __ asm_instr(left, i.InputRegister(index));                   \
+      }                                                               \
+    } else {                                                          \
+      if (HasImmediateInput(instr, 1)) {                              \
+        if (instr->InputAt(0)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
+        } else {                                                      \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
+        }                                                             \
+      } else {                                                        \
+        if (instr->InputAt(1)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
+        } else {                                                      \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
+        }                                                             \
+      }                                                               \
+    }                                                                 \
+  } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(esp, ebp);
+  __ pop(ebp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -349,18 +379,56 @@
     __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ mov(ebp, MemOperand(ebp, 0));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register, Register,
+                                                     Register) {
+  // There are not enough temp registers left on ia32 for a call instruction
+  // so we pick some scratch registers and save/restore them manually here.
+  int scratch_count = 3;
+  Register scratch1 = ebx;
+  Register scratch2 = ecx;
+  Register scratch3 = edx;
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &done, Label::kNear);
+
+  __ push(scratch1);
+  __ push(scratch2);
+  __ push(scratch3);
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, scratch_count);
+  __ pop(scratch3);
+  __ pop(scratch2);
+  __ pop(scratch1);
+
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   IA32OperandConverter i(this, instr);
-
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (HasImmediateInput(instr, 0)) {
@@ -375,9 +443,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ jmp(code, RelocInfo::CODE_TARGET);
@@ -402,6 +475,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -411,6 +485,10 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       break;
@@ -469,7 +547,7 @@
       __ mov(i.OutputRegister(), ebp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ mov(i.OutputRegister(), Operand(ebp, 0));
       } else {
         __ mov(i.OutputRegister(), ebp);
@@ -530,38 +608,22 @@
       }
       break;
     case kIA32Cmp:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ cmp(operand, i.InputImmediate(index));
-        } else {
-          __ cmp(operand, i.InputRegister(index));
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ cmp(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ cmp(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(cmp);
+      break;
+    case kIA32Cmp16:
+      ASSEMBLE_COMPARE(cmpw);
+      break;
+    case kIA32Cmp8:
+      ASSEMBLE_COMPARE(cmpb);
       break;
     case kIA32Test:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ test(operand, i.InputImmediate(index));
-        } else {
-          __ test(i.InputRegister(index), operand);
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ test(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ test(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(test);
+      break;
+    case kIA32Test16:
+      ASSEMBLE_COMPARE(test_w);
+      break;
+    case kIA32Test8:
+      ASSEMBLE_COMPARE(test_b);
       break;
     case kIA32Imul:
       if (HasImmediateInput(instr, 1)) {
@@ -632,6 +694,92 @@
         __ sar_cl(i.OutputOperand());
       }
       break;
+    case kIA32AddPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for adc. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ add(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ add(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kIA32SubPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for sbb. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ sub(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ sub(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kIA32MulPair: {
+      __ imul(i.OutputRegister(1), i.InputOperand(0));
+      __ mov(i.TempRegister(0), i.InputOperand(1));
+      __ imul(i.TempRegister(0), i.InputOperand(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+      __ mov(i.OutputRegister(0), i.InputOperand(0));
+      // Multiply the low words; the product goes to eax (low) and edx (high).
+      __ mul(i.InputRegister(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+
+      break;
+    }
+    case kIA32ShlPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kIA32ShrPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kIA32SarPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
     case kIA32Ror:
       if (HasImmediateInput(instr, 1)) {
         __ ror(i.OutputOperand(), i.InputInt5(1));
@@ -1476,21 +1624,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    // Assemble a prologue similar the to cdecl calling convention.
-    __ push(ebp);
-    __ mov(ebp, esp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    // TODO(turbofan): this prologue is redundant with OSR, but still needed for
-    // code aging.
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ push(ebp);
+      __ mov(ebp, esp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
-
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1536,17 +1679,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-    __ pop(ebp);       // Pop caller's frame pointer.
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-      __ pop(ebp);       // Pop caller's frame pointer.
+      AssembleDeconstructFrame();
     }
   }
   size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
@@ -1581,15 +1722,15 @@
     Constant src_constant = g.ToConstant(source);
     if (src_constant.type() == Constant::kHeapObject) {
       Handle<HeapObject> src = src_constant.ToHeapObject();
-      int offset;
-      if (IsMaterializableFromFrame(src, &offset)) {
+      int slot;
+      if (IsMaterializableFromFrame(src, &slot)) {
         if (destination->IsRegister()) {
           Register dst = g.ToRegister(destination);
-          __ mov(dst, g.ToMaterializableOperand(offset));
+          __ mov(dst, g.SlotToOperand(slot));
         } else {
           DCHECK(destination->IsStackSlot());
           Operand dst = g.ToOperand(destination);
-          __ push(g.ToMaterializableOperand(offset));
+          __ push(g.SlotToOperand(slot));
           __ pop(dst);
         }
       } else if (destination->IsRegister()) {
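
The kIA32AddPair case above lowers a 64-bit addition into an add on the low words followed by an adc on the high words (kIA32SubPair does the same with sub/sbb). The same decomposition in portable C++, as a correctness sketch with uint32_t halves standing in for the registers:

#include <cassert>
#include <cstdint>

// Two-word addition: add the low words, capture the carry, then feed it into
// the high-word addition, i.e. the add/adc pairing emitted for kIA32AddPair.
void AddPair(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
             uint32_t* out_lo, uint32_t* out_hi) {
  uint32_t lo = a_lo + b_lo;
  uint32_t carry = lo < a_lo ? 1u : 0u;  // carry out of the low word
  *out_lo = lo;
  *out_hi = a_hi + b_hi + carry;         // adc: add with carry in
}

int main() {
  uint32_t lo = 0, hi = 0;
  // 0x1FFFFFFFF + 0x200000001 == 0x400000000
  AddPair(0xFFFFFFFFu, 0x1u, 0x1u, 0x2u, &lo, &hi);
  assert(lo == 0u && hi == 4u);
  return 0;
}
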
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 61fd035..3cf2094 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -15,7 +15,11 @@
   V(IA32Add)                       \
   V(IA32And)                       \
   V(IA32Cmp)                       \
+  V(IA32Cmp16)                     \
+  V(IA32Cmp8)                      \
   V(IA32Test)                      \
+  V(IA32Test16)                    \
+  V(IA32Test8)                     \
   V(IA32Or)                        \
   V(IA32Xor)                       \
   V(IA32Sub)                       \
@@ -29,6 +33,12 @@
   V(IA32Shl)                       \
   V(IA32Shr)                       \
   V(IA32Sar)                       \
+  V(IA32AddPair)                   \
+  V(IA32SubPair)                   \
+  V(IA32MulPair)                   \
+  V(IA32ShlPair)                   \
+  V(IA32ShrPair)                   \
+  V(IA32SarPair)                   \
   V(IA32Ror)                       \
   V(IA32Lzcnt)                     \
   V(IA32Tzcnt)                     \
@@ -105,7 +115,6 @@
   V(IA32Poke)                      \
   V(IA32StackCheck)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index 093bc22..803fdf6 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -17,7 +17,11 @@
     case kIA32Add:
     case kIA32And:
     case kIA32Cmp:
+    case kIA32Cmp16:
+    case kIA32Cmp8:
     case kIA32Test:
+    case kIA32Test16:
+    case kIA32Test8:
     case kIA32Or:
     case kIA32Xor:
     case kIA32Sub:
@@ -31,6 +35,12 @@
     case kIA32Shl:
     case kIA32Shr:
     case kIA32Sar:
+    case kIA32AddPair:
+    case kIA32SubPair:
+    case kIA32MulPair:
+    case kIA32ShlPair:
+    case kIA32ShrPair:
+    case kIA32SarPair:
     case kIA32Ror:
     case kIA32Lzcnt:
     case kIA32Tzcnt:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index f649ba9..5c4acce 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -27,6 +27,30 @@
     return DefineAsRegister(node);
   }
 
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(node, input)) {
+      return false;
+    }
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kIA32Cmp:
+      case kIA32Test:
+        return rep == MachineRepresentation::kWord32 ||
+               rep == MachineRepresentation::kTagged;
+      case kIA32Cmp16:
+      case kIA32Test16:
+        return rep == MachineRepresentation::kWord16;
+      case kIA32Cmp8:
+      case kIA32Test8:
+        return rep == MachineRepresentation::kWord8;
+      default:
+        break;
+    }
+    return false;
+  }
+
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
@@ -232,9 +256,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MR1;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -404,10 +426,11 @@
   }
 }
 
+namespace {
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, FlagsContinuation* cont) {
   IA32OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
@@ -456,18 +479,24 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode) {
   FlagsContinuation cont;
   VisitBinop(selector, node, opcode, &cont);
 }
 
+}  // namespace
 
 void InstructionSelector::VisitWord32And(Node* node) {
   VisitBinop(this, node, kIA32And);
@@ -579,6 +608,93 @@
   VisitShift(this, node, kIA32Sar);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+  // register and one mov instruction.
+  InstructionOperand inputs[] = {
+      g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
+      g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+
+  InstructionOperand temps[] = {g.TempRegister(edx)};
+
+  Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+                          Node* node) {
+  IA32OperandGenerator g(selector);
+
+  Node* shift = node->InputAt(2);
+  InstructionOperand shift_operand;
+  if (g.CanBeImmediate(shift)) {
+    shift_operand = g.UseImmediate(shift);
+  } else {
+    shift_operand = g.UseFixed(shift, ecx);
+  }
+  InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
+                                 g.UseFixed(node->InputAt(1), edx),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitWord32PairShift(this, kIA32ShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitWord32PairShift(this, kIA32ShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitWord32PairShift(this, kIA32SarPair, node);
+}
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitShift(this, node, kIA32Ror);
@@ -746,6 +862,9 @@
   VisitRO(this, node, kSSEFloat64ToUint32);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRO(this, node, kSSEFloat64ToUint32);
+}
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   VisitRO(this, node, kSSEFloat64ToFloat32);
@@ -987,6 +1106,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
 
 namespace {
 
@@ -1008,6 +1128,9 @@
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1015,33 +1138,21 @@
   }
 }
 
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
-                         Node* node, Node* input) {
-  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
-    return false;
-  }
-  MachineRepresentation load_representation =
-      LoadRepresentationOf(input->op()).representation();
-  if (load_representation == MachineRepresentation::kWord32 ||
-      load_representation == MachineRepresentation::kTagged) {
-    return opcode == kIA32Cmp || opcode == kIA32Test;
-  }
-  return false;
-}
-
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
                   FlagsContinuation* cont) {
   IA32OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
   if (cont->IsBranch()) {
-    selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+    selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
-    selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
-                   left, right);
+    selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
   }
 }
 
@@ -1057,6 +1168,36 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+                                    Node* right) {
+  if (opcode != kIA32Cmp && opcode != kIA32Test) {
+    return opcode;
+  }
+  // Currently, if one of the two operands is not a Load, we don't know what its
+  // machine representation is, so we bail out.
+  // TODO(epertoso): we can probably get some size information out of immediates
+  // and phi nodes.
+  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+    return opcode;
+  }
+  // If the load representations don't match, both operands will be
+  // zero/sign-extended to 32bit.
+  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+  if (left_representation != LoadRepresentationOf(right->op())) {
+    return opcode;
+  }
+  switch (left_representation.representation()) {
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kWord8:
+      return opcode == kIA32Cmp ? kIA32Cmp8 : kIA32Test8;
+    case MachineRepresentation::kWord16:
+      return opcode == kIA32Cmp ? kIA32Cmp16 : kIA32Test16;
+    default:
+      return opcode;
+  }
+}
 
 // Shared routine for multiple float32 compare operations (inputs commuted).
 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1082,15 +1223,22 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // If one of the two inputs is an immediate, make sure it's on the right.
-  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+  InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
+  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+      (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
+       !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left)) {
+      // TODO(epertoso): we should use `narrowed_opcode' here once we match
+      // immediates too.
       return VisitCompareWithMemoryOperand(selector, opcode, left,
                                            g.UseImmediate(right), cont);
     }
@@ -1098,15 +1246,21 @@
                         cont);
   }
 
+  // Match memory operands on left side of comparison.
+  if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+    bool needs_byte_register =
+        narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
+    return VisitCompareWithMemoryOperand(
+        selector, narrowed_opcode, left,
+        needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
+        cont);
+  }
+
   if (g.CanBeBetterLeftOperand(right)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
-  if (CanUseMemoryOperand(selector, opcode, node, left)) {
-    return VisitCompareWithMemoryOperand(selector, opcode, left,
-                                         g.UseRegister(right), cont);
-  }
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1126,6 +1280,9 @@
       if (cont->IsBranch()) {
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
+      } else if (cont->IsDeoptimize()) {
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+                                 cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1228,13 +1385,23 @@
 
 }  // namespace
 
-
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   IA32OperandGenerator g(this);
@@ -1265,7 +1432,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1275,32 +1442,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kIA32Add, &cont);
   }
   FlagsContinuation cont;
@@ -1310,7 +1479,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kIA32Sub, &cont);
   }
   FlagsContinuation cont;
@@ -1319,37 +1488,41 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
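
TryNarrowOpcodeSize() above only narrows a compare when both operands are loads of the same representation; as its comment notes, mismatched operands have already been zero- or sign-extended to 32 bits. A small standalone example of the hazard that rule avoids: comparing only the low bytes can disagree with the 32-bit compare (values chosen purely for illustration):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0x100;  // a full 32-bit value
  uint8_t b = 0;       // a value that came from an 8-bit load
  assert(a != b);                        // 32-bit compare: unequal
  assert(static_cast<uint8_t>(a) == b);  // low-byte compare: the wrong answer
  return 0;
}
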
 
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index d2144cf..b005083 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -21,6 +21,8 @@
 #include "src/compiler/x64/instruction-codes-x64.h"
 #elif V8_TARGET_ARCH_PPC
 #include "src/compiler/ppc/instruction-codes-ppc.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/compiler/s390/instruction-codes-s390.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/compiler/x87/instruction-codes-x87.h"
 #else
@@ -39,40 +41,42 @@
 
 // Target-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
-#define COMMON_ARCH_OPCODE_LIST(V) \
-  V(ArchCallCodeObject)            \
-  V(ArchTailCallCodeObject)        \
-  V(ArchCallJSFunction)            \
-  V(ArchTailCallJSFunction)        \
-  V(ArchPrepareCallCFunction)      \
-  V(ArchCallCFunction)             \
-  V(ArchPrepareTailCall)           \
-  V(ArchJmp)                       \
-  V(ArchLookupSwitch)              \
-  V(ArchTableSwitch)               \
-  V(ArchNop)                       \
-  V(ArchThrowTerminator)           \
-  V(ArchDeoptimize)                \
-  V(ArchRet)                       \
-  V(ArchStackPointer)              \
-  V(ArchFramePointer)              \
-  V(ArchParentFramePointer)        \
-  V(ArchTruncateDoubleToI)         \
-  V(ArchStoreWithWriteBarrier)     \
-  V(CheckedLoadInt8)               \
-  V(CheckedLoadUint8)              \
-  V(CheckedLoadInt16)              \
-  V(CheckedLoadUint16)             \
-  V(CheckedLoadWord32)             \
-  V(CheckedLoadWord64)             \
-  V(CheckedLoadFloat32)            \
-  V(CheckedLoadFloat64)            \
-  V(CheckedStoreWord8)             \
-  V(CheckedStoreWord16)            \
-  V(CheckedStoreWord32)            \
-  V(CheckedStoreWord64)            \
-  V(CheckedStoreFloat32)           \
-  V(CheckedStoreFloat64)           \
+#define COMMON_ARCH_OPCODE_LIST(V)        \
+  V(ArchCallCodeObject)                   \
+  V(ArchTailCallCodeObjectFromJSFunction) \
+  V(ArchTailCallCodeObject)               \
+  V(ArchCallJSFunction)                   \
+  V(ArchTailCallJSFunctionFromJSFunction) \
+  V(ArchTailCallJSFunction)               \
+  V(ArchPrepareCallCFunction)             \
+  V(ArchCallCFunction)                    \
+  V(ArchPrepareTailCall)                  \
+  V(ArchJmp)                              \
+  V(ArchLookupSwitch)                     \
+  V(ArchTableSwitch)                      \
+  V(ArchNop)                              \
+  V(ArchThrowTerminator)                  \
+  V(ArchDeoptimize)                       \
+  V(ArchRet)                              \
+  V(ArchStackPointer)                     \
+  V(ArchFramePointer)                     \
+  V(ArchParentFramePointer)               \
+  V(ArchTruncateDoubleToI)                \
+  V(ArchStoreWithWriteBarrier)            \
+  V(CheckedLoadInt8)                      \
+  V(CheckedLoadUint8)                     \
+  V(CheckedLoadInt16)                     \
+  V(CheckedLoadUint16)                    \
+  V(CheckedLoadWord32)                    \
+  V(CheckedLoadWord64)                    \
+  V(CheckedLoadFloat32)                   \
+  V(CheckedLoadFloat64)                   \
+  V(CheckedStoreWord8)                    \
+  V(CheckedStoreWord16)                   \
+  V(CheckedStoreWord32)                   \
+  V(CheckedStoreWord64)                   \
+  V(CheckedStoreFloat32)                  \
+  V(CheckedStoreFloat64)                  \
   V(ArchStackSlot)
 
 #define ARCH_OPCODE_LIST(V)  \
@@ -110,7 +114,12 @@
 std::ostream& operator<<(std::ostream& os, const AddressingMode& am);
 
 // The mode of the flags continuation (see below).
-enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+enum FlagsMode {
+  kFlags_none = 0,
+  kFlags_branch = 1,
+  kFlags_deoptimize = 2,
+  kFlags_set = 3
+};
 
 std::ostream& operator<<(std::ostream& os, const FlagsMode& fm);
 
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index adbfd5d..b612cd1 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -115,7 +115,7 @@
   if (IsBlockTerminator(instr)) {
     // Make sure that basic block terminators are not moved by adding them
     // as successor of every instruction.
-    for (auto node : graph_) {
+    for (ScheduleGraphNode* node : graph_) {
       node->AddSuccessor(new_node);
     }
   } else if (IsFixedRegisterParameter(instr)) {
@@ -134,7 +134,7 @@
       if (last_side_effect_instr_ != nullptr) {
         last_side_effect_instr_->AddSuccessor(new_node);
       }
-      for (auto load : pending_loads_) {
+      for (ScheduleGraphNode* load : pending_loads_) {
         load->AddSuccessor(new_node);
       }
       pending_loads_.clear();
@@ -149,7 +149,7 @@
     }
 
     // Look for operand dependencies.
-    for (auto node : graph_) {
+    for (ScheduleGraphNode* node : graph_) {
       if (HasOperandDependency(node->instruction(), instr)) {
         node->AddSuccessor(new_node);
       }
@@ -168,7 +168,7 @@
   ComputeTotalLatencies();
 
   // Add nodes which don't have dependencies to the ready list.
-  for (auto node : graph_) {
+  for (ScheduleGraphNode* node : graph_) {
     if (!node->HasUnscheduledPredecessor()) {
       ready_list.AddNode(node);
     }
@@ -177,12 +177,12 @@
   // Go through the ready list and schedule the instructions.
   int cycle = 0;
   while (!ready_list.IsEmpty()) {
-    auto candidate = ready_list.PopBestCandidate(cycle);
+    ScheduleGraphNode* candidate = ready_list.PopBestCandidate(cycle);
 
     if (candidate != nullptr) {
       sequence()->AddInstruction(candidate->instruction());
 
-      for (auto successor : candidate->successors()) {
+      for (ScheduleGraphNode* successor : candidate->successors()) {
         successor->DropUnscheduledPredecessor();
         successor->set_start_cycle(
             std::max(successor->start_cycle(),
@@ -220,7 +220,9 @@
     case kArchCallJSFunction:
       return kHasSideEffect;
 
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject:
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction:
       return kHasSideEffect | kIsBlockTerminator;
 
@@ -296,10 +298,10 @@
 
 
 void InstructionScheduler::ComputeTotalLatencies() {
-  for (auto node : base::Reversed(graph_)) {
+  for (ScheduleGraphNode* node : base::Reversed(graph_)) {
     int max_latency = 0;
 
-    for (auto successor : node->successors()) {
+    for (ScheduleGraphNode* successor : node->successors()) {
       DCHECK(successor->total_latency() != -1);
       if (successor->total_latency() > max_latency) {
         max_latency = successor->total_latency();
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 5cca888..e750aed 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -303,22 +303,32 @@
     DCHECK_NOT_NULL(false_block);
   }
 
-  // Creates a new flags continuation from the given condition and result node.
-  FlagsContinuation(FlagsCondition condition, Node* result)
-      : mode_(kFlags_set), condition_(condition), result_(result) {
-    DCHECK_NOT_NULL(result);
+  // Creates a new flags continuation for an eager deoptimization exit.
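+  // For example, an instruction selector can request such an exit with
+  //   FlagsContinuation::ForDeoptimize(kNotEqual, frame_state)
+  // and hand the continuation to its flags-setting compare emitter, which then
+  // encodes kFlags_deoptimize instead of emitting an explicit branch.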
+  static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+                                         Node* frame_state) {
+    return FlagsContinuation(kFlags_deoptimize, condition, frame_state);
+  }
+
+  // Creates a new flags continuation for a boolean value.
+  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
+    return FlagsContinuation(kFlags_set, condition, result);
   }
 
   bool IsNone() const { return mode_ == kFlags_none; }
   bool IsBranch() const { return mode_ == kFlags_branch; }
+  bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
   bool IsSet() const { return mode_ == kFlags_set; }
   FlagsCondition condition() const {
     DCHECK(!IsNone());
     return condition_;
   }
+  Node* frame_state() const {
+    DCHECK(IsDeoptimize());
+    return frame_state_or_result_;
+  }
   Node* result() const {
     DCHECK(IsSet());
-    return result_;
+    return frame_state_or_result_;
   }
   BasicBlock* true_block() const {
     DCHECK(IsBranch());
@@ -355,11 +365,20 @@
   }
 
  private:
-  FlagsMode mode_;
+  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+                    Node* frame_state_or_result)
+      : mode_(mode),
+        condition_(condition),
+        frame_state_or_result_(frame_state_or_result) {
+    DCHECK_NOT_NULL(frame_state_or_result);
+  }
+
+  FlagsMode const mode_;
   FlagsCondition condition_;
-  Node* result_;             // Only valid if mode_ == kFlags_set.
-  BasicBlock* true_block_;   // Only valid if mode_ == kFlags_branch.
-  BasicBlock* false_block_;  // Only valid if mode_ == kFlags_branch.
+  Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize
+                                 // or mode_ == kFlags_set.
+  BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch.
+  BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch.
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 0f27e50..d172ed1 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -82,6 +82,9 @@
     }
     EndBlock(RpoNumber::FromInt(block->rpo_number()));
   }
+#if DEBUG
+  sequence()->ValidateSSA();
+#endif
 }
 
 
@@ -218,10 +221,25 @@
 
 
 bool InstructionSelector::CanCover(Node* user, Node* node) const {
-  return node->OwnedBy(user) &&
-         schedule()->block(node) == schedule()->block(user) &&
-         (node->op()->HasProperty(Operator::kPure) ||
-          GetEffectLevel(node) == GetEffectLevel(user));
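+  // Covering means that {node} is emitted as part of the instruction selected
+  // for {user} (e.g. a load folded into a memory operand) rather than getting
+  // an instruction of its own. This is only safe if all of the following hold: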
+  // 1. Both {user} and {node} must be in the same basic block.
+  if (schedule()->block(node) != schedule()->block(user)) {
+    return false;
+  }
+  // 2. Pure {node}s must be owned by the {user}.
+  if (node->op()->HasProperty(Operator::kPure)) {
+    return node->OwnedBy(user);
+  }
+  // 3. Impure {node}s must match the effect level of {user}.
+  if (GetEffectLevel(node) != GetEffectLevel(user)) {
+    return false;
+  }
+  // 4. Only {node} must have value edges pointing to {user}.
+  for (Edge const edge : node->use_edges()) {
+    if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
+      return false;
+    }
+  }
+  return true;
 }
 
 int InstructionSelector::GetVirtualRegister(const Node* node) {
@@ -597,15 +615,17 @@
         call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
 
     // If it was a syntactic tail call we need to drop the current frame and
-    // an arguments adaptor frame on top of it (if the latter is present).
+    // all the frames on top of it that are either an arguments adaptor frame
+    // or a tail caller frame.
     if (buffer->descriptor->SupportsTailCalls()) {
       frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
       buffer->frame_state_descriptor =
           buffer->frame_state_descriptor->outer_state();
-
-      if (buffer->frame_state_descriptor != nullptr &&
-          buffer->frame_state_descriptor->type() ==
-              FrameStateType::kArgumentsAdaptor) {
+      while (buffer->frame_state_descriptor != nullptr &&
+             (buffer->frame_state_descriptor->type() ==
+                  FrameStateType::kArgumentsAdaptor ||
+              buffer->frame_state_descriptor->type() ==
+                  FrameStateType::kTailCallerFunction)) {
         frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
         buffer->frame_state_descriptor =
             buffer->frame_state_descriptor->outer_state();
@@ -873,6 +893,10 @@
     }
     case IrOpcode::kCall:
       return VisitCall(node);
+    case IrOpcode::kDeoptimizeIf:
+      return VisitDeoptimizeIf(node);
+    case IrOpcode::kDeoptimizeUnless:
+      return VisitDeoptimizeUnless(node);
     case IrOpcode::kFrameState:
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
@@ -998,6 +1022,8 @@
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kTruncateFloat64ToUint32:
+      return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
     case IrOpcode::kTruncateFloat32ToInt32:
       return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
     case IrOpcode::kTruncateFloat32ToUint32:
@@ -1128,6 +1154,30 @@
     }
     case IrOpcode::kCheckedStore:
       return VisitCheckedStore(node);
+    case IrOpcode::kInt32PairAdd:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitInt32PairAdd(node);
+    case IrOpcode::kInt32PairSub:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitInt32PairSub(node);
+    case IrOpcode::kInt32PairMul:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitInt32PairMul(node);
+    case IrOpcode::kWord32PairShl:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitWord32PairShl(node);
+    case IrOpcode::kWord32PairShr:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitWord32PairShr(node);
+    case IrOpcode::kWord32PairSar:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitWord32PairSar(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -1144,7 +1194,6 @@
 
 void InstructionSelector::VisitLoadFramePointer(Node* node) {
   OperandGenerator g(this);
-  frame_->MarkNeedsFrame();
   Emit(kArchFramePointer, g.DefineAsRegister(node));
 }
 
@@ -1351,6 +1400,20 @@
 
 #endif  // V8_TARGET_ARCH_32_BIT
 
+// 64-bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_64_BIT
+void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
+#endif  // V8_TARGET_ARCH_64_BIT
 
 void InstructionSelector::VisitFinishRegion(Node* node) {
   OperandGenerator g(this);
@@ -1429,6 +1492,12 @@
     case IrOpcode::kTryTruncateFloat64ToInt64:
     case IrOpcode::kTryTruncateFloat32ToUint64:
     case IrOpcode::kTryTruncateFloat64ToUint64:
+    case IrOpcode::kInt32PairAdd:
+    case IrOpcode::kInt32PairSub:
+    case IrOpcode::kInt32PairMul:
+    case IrOpcode::kWord32PairShl:
+    case IrOpcode::kWord32PairShr:
+    case IrOpcode::kWord32PairSar:
       if (ProjectionIndexOf(node->op()) == 0u) {
         Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
       } else {
@@ -1484,13 +1553,15 @@
     buffer.instruction_args.push_back(g.Label(handler));
   }
 
-  // (arm64 only) caller uses JSSP but callee might destroy it.
-  if (descriptor->UseNativeStack() &&
-      !linkage()->GetIncomingDescriptor()->UseNativeStack()) {
-    flags |= CallDescriptor::kRestoreJSSP;
+  bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
+  bool to_native_stack = descriptor->UseNativeStack();
+  if (from_native_stack != to_native_stack) {
+    // (arm64 only) Mismatch in the use of stack pointers. One or the other
+    // has to be restored manually by the code generator.
+    flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
+                             : CallDescriptor::kRestoreCSP;
   }
 
-
   // Select the appropriate opcode based on the call type.
   InstructionCode opcode = kArchNop;
   switch (descriptor->kind()) {
@@ -1539,16 +1610,35 @@
 
     // Select the appropriate opcode based on the call type.
     InstructionCode opcode;
-    switch (descriptor->kind()) {
-      case CallDescriptor::kCallCodeObject:
-        opcode = kArchTailCallCodeObject;
-        break;
-      case CallDescriptor::kCallJSFunction:
-        opcode = kArchTailCallJSFunction;
-        break;
-      default:
-        UNREACHABLE();
-        return;
+    InstructionOperandVector temps(zone());
+    if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+      switch (descriptor->kind()) {
+        case CallDescriptor::kCallCodeObject:
+          opcode = kArchTailCallCodeObjectFromJSFunction;
+          break;
+        case CallDescriptor::kCallJSFunction:
+          opcode = kArchTailCallJSFunctionFromJSFunction;
+          break;
+        default:
+          UNREACHABLE();
+          return;
+      }
+      int temps_count = GetTempsCountForTailCallFromJSFunction();
+      for (int i = 0; i < temps_count; i++) {
+        temps.push_back(g.TempRegister());
+      }
+    } else {
+      switch (descriptor->kind()) {
+        case CallDescriptor::kCallCodeObject:
+          opcode = kArchTailCallCodeObject;
+          break;
+        case CallDescriptor::kCallJSFunction:
+          opcode = kArchTailCallJSFunction;
+          break;
+        default:
+          UNREACHABLE();
+          return;
+      }
     }
     opcode |= MiscField::encode(descriptor->flags());
 
@@ -1559,7 +1649,8 @@
 
     // Emit the tailcall instruction.
     Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
-         &buffer.instruction_args.front());
+         &buffer.instruction_args.front(), temps.size(),
+         temps.empty() ? nullptr : &temps.front());
   } else {
     FrameStateDescriptor* frame_state_descriptor =
         descriptor->NeedsFrameState()
@@ -1627,25 +1718,41 @@
   }
 }
 
+Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
+                                                 InstructionOperand output,
+                                                 InstructionOperand a,
+                                                 InstructionOperand b,
+                                                 Node* frame_state) {
+  size_t output_count = output.IsInvalid() ? 0 : 1;
+  InstructionOperand inputs[] = {a, b};
+  size_t input_count = arraysize(inputs);
+  return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
+                        frame_state);
+}
+
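+// The emitted instruction's inputs are {inputs}, followed by an immediate
+// holding the deoptimization state id and by the frame state values. The
+// input count is encoded into the MiscField so that the code generator can
+// tell where the frame state values begin.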
+Instruction* InstructionSelector::EmitDeoptimize(
+    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+    size_t input_count, InstructionOperand* inputs, Node* frame_state) {
+  OperandGenerator g(this);
+  FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
+  InstructionOperandVector args(instruction_zone());
+  args.reserve(input_count + 1 + descriptor->GetTotalSize());
+  for (size_t i = 0; i < input_count; ++i) {
+    args.push_back(inputs[i]);
+  }
+  opcode |= MiscField::encode(static_cast<int>(input_count));
+  InstructionSequence::StateId const state_id =
+      sequence()->AddFrameStateDescriptor(descriptor);
+  args.push_back(g.TempImmediate(state_id.ToInt()));
+  StateObjectDeduplicator deduplicator(instruction_zone());
+  AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
+                                  &args, FrameStateInputKind::kAny,
+                                  instruction_zone());
+  return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
+              nullptr);
+}
 
 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
-  OperandGenerator g(this);
-
-  FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
-
-  InstructionOperandVector args(instruction_zone());
-  args.reserve(desc->GetTotalSize() + 1);  // Include deopt id.
-
-  InstructionSequence::StateId state_id =
-      sequence()->AddFrameStateDescriptor(desc);
-  args.push_back(g.TempImmediate(state_id.ToInt()));
-
-  StateObjectDeduplicator deduplicator(instruction_zone());
-
-  AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
-                                  FrameStateInputKind::kAny,
-                                  instruction_zone());
-
   InstructionCode opcode = kArchDeoptimize;
   switch (kind) {
     case DeoptimizeKind::kEager:
@@ -1655,7 +1762,7 @@
       opcode |= MiscField::encode(Deoptimizer::SOFT);
       break;
   }
-  Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
+  EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, value);
 }
 
 
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index a01cab4..9c1cd4c 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -100,6 +100,17 @@
   Instruction* Emit(Instruction* instr);
 
   // ===========================================================================
+  // ===== Architecture-independent deoptimization exit emission methods. ======
+  // ===========================================================================
+
+  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
+                              InstructionOperand a, InstructionOperand b,
+                              Node* frame_state);
+  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
+                              InstructionOperand* outputs, size_t input_count,
+                              InstructionOperand* inputs, Node* frame_state);
+
+  // ===========================================================================
   // ============== Architecture-independent CPU feature methods. ==============
   // ===========================================================================
 
@@ -213,6 +224,7 @@
   void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                             CallBufferFlags flags, int stack_param_delta = 0);
   bool IsTailCallAddressImmediate();
+  int GetTempsCountForTailCallFromJSFunction();
 
   FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
 
@@ -243,6 +255,8 @@
   void VisitProjection(Node* node);
   void VisitConstant(Node* node);
   void VisitCall(Node* call, BasicBlock* handler = nullptr);
+  void VisitDeoptimizeIf(Node* node);
+  void VisitDeoptimizeUnless(Node* node);
   void VisitTailCall(Node* call);
   void VisitGoto(BasicBlock* target);
   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index d4ec6bc..c757557 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -114,7 +114,7 @@
       return os << "[constant:" << ConstantOperand::cast(op).virtual_register()
                 << "]";
     case InstructionOperand::IMMEDIATE: {
-      auto imm = ImmediateOperand::cast(op);
+      ImmediateOperand imm = ImmediateOperand::cast(op);
       switch (imm.type()) {
         case ImmediateOperand::INLINE:
           return os << "#" << imm.inline_value();
@@ -124,7 +124,7 @@
     }
     case InstructionOperand::EXPLICIT:
     case InstructionOperand::ALLOCATED: {
-      auto allocated = LocationOperand::cast(op);
+      LocationOperand allocated = LocationOperand::cast(op);
       if (op.IsStackSlot()) {
         os << "[stack:" << LocationOperand::cast(op).index();
       } else if (op.IsDoubleStackSlot()) {
@@ -214,7 +214,7 @@
 
 
 bool ParallelMove::IsRedundant() const {
-  for (auto move : *this) {
+  for (MoveOperands* move : *this) {
     if (!move->IsRedundant()) return false;
   }
   return true;
@@ -224,7 +224,7 @@
 MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
   MoveOperands* replacement = nullptr;
   MoveOperands* to_eliminate = nullptr;
-  for (auto curr : *this) {
+  for (MoveOperands* curr : *this) {
     if (curr->IsEliminated()) continue;
     if (curr->destination().EqualsCanonicalized(move->source())) {
       DCHECK(!replacement);
@@ -321,7 +321,7 @@
                          const PrintableParallelMove& printable) {
   const ParallelMove& pm = *printable.parallel_move_;
   bool first = true;
-  for (auto move : pm) {
+  for (MoveOperands* move : pm) {
     if (move->IsEliminated()) continue;
     if (!first) os << " ";
     first = false;
@@ -346,7 +346,7 @@
   PrintableInstructionOperand poi = {
       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
       InstructionOperand()};
-  for (auto& op : pm.reference_operands_) {
+  for (const InstructionOperand& op : pm.reference_operands_) {
     if (!first) {
       os << ";";
     } else {
@@ -393,6 +393,8 @@
       return os;
     case kFlags_branch:
       return os << "branch";
+    case kFlags_deoptimize:
+      return os << "deoptimize";
     case kFlags_set:
       return os << "set";
   }
@@ -618,7 +620,7 @@
   return blocks;
 }
 
-void InstructionSequence::Validate() {
+void InstructionSequence::ValidateEdgeSplitForm() {
   // Validate blocks are in edge-split form: no block with multiple successors
   // has an edge to a block (== a successor) with more than one predecessors.
   for (const InstructionBlock* block : instruction_blocks()) {
@@ -633,14 +635,40 @@
   }
 }
 
+void InstructionSequence::ValidateDeferredBlockExitPaths() {
+  // A deferred block with more than one successor must have all its successors
+  // deferred.
+  for (const InstructionBlock* block : instruction_blocks()) {
+    if (!block->IsDeferred() || block->SuccessorCount() <= 1) continue;
+    for (RpoNumber successor_id : block->successors()) {
+      CHECK(InstructionBlockAt(successor_id)->IsDeferred());
+    }
+  }
+}
+
+void InstructionSequence::ValidateSSA() {
+  // TODO(mtrofin): We could use a local zone here instead.
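+  // Every virtual register must be defined by at most one instruction output.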
+  BitVector definitions(VirtualRegisterCount(), zone());
+  for (const Instruction* instruction : *this) {
+    for (size_t i = 0; i < instruction->OutputCount(); ++i) {
+      const InstructionOperand* output = instruction->OutputAt(i);
+      int vreg = (output->IsConstant())
+                     ? ConstantOperand::cast(output)->virtual_register()
+                     : UnallocatedOperand::cast(output)->virtual_register();
+      CHECK(!definitions.Contains(vreg));
+      definitions.Add(vreg);
+    }
+  }
+}
+
 void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
   int ao = 0;
-  for (auto const block : *blocks) {
+  for (InstructionBlock* const block : *blocks) {
     if (!block->IsDeferred()) {
       block->set_ao_number(RpoNumber::FromInt(ao++));
     }
   }
-  for (auto const block : *blocks) {
+  for (InstructionBlock* const block : *blocks) {
     if (block->IsDeferred()) {
       block->set_ao_number(RpoNumber::FromInt(ao++));
     }
@@ -665,10 +693,6 @@
       representations_(zone()),
       deoptimization_entries_(zone()) {
   block_starts_.reserve(instruction_blocks_->size());
-
-#if DEBUG
-  Validate();
-#endif
 }
 
 
@@ -730,7 +754,7 @@
   if (end == block_starts_.end() || *end > instruction_index) --end;
   DCHECK(*end <= instruction_index);
   size_t index = std::distance(begin, end);
-  auto block = instruction_blocks_->at(index);
+  InstructionBlock* block = instruction_blocks_->at(index);
   DCHECK(block->code_start() <= instruction_index &&
          instruction_index < block->code_end());
   return block;
@@ -861,15 +885,15 @@
   os << "  instructions: [" << block->code_start() << ", " << block->code_end()
      << ")\n  predecessors:";
 
-  for (auto pred : block->predecessors()) {
+  for (RpoNumber pred : block->predecessors()) {
     os << " B" << pred.ToInt();
   }
   os << "\n";
 
-  for (auto phi : block->phis()) {
+  for (const PhiInstruction* phi : block->phis()) {
     PrintableInstructionOperand printable_op = {config, phi->output()};
     os << "     phi: " << printable_op << " =";
-    for (auto input : phi->operands()) {
+    for (int input : phi->operands()) {
       os << " v" << input;
     }
     os << "\n";
@@ -886,7 +910,7 @@
     os << "   " << buf.start() << ": " << printable_instr << "\n";
   }
 
-  for (auto succ : block->successors()) {
+  for (RpoNumber succ : block->successors()) {
     os << " B" << succ.ToInt();
   }
   os << "\n";
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 9c978ce..a1fe494 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -773,6 +773,9 @@
     DCHECK(output_count == 0 || outputs != nullptr);
     DCHECK(input_count == 0 || inputs != nullptr);
     DCHECK(temp_count == 0 || temps != nullptr);
+    // TODO(jarin/mstarzinger): Handle this gracefully. See crbug.com/582702.
+    CHECK(InputCountField::is_valid(input_count));
+
     size_t total_extra_ops = output_count + input_count + temp_count;
     if (total_extra_ops != 0) total_extra_ops--;
     int size = static_cast<int>(
@@ -812,6 +815,23 @@
            OutputCount() == 0 && TempCount() == 0;
   }
 
+  bool IsDeoptimizeCall() const {
+    return arch_opcode() == ArchOpcode::kArchDeoptimize ||
+           FlagsModeField::decode(opcode()) == kFlags_deoptimize;
+  }
+
+  bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
+  bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
+  bool IsTailCall() const {
+    return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
+           arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
+           arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
+           arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction;
+  }
+  bool IsThrow() const {
+    return arch_opcode() == ArchOpcode::kArchThrowTerminator;
+  }
+
   enum GapPosition {
     START,
     END,
@@ -1354,7 +1374,9 @@
   void PrintBlock(const RegisterConfiguration* config, int block_id) const;
   void PrintBlock(int block_id) const;
 
-  void Validate();
+  void ValidateEdgeSplitForm();
+  void ValidateDeferredBlockExitPaths();
+  void ValidateSSA();
 
  private:
   friend std::ostream& operator<<(std::ostream& os,
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index ff31abe..8824a03 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -4,9 +4,11 @@
 
 #include "src/compiler/int64-lowering.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 
 #include "src/compiler/node.h"
@@ -24,36 +26,44 @@
       graph_(graph),
       machine_(machine),
       common_(common),
-      state_(graph, 4),
+      state_(graph, 3),
       stack_(zone),
-      replacements_(zone->NewArray<Replacement>(graph->NodeCount())),
-      signature_(signature) {
+      replacements_(nullptr),
+      signature_(signature),
+      placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"),
+                                  graph->start())) {
+  replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
   memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
 }
 
 void Int64Lowering::LowerGraph() {
-  if (4 != kPointerSize) {
+  if (!machine()->Is32()) {
     return;
   }
-  stack_.push(graph()->end());
+  stack_.push_back({graph()->end(), 0});
   state_.Set(graph()->end(), State::kOnStack);
 
   while (!stack_.empty()) {
-    Node* top = stack_.top();
-    if (state_.Get(top) == State::kInputsPushed) {
-      stack_.pop();
-      state_.Set(top, State::kVisited);
-      // All inputs of top have already been reduced, now reduce top.
-      LowerNode(top);
+    NodeState& top = stack_.back();
+    if (top.input_index == top.node->InputCount()) {
+      // All inputs of top have already been lowered, now lower top.
+      stack_.pop_back();
+      state_.Set(top.node, State::kVisited);
+      LowerNode(top.node);
     } else {
-      // Push all children onto the stack.
-      for (Node* input : top->inputs()) {
-        if (state_.Get(input) == State::kUnvisited) {
-          stack_.push(input);
-          state_.Set(input, State::kOnStack);
+      // Push the next input onto the stack.
+      Node* input = top.node->InputAt(top.input_index++);
+      if (state_.Get(input) == State::kUnvisited) {
+        if (input->opcode() == IrOpcode::kPhi) {
+          // To break cycles with phi nodes we push phis to the front of the
+          // stack so that they are processed after all other nodes.
+          PreparePhiReplacement(input);
+          stack_.push_front({input, 0});
+        } else {
+          stack_.push_back({input, 0});
         }
+        state_.Set(input, State::kOnStack);
       }
-      state_.Set(top, State::kInputsPushed);
     }
   }
 }
@@ -122,6 +132,8 @@
         }
         NodeProperties::ChangeOp(node, load_op);
         ReplaceNode(node, node, high_node);
+      } else {
+        DefaultLowering(node);
       }
       break;
     }
@@ -164,30 +176,11 @@
         node->ReplaceInput(2, GetReplacementLow(value));
         NodeProperties::ChangeOp(node, store_op);
         ReplaceNode(node, node, high_node);
+      } else {
+        DefaultLowering(node);
       }
       break;
     }
-    case IrOpcode::kWord64And: {
-      DCHECK(node->InputCount() == 2);
-      Node* left = node->InputAt(0);
-      Node* right = node->InputAt(1);
-
-      Node* low_node =
-          graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
-                           GetReplacementLow(right));
-      Node* high_node =
-          graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
-                           GetReplacementHigh(right));
-      ReplaceNode(node, low_node, high_node);
-      break;
-    }
-    case IrOpcode::kTruncateInt64ToInt32: {
-      DCHECK(node->InputCount() == 1);
-      Node* input = node->InputAt(0);
-      ReplaceNode(node, GetReplacementLow(input), nullptr);
-      node->NullAllInputs();
-      break;
-    }
     case IrOpcode::kStart: {
       int parameter_count = GetParameterCountAfterLowering(signature());
       // Only exchange the node if the parameter count actually changed.
@@ -248,8 +241,517 @@
       }
       break;
     }
+    case IrOpcode::kWord64And: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kTruncateInt64ToInt32: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      ReplaceNode(node, GetReplacementLow(input), nullptr);
+      node->NullAllInputs();
+      break;
+    }
+    // TODO(ahaas): I added a list of missing instructions here to make merging
+    // easier when I do them one by one.
+    // kExprI64Add:
+    case IrOpcode::kInt64Add: {
+      DCHECK(node->InputCount() == 2);
+
+      Node* right = node->InputAt(1);
+      node->ReplaceInput(1, GetReplacementLow(right));
+      node->AppendInput(zone(), GetReplacementHigh(right));
+
+      Node* left = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(left));
+      node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
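+      // The node now takes (left_low, left_high, right_low, right_high) and
+      // Int32PairAdd produces the 64-bit sum as two 32-bit words: projection 0
+      // is the low word, projection 1 the high word (including the carry out
+      // of the low-word addition).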
+      NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+
+    // kExprI64Sub:
+    case IrOpcode::kInt64Sub: {
+      DCHECK(node->InputCount() == 2);
+
+      Node* right = node->InputAt(1);
+      node->ReplaceInput(1, GetReplacementLow(right));
+      node->AppendInput(zone(), GetReplacementHigh(right));
+
+      Node* left = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(left));
+      node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+      NodeProperties::ChangeOp(node, machine()->Int32PairSub());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64Mul:
+    case IrOpcode::kInt64Mul: {
+      DCHECK(node->InputCount() == 2);
+
+      Node* right = node->InputAt(1);
+      node->ReplaceInput(1, GetReplacementLow(right));
+      node->AppendInput(zone(), GetReplacementHigh(right));
+
+      Node* left = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(left));
+      node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+      NodeProperties::ChangeOp(node, machine()->Int32PairMul());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64DivS:
+    // kExprI64DivU:
+    // kExprI64RemS:
+    // kExprI64RemU:
+    // kExprI64Ior:
+    case IrOpcode::kWord64Or: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32Or(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32Or(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+
+    // kExprI64Xor:
+    case IrOpcode::kWord64Xor: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32Xor(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64Shl:
+    case IrOpcode::kWord64Shl: {
+      // TODO(turbofan): if the shift count >= 32, then we can set the low word
+      // of the output to 0 and just calculate the high word.
+      DCHECK(node->InputCount() == 2);
+      Node* shift = node->InputAt(1);
+      if (HasReplacementLow(shift)) {
+        // We do not have to care about the high word replacement, because
+        // the shift can only be between 0 and 63 anyway.
+        node->ReplaceInput(1, GetReplacementLow(shift));
+      }
+
+      Node* value = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(value));
+      node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+      NodeProperties::ChangeOp(node, machine()->Word32PairShl());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64ShrU:
+    case IrOpcode::kWord64Shr: {
+      // TODO(turbofan): if the shift count >= 32, then we can set the low word
+      // of the output to 0 and just calculate the high word.
+      DCHECK(node->InputCount() == 2);
+      Node* shift = node->InputAt(1);
+      if (HasReplacementLow(shift)) {
+        // We do not have to care about the high word replacement, because
+        // the shift can only be between 0 and 63 anyway.
+        node->ReplaceInput(1, GetReplacementLow(shift));
+      }
+
+      Node* value = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(value));
+      node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+      NodeProperties::ChangeOp(node, machine()->Word32PairShr());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64ShrS:
+    case IrOpcode::kWord64Sar: {
+      // TODO(turbofan): if the shift count >= 32, then we can set the low word
+      // of the output to 0 and just calculate the high word.
+      DCHECK(node->InputCount() == 2);
+      Node* shift = node->InputAt(1);
+      if (HasReplacementLow(shift)) {
+        // We do not have to care about the high word replacement, because
+        // the shift can only be between 0 and 63 anyway.
+        node->ReplaceInput(1, GetReplacementLow(shift));
+      }
+
+      Node* value = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(value));
+      node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+      NodeProperties::ChangeOp(node, machine()->Word32PairSar());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64Eq:
+    case IrOpcode::kWord64Equal: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      // TODO(wasm): Use explicit comparisons and && here?
+      Node* replacement = graph()->NewNode(
+          machine()->Word32Equal(),
+          graph()->NewNode(
+              machine()->Word32Or(),
+              graph()->NewNode(machine()->Word32Xor(), GetReplacementLow(left),
+                               GetReplacementLow(right)),
+              graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
+                               GetReplacementHigh(right))),
+          graph()->NewNode(common()->Int32Constant(0)));
+
+      ReplaceNode(node, replacement, nullptr);
+      break;
+    }
+    // kExprI64LtS:
+    case IrOpcode::kInt64LessThan: {
+      LowerComparison(node, machine()->Int32LessThan(),
+                      machine()->Uint32LessThan());
+      break;
+    }
+    case IrOpcode::kInt64LessThanOrEqual: {
+      LowerComparison(node, machine()->Int32LessThan(),
+                      machine()->Uint32LessThanOrEqual());
+      break;
+    }
+    case IrOpcode::kUint64LessThan: {
+      LowerComparison(node, machine()->Uint32LessThan(),
+                      machine()->Uint32LessThan());
+      break;
+    }
+    case IrOpcode::kUint64LessThanOrEqual: {
+      LowerComparison(node, machine()->Uint32LessThan(),
+                      machine()->Uint32LessThanOrEqual());
+      break;
+    }
+
+    // kExprI64SConvertI32:
+    case IrOpcode::kChangeInt32ToInt64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      if (HasReplacementLow(input)) {
+        input = GetReplacementLow(input);
+      }
+      // We use SAR to preserve the sign in the high word.
+      ReplaceNode(
+          node, input,
+          graph()->NewNode(machine()->Word32Sar(), input,
+                           graph()->NewNode(common()->Int32Constant(31))));
+      node->NullAllInputs();
+      break;
+    }
+    // kExprI64UConvertI32:
+    case IrOpcode::kChangeUint32ToUint64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      if (HasReplacementLow(input)) {
+        input = GetReplacementLow(input);
+      }
+      ReplaceNode(node, input, graph()->NewNode(common()->Int32Constant(0)));
+      node->NullAllInputs();
+      break;
+    }
+    // kExprF64ReinterpretI64:
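+    // The reinterpretation goes through a 64-bit stack slot: both 32-bit
+    // halves are stored into the slot and the result is loaded back from it
+    // as a float64.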
+    case IrOpcode::kBitcastInt64ToFloat64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      Node* stack_slot = graph()->NewNode(
+          machine()->StackSlot(MachineRepresentation::kWord64));
+
+      Node* store_high_word = graph()->NewNode(
+          machine()->Store(
+              StoreRepresentation(MachineRepresentation::kWord32,
+                                  WriteBarrierKind::kNoWriteBarrier)),
+          stack_slot, graph()->NewNode(common()->Int32Constant(4)),
+          GetReplacementHigh(input), graph()->start(), graph()->start());
+
+      Node* store_low_word = graph()->NewNode(
+          machine()->Store(
+              StoreRepresentation(MachineRepresentation::kWord32,
+                                  WriteBarrierKind::kNoWriteBarrier)),
+          stack_slot, graph()->NewNode(common()->Int32Constant(0)),
+          GetReplacementLow(input), store_high_word, graph()->start());
+
+      Node* load =
+          graph()->NewNode(machine()->Load(MachineType::Float64()), stack_slot,
+                           graph()->NewNode(common()->Int32Constant(0)),
+                           store_low_word, graph()->start());
+
+      ReplaceNode(node, load, nullptr);
+      break;
+    }
+    // kExprI64ReinterpretF64:
+    case IrOpcode::kBitcastFloat64ToInt64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      if (HasReplacementLow(input)) {
+        input = GetReplacementLow(input);
+      }
+      Node* stack_slot = graph()->NewNode(
+          machine()->StackSlot(MachineRepresentation::kWord64));
+      Node* store = graph()->NewNode(
+          machine()->Store(
+              StoreRepresentation(MachineRepresentation::kFloat64,
+                                  WriteBarrierKind::kNoWriteBarrier)),
+          stack_slot, graph()->NewNode(common()->Int32Constant(0)), input,
+          graph()->start(), graph()->start());
+
+      Node* high_node =
+          graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
+                           graph()->NewNode(common()->Int32Constant(4)), store,
+                           graph()->start());
+
+      Node* low_node =
+          graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
+                           graph()->NewNode(common()->Int32Constant(0)), store,
+                           graph()->start());
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kWord64Ror: {
+      DCHECK(node->InputCount() == 2);
+      Node* input = node->InputAt(0);
+      Node* shift = HasReplacementLow(node->InputAt(1))
+                        ? GetReplacementLow(node->InputAt(1))
+                        : node->InputAt(1);
+      Int32Matcher m(shift);
+      if (m.HasValue()) {
+        // Precondition: 0 <= shift < 64.
+        int32_t shift_value = m.Value() & 0x3f;
+        if (shift_value == 0) {
+          ReplaceNode(node, GetReplacementLow(input),
+                      GetReplacementHigh(input));
+        } else if (shift_value == 32) {
+          ReplaceNode(node, GetReplacementHigh(input),
+                      GetReplacementLow(input));
+        } else {
+          Node* low_input;
+          Node* high_input;
+          if (shift_value < 32) {
+            low_input = GetReplacementLow(input);
+            high_input = GetReplacementHigh(input);
+          } else {
+            low_input = GetReplacementHigh(input);
+            high_input = GetReplacementLow(input);
+          }
+          int32_t masked_shift_value = shift_value & 0x1f;
+          Node* masked_shift =
+              graph()->NewNode(common()->Int32Constant(masked_shift_value));
+          Node* inv_shift = graph()->NewNode(
+              common()->Int32Constant(32 - masked_shift_value));
+
+          Node* low_node = graph()->NewNode(
+              machine()->Word32Or(),
+              graph()->NewNode(machine()->Word32Shr(), low_input, masked_shift),
+              graph()->NewNode(machine()->Word32Shl(), high_input, inv_shift));
+          Node* high_node = graph()->NewNode(
+              machine()->Word32Or(), graph()->NewNode(machine()->Word32Shr(),
+                                                      high_input, masked_shift),
+              graph()->NewNode(machine()->Word32Shl(), low_input, inv_shift));
+          ReplaceNode(node, low_node, high_node);
+        }
+      } else {
+        Node* safe_shift = shift;
+        if (!machine()->Word32ShiftIsSafe()) {
+          safe_shift =
+              graph()->NewNode(machine()->Word32And(), shift,
+                               graph()->NewNode(common()->Int32Constant(0x1f)));
+        }
+
+        // By creating this bit-mask with SAR and SHL we do not have to deal
+        // with shift == 0 as a special case.
+        Node* inv_mask = graph()->NewNode(
+            machine()->Word32Shl(),
+            graph()->NewNode(machine()->Word32Sar(),
+                             graph()->NewNode(common()->Int32Constant(
+                                 std::numeric_limits<int32_t>::min())),
+                             safe_shift),
+            graph()->NewNode(common()->Int32Constant(1)));
+
+        Node* bit_mask =
+            graph()->NewNode(machine()->Word32Xor(), inv_mask,
+                             graph()->NewNode(common()->Int32Constant(-1)));
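+        // For example, for a masked shift of 8 the masks are
+        // inv_mask == 0xff000000 (the top 8 bits) and bit_mask == 0x00ffffff.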
+
+        // We have to mask the shift value for this comparison. If
+        // !machine()->Word32ShiftIsSafe() then the masking should already be
+        // part of the graph.
+        Node* masked_shift6 = shift;
+        if (machine()->Word32ShiftIsSafe()) {
+          masked_shift6 =
+              graph()->NewNode(machine()->Word32And(), shift,
+                               graph()->NewNode(common()->Int32Constant(0x3f)));
+        }
+
+        Diamond lt32(
+            graph(), common(),
+            graph()->NewNode(machine()->Int32LessThan(), masked_shift6,
+                             graph()->NewNode(common()->Int32Constant(32))));
+
+        // The low word and the high word can be swapped either at the input or
+        // at the output. We swap the inputs so that {shift} does not have to
+        // be kept live in a register for as long.
+        Node* input_low =
+            lt32.Phi(MachineRepresentation::kWord32, GetReplacementLow(input),
+                     GetReplacementHigh(input));
+        Node* input_high =
+            lt32.Phi(MachineRepresentation::kWord32, GetReplacementHigh(input),
+                     GetReplacementLow(input));
+
+        Node* rotate_low =
+            graph()->NewNode(machine()->Word32Ror(), input_low, safe_shift);
+        Node* rotate_high =
+            graph()->NewNode(machine()->Word32Ror(), input_high, safe_shift);
+
+        Node* low_node = graph()->NewNode(
+            machine()->Word32Or(),
+            graph()->NewNode(machine()->Word32And(), rotate_low, bit_mask),
+            graph()->NewNode(machine()->Word32And(), rotate_high, inv_mask));
+
+        Node* high_node = graph()->NewNode(
+            machine()->Word32Or(),
+            graph()->NewNode(machine()->Word32And(), rotate_high, bit_mask),
+            graph()->NewNode(machine()->Word32And(), rotate_low, inv_mask));
+
+        ReplaceNode(node, low_node, high_node);
+      }
+      break;
+    }
+    // kExprI64Clz:
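+    // If the high word is zero the result is 32 + Clz(low word), otherwise it
+    // is Clz(high word); e.g. Clz64(1) == 63 == 32 + Clz32(1). The high word
+    // of the result is always zero because the result fits into 32 bits.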
+    case IrOpcode::kWord64Clz: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      Diamond d(
+          graph(), common(),
+          graph()->NewNode(machine()->Word32Equal(), GetReplacementHigh(input),
+                           graph()->NewNode(common()->Int32Constant(0))));
+
+      Node* low_node = d.Phi(
+          MachineRepresentation::kWord32,
+          graph()->NewNode(machine()->Int32Add(),
+                           graph()->NewNode(machine()->Word32Clz(),
+                                            GetReplacementLow(input)),
+                           graph()->NewNode(common()->Int32Constant(32))),
+          graph()->NewNode(machine()->Word32Clz(), GetReplacementHigh(input)));
+      ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
+      break;
+    }
+    // kExprI64Ctz:
+    case IrOpcode::kWord64Ctz: {
+      DCHECK(node->InputCount() == 1);
+      DCHECK(machine()->Word32Ctz().IsSupported());
+      Node* input = node->InputAt(0);
+      Diamond d(
+          graph(), common(),
+          graph()->NewNode(machine()->Word32Equal(), GetReplacementLow(input),
+                           graph()->NewNode(common()->Int32Constant(0))));
+      Node* low_node =
+          d.Phi(MachineRepresentation::kWord32,
+                graph()->NewNode(machine()->Int32Add(),
+                                 graph()->NewNode(machine()->Word32Ctz().op(),
+                                                  GetReplacementHigh(input)),
+                                 graph()->NewNode(common()->Int32Constant(32))),
+                graph()->NewNode(machine()->Word32Ctz().op(),
+                                 GetReplacementLow(input)));
+      ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
+      break;
+    }
+    // kExprI64Popcnt:
+    case IrOpcode::kWord64Popcnt: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      // We assume that a Word64Popcnt node has only been created if
+      // Word32Popcnt is actually supported.
+      DCHECK(machine()->Word32Popcnt().IsSupported());
+      ReplaceNode(node, graph()->NewNode(
+                            machine()->Int32Add(),
+                            graph()->NewNode(machine()->Word32Popcnt().op(),
+                                             GetReplacementLow(input)),
+                            graph()->NewNode(machine()->Word32Popcnt().op(),
+                                             GetReplacementHigh(input))),
+                  graph()->NewNode(common()->Int32Constant(0)));
+      break;
+    }
+    case IrOpcode::kPhi: {
+      MachineRepresentation rep = PhiRepresentationOf(node->op());
+      if (rep == MachineRepresentation::kWord64) {
+        // The replacement nodes have already been created; we only have to
+        // replace the placeholder nodes.
+        Node* low_node = GetReplacementLow(node);
+        Node* high_node = GetReplacementHigh(node);
+        for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+          low_node->ReplaceInput(i, GetReplacementLow(node->InputAt(i)));
+          high_node->ReplaceInput(i, GetReplacementHigh(node->InputAt(i)));
+        }
+      } else {
+        DefaultLowering(node);
+      }
+      break;
+    }
+
     default: { DefaultLowering(node); }
   }
+}  // NOLINT(readability/fn_size)
+
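+// Lowers a 64-bit comparison to 32-bit operations. For example,
+// Int64LessThan(a, b) becomes
+//   Word32Or(Int32LessThan(a_high, b_high),
+//            Word32And(Word32Equal(a_high, b_high),
+//                      Uint32LessThan(a_low, b_low))),
+// i.e. the high words decide the result unless they are equal, in which case
+// the low words are compared as unsigned values.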
+void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
+                                    const Operator* low_word_op) {
+  DCHECK(node->InputCount() == 2);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  Node* replacement = graph()->NewNode(
+      machine()->Word32Or(),
+      graph()->NewNode(high_word_op, GetReplacementHigh(left),
+                       GetReplacementHigh(right)),
+      graph()->NewNode(
+          machine()->Word32And(),
+          graph()->NewNode(machine()->Word32Equal(), GetReplacementHigh(left),
+                           GetReplacementHigh(right)),
+          graph()->NewNode(low_word_op, GetReplacementLow(left),
+                           GetReplacementLow(right))));
+
+  ReplaceNode(node, replacement, nullptr);
 }
 
 bool Int64Lowering::DefaultLowering(Node* node) {
@@ -294,6 +796,32 @@
   DCHECK(result);
   return result;
 }
+
+void Int64Lowering::PreparePhiReplacement(Node* phi) {
+  MachineRepresentation rep = PhiRepresentationOf(phi->op());
+  if (rep == MachineRepresentation::kWord64) {
+    // We have to create the replacements for a phi node before we actually
+    // lower the phi to break potential cycles in the graph. The replacements of
+    // input nodes do not exist yet, so we use a placeholder node to pass the
+    // graph verifier.
+    int value_count = phi->op()->ValueInputCount();
+    Node** inputs_low = zone()->NewArray<Node*>(value_count + 1);
+    Node** inputs_high = zone()->NewArray<Node*>(value_count + 1);
+    for (int i = 0; i < value_count; i++) {
+      inputs_low[i] = placeholder_;
+      inputs_high[i] = placeholder_;
+    }
+    inputs_low[value_count] = NodeProperties::GetControlInput(phi, 0);
+    inputs_high[value_count] = NodeProperties::GetControlInput(phi, 0);
+    ReplaceNode(phi,
+                graph()->NewNode(
+                    common()->Phi(MachineRepresentation::kWord32, value_count),
+                    value_count + 1, inputs_low, false),
+                graph()->NewNode(
+                    common()->Phi(MachineRepresentation::kWord32, value_count),
+                    value_count + 1, inputs_high, false));
+  }
+}
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 79a95dc..7f6ef9a 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMPILER_INT64_REDUCER_H_
-#define V8_COMPILER_INT64_REDUCER_H_
+#ifndef V8_COMPILER_INT64_LOWERING_H_
+#define V8_COMPILER_INT64_LOWERING_H_
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
@@ -24,7 +24,7 @@
   void LowerGraph();
 
  private:
-  enum class State : uint8_t { kUnvisited, kOnStack, kInputsPushed, kVisited };
+  enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
 
   struct Replacement {
     Node* low;
@@ -37,27 +37,39 @@
   CommonOperatorBuilder* common() const { return common_; }
   Signature<MachineRepresentation>* signature() const { return signature_; }
 
+  void PrepareReplacements(Node* node);
+  void PushNode(Node* node);
   void LowerNode(Node* node);
   bool DefaultLowering(Node* node);
+  void LowerComparison(Node* node, const Operator* signed_op,
+                       const Operator* unsigned_op);
+  void PrepareProjectionReplacements(Node* node);
 
   void ReplaceNode(Node* old, Node* new_low, Node* new_high);
   bool HasReplacementLow(Node* node);
   Node* GetReplacementLow(Node* node);
   bool HasReplacementHigh(Node* node);
   Node* GetReplacementHigh(Node* node);
+  void PreparePhiReplacement(Node* phi);
+
+  struct NodeState {
+    Node* node;
+    int input_index;
+  };
 
   Zone* zone_;
   Graph* const graph_;
   MachineOperatorBuilder* machine_;
   CommonOperatorBuilder* common_;
   NodeMarker<State> state_;
-  ZoneStack<Node*> stack_;
+  ZoneDeque<NodeState> stack_;
   Replacement* replacements_;
   Signature<MachineRepresentation>* signature_;
+  Node* placeholder_;
 };
 
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_COMPILER_INT64_REDUCER_H_
+#endif  // V8_COMPILER_INT64_LOWERING_H_
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 3023031..41f9c30 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -117,18 +117,59 @@
   return NoChange();
 }
 
-
-// ES6 draft 08-24-14, section 20.2.2.19.
+// ES6 section 20.2.2.19 Math.imul ( x, y )
 Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
-    // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
-    Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
+  if (r.InputsMatchTwo(Type::Number(), Type::Number())) {
+    // Math.imul(a:number, b:number) -> NumberImul(NumberToUint32(a),
+    //                                             NumberToUint32(b))
+    Node* a = graph()->NewNode(simplified()->NumberToUint32(), r.left());
+    Node* b = graph()->NewNode(simplified()->NumberToUint32(), r.right());
+    Node* value = graph()->NewNode(simplified()->NumberImul(), a, b);
     return Replace(value);
   }
   return NoChange();
 }
 
+// ES6 section 20.2.2.10 Math.ceil ( x )
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.ceil(a:number) -> NumberCeil(a)
+    Node* value = graph()->NewNode(simplified()->NumberCeil(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Unsigned32())) {
+    // Math.clz32(a:unsigned32) -> NumberClz32(a)
+    Node* value = graph()->NewNode(simplified()->NumberClz32(), r.left());
+    return Replace(value);
+  }
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.clz32(a:number) -> NumberClz32(NumberToUint32(a))
+    Node* value = graph()->NewNode(
+        simplified()->NumberClz32(),
+        graph()->NewNode(simplified()->NumberToUint32(), r.left()));
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.floor(a:number) -> NumberFloor(a)
+    Node* value = graph()->NewNode(simplified()->NumberFloor(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
 
 // ES6 draft 08-24-14, section 20.2.2.17.
 Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
@@ -145,25 +186,32 @@
 // ES6 section 20.2.2.28 Math.round ( x )
 Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchOne(type_cache_.kIntegerOrMinusZeroOrNaN)) {
-    // Math.round(a:integer \/ -0 \/ NaN) -> a
-    return Replace(r.left());
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.round(a:number) -> NumberRound(a)
+    Node* value = graph()->NewNode(simplified()->NumberRound(), r.left());
+    return Replace(value);
   }
-  if (r.InputsMatchOne(Type::Number()) &&
-      machine()->Float64RoundUp().IsSupported()) {
-    // Math.round(a:number) -> Select(Float64LessThan(#0.5, Float64Sub(i, a)),
-    //                                Float64Sub(i, #1.0), i)
-    //   where i = Float64RoundUp(a)
-    Node* value = r.left();
-    Node* integer = graph()->NewNode(machine()->Float64RoundUp().op(), value);
-    Node* real = graph()->NewNode(machine()->Float64Sub(), integer, value);
-    return Replace(graph()->NewNode(
-        common()->Select(MachineRepresentation::kFloat64),
-        graph()->NewNode(machine()->Float64LessThan(),
-                         jsgraph()->Float64Constant(0.5), real),
-        graph()->NewNode(machine()->Float64Sub(), integer,
-                         jsgraph()->Float64Constant(1.0)),
-        integer));
+  return NoChange();
+}
+
+// ES6 section 20.2.2.32 Math.sqrt ( x )
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.sqrt(a:number) -> Float64Sqrt(a)
+    Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.35 Math.trunc ( x )
+Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.trunc(a:number) -> NumberTrunc(a)
+    Node* value = graph()->NewNode(simplified()->NumberTrunc(), r.left());
+    return Replace(value);
   }
   return NoChange();
 }
@@ -181,12 +229,27 @@
     case kMathImul:
       reduction = ReduceMathImul(node);
       break;
+    case kMathClz32:
+      reduction = ReduceMathClz32(node);
+      break;
+    case kMathCeil:
+      reduction = ReduceMathCeil(node);
+      break;
+    case kMathFloor:
+      reduction = ReduceMathFloor(node);
+      break;
     case kMathFround:
       reduction = ReduceMathFround(node);
       break;
     case kMathRound:
       reduction = ReduceMathRound(node);
       break;
+    case kMathSqrt:
+      reduction = ReduceMathSqrt(node);
+      break;
+    case kMathTrunc:
+      reduction = ReduceMathTrunc(node);
+      break;
     default:
       break;
   }
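
The Math.imul reduction above lowers the call to NumberImul over two NumberToUint32 conversions. As a rough reference for what that lowered graph computes, here is a hedged plain-C++ sketch (the function name is illustrative, and the ToUint32 step is simplified to ignore NaN, Infinity and out-of-range doubles):

    #include <cstdint>

    // Simplified model of NumberImul(NumberToUint32(x), NumberToUint32(y)).
    static int32_t MathImulReference(double x, double y) {
      // NumberToUint32: truncate towards zero, wrap modulo 2^32 (simplified).
      uint32_t a = static_cast<uint32_t>(static_cast<int64_t>(x));
      uint32_t b = static_cast<uint32_t>(static_cast<int64_t>(y));
      // NumberImul: 32-bit multiply whose result is reinterpreted as int32.
      return static_cast<int32_t>(a * b);
    }
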
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index b64b335..dfeb409 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -33,8 +33,13 @@
   Reduction ReduceFunctionCall(Node* node);
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
+  Reduction ReduceMathCeil(Node* node);
+  Reduction ReduceMathClz32(Node* node);
+  Reduction ReduceMathFloor(Node* node);
   Reduction ReduceMathFround(Node* node);
   Reduction ReduceMathRound(Node* node);
+  Reduction ReduceMathSqrt(Node* node);
+  Reduction ReduceMathTrunc(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index 34217e7..892dcc7 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -329,16 +329,8 @@
     Node* check = effect =
         graph()->NewNode(javascript()->StrictEqual(), target, array_function,
                          context, effect, control);
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* deoptimize =
-        graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                         frame_state, effect, if_false);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-    Revisit(graph()->end());
-    control = graph()->NewNode(common()->IfTrue(), branch);
+    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceValueInput(node, array_function, 0);
@@ -355,16 +347,8 @@
       Node* check = effect =
           graph()->NewNode(javascript()->StrictEqual(), target, target_function,
                            context, effect, control);
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
 
       // Specialize the JSCallFunction node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
@@ -473,16 +457,8 @@
     Node* check = effect =
         graph()->NewNode(javascript()->StrictEqual(), target, array_function,
                          context, effect, control);
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* deoptimize =
-        graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                         frame_state, effect, if_false);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-    Revisit(graph()->end());
-    control = graph()->NewNode(common()->IfTrue(), branch);
+    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceEffectInput(node, effect);
@@ -505,16 +481,8 @@
       Node* check = effect =
           graph()->NewNode(javascript()->StrictEqual(), target, target_function,
                            context, effect, control);
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
 
       // Specialize the JSCallConstruct node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
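
Every hunk in this file (and the similar ones in js-global-object-specialization.cc and js-native-context-specialization.cc below) collapses the same explicit deoptimization pattern into a single DeoptimizeUnless node. For reference, the removed pattern, restated as a helper using the reducer names from the surrounding diff (the helper itself is only an illustration, not part of the patch), was:

    Node* OldDeoptimizeUnlessPattern(Graph* graph, CommonOperatorBuilder* common,
                                     Node* check, Node* frame_state, Node* effect,
                                     Node* control) {
      Node* branch =
          graph->NewNode(common->Branch(BranchHint::kTrue), check, control);
      Node* if_false = graph->NewNode(common->IfFalse(), branch);
      // The eager deoptimization on the false path; callers then merged this
      // node into the graph's End node and revisited it.
      graph->NewNode(common->Deoptimize(DeoptimizeKind::kEager), frame_state,
                     effect, if_false);
      // The true path becomes the surviving control edge.
      return graph->NewNode(common->IfTrue(), branch);
    }

DeoptimizeUnless(check, frame_state, effect, control) expresses this whole shape as one control node that deoptimizes when check is false; DeoptimizeIf, used elsewhere in this patch, is the mirror image that deoptimizes when check is true.
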
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index f40f05d..9ffae15 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -20,7 +20,7 @@
 
 // Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public AdvancedReducer {
+class JSCallReducer final : public Reducer {
  public:
   // Flags that control the mode of operation.
   enum Flag {
@@ -29,12 +29,9 @@
   };
   typedef base::Flags<Flag> Flags;
 
-  JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
+  JSCallReducer(JSGraph* jsgraph, Flags flags,
                 MaybeHandle<Context> native_context)
-      : AdvancedReducer(editor),
-        jsgraph_(jsgraph),
-        flags_(flags),
-        native_context_(native_context) {}
+      : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
 
   Reduction Reduce(Node* node) final;
 
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index df5c8d0..2003363 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -905,8 +905,17 @@
         site_context->ExitScope(current_site, boilerplate_object);
       } else if (property_details.representation().IsDouble()) {
         // Allocate a mutable HeapNumber box and store the value into it.
-        value = effect = AllocateMutableHeapNumber(
-            Handle<HeapNumber>::cast(boilerplate_value)->value(),
+        Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNoFlags, Operator::kNoThrow);
+        value = effect = graph()->NewNode(
+            common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+            jsgraph()->NoContextConstant(), effect, control);
+        effect = graph()->NewNode(
+            simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+            value, jsgraph()->Constant(
+                       Handle<HeapNumber>::cast(boilerplate_value)->value()),
             effect, control);
       } else if (property_details.representation().IsSmi()) {
         // Ensure that value is stored as smi.
@@ -1028,23 +1037,6 @@
   return builder.Finish();
 }
 
-Node* JSCreateLowering::AllocateMutableHeapNumber(double value, Node* effect,
-                                                  Node* control) {
-  // TODO(turbofan): Support inline allocation of MutableHeapNumber
-  // (requires proper alignment on Allocate, and Begin/FinishRegion).
-  Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
-  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      isolate(), jsgraph()->zone(), callable.descriptor(), 0,
-      CallDescriptor::kNoFlags, Operator::kNoThrow);
-  Node* result = effect = graph()->NewNode(
-      common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-      jsgraph()->NoContextConstant(), effect, control);
-  effect = graph()->NewNode(
-      simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
-      jsgraph()->Constant(value), effect, control);
-  return result;
-}
-
 MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
     Node* node) {
   Node* const closure = NodeProperties::GetValueInput(node, 0);
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index d9d184b..52e7ec2 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -70,7 +70,6 @@
                                     Handle<JSObject> boilerplate,
                                     PretenureFlag pretenure,
                                     AllocationSiteUsageContext* site_context);
-  Node* AllocateMutableHeapNumber(double value, Node* effect, Node* control);
 
   // Infers the LiteralsArray to use for a given {node}.
   MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index df2d908..1f12579 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -45,6 +45,8 @@
     JS_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
     case IrOpcode::kBranch:
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
       // TODO(mstarzinger): If typing is enabled then simplified lowering will
       // have inserted the correct ChangeBoolToBit, otherwise we need to perform
       // poor-man's representation inference here and insert manual change.
@@ -68,14 +70,9 @@
     ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token),    \
                         CallDescriptor::kPatchableCallSiteWithNop | flags); \
   }
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseAnd, Token::BIT_AND)
 REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
 REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
 REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
-REPLACE_BINARY_OP_IC_CALL(JSAdd, Token::ADD)
-REPLACE_BINARY_OP_IC_CALL(JSSubtract, Token::SUB)
 REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
 REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
 REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
@@ -85,19 +82,39 @@
   void JSGenericLowering::Lower##op(Node* node) { \
     ReplaceWithRuntimeCall(node, fun);            \
   }
-REPLACE_RUNTIME_CALL(JSEqual, Runtime::kEqual)
-REPLACE_RUNTIME_CALL(JSNotEqual, Runtime::kNotEqual)
-REPLACE_RUNTIME_CALL(JSStrictEqual, Runtime::kStrictEqual)
-REPLACE_RUNTIME_CALL(JSStrictNotEqual, Runtime::kStrictNotEqual)
-REPLACE_RUNTIME_CALL(JSLessThan, Runtime::kLessThan)
-REPLACE_RUNTIME_CALL(JSGreaterThan, Runtime::kGreaterThan)
-REPLACE_RUNTIME_CALL(JSLessThanOrEqual, Runtime::kLessThanOrEqual)
-REPLACE_RUNTIME_CALL(JSGreaterThanOrEqual, Runtime::kGreaterThanOrEqual)
 REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
 REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
 REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
 #undef REPLACE_RUNTIME_CALL
 
+#define REPLACE_STUB_CALL(Name)                                   \
+  void JSGenericLowering::LowerJS##Name(Node* node) {             \
+    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
+    Callable callable = CodeFactory::Name(isolate());             \
+    ReplaceWithStubCall(node, callable, flags);                   \
+  }
+REPLACE_STUB_CALL(Add)
+REPLACE_STUB_CALL(Subtract)
+REPLACE_STUB_CALL(BitwiseAnd)
+REPLACE_STUB_CALL(BitwiseOr)
+REPLACE_STUB_CALL(BitwiseXor)
+REPLACE_STUB_CALL(LessThan)
+REPLACE_STUB_CALL(LessThanOrEqual)
+REPLACE_STUB_CALL(GreaterThan)
+REPLACE_STUB_CALL(GreaterThanOrEqual)
+REPLACE_STUB_CALL(Equal)
+REPLACE_STUB_CALL(NotEqual)
+REPLACE_STUB_CALL(StrictEqual)
+REPLACE_STUB_CALL(StrictNotEqual)
+REPLACE_STUB_CALL(ToBoolean)
+REPLACE_STUB_CALL(ToInteger)
+REPLACE_STUB_CALL(ToLength)
+REPLACE_STUB_CALL(ToNumber)
+REPLACE_STUB_CALL(ToName)
+REPLACE_STUB_CALL(ToObject)
+REPLACE_STUB_CALL(ToString)
+#undef REPLACE_STUB_CALL
+
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags) {
   Operator::Properties properties = node->op()->properties();
@@ -134,42 +151,6 @@
 }
 
 
-void JSGenericLowering::LowerJSToBoolean(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToBoolean(isolate());
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
-}
-
-
-void JSGenericLowering::LowerJSToNumber(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToNumber(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToString(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToString(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToName(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToName(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToObject(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToObject(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
   Node* closure = NodeProperties::GetValueInput(node, 2);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -546,15 +527,13 @@
 void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
 
   // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
   // initial length limit for arrays with "fast" elements kind.
   if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
-      (p.flags() & ArrayLiteral::kIsStrong) == 0 &&
-      length < JSArray::kInitialMaxFastElementArray) {
+      p.length() < JSArray::kInitialMaxFastElementArray) {
     Callable callable = CodeFactory::FastCloneShallowArray(isolate());
     ReplaceWithStubCall(node, callable, flags);
   } else {
@@ -567,7 +546,6 @@
 void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -575,8 +553,9 @@
   // Use the FastCloneShallowObjectStub only for shallow boilerplates without
   // elements up to the number of properties that the stubs can handle.
   if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
-      length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    Callable callable = CodeFactory::FastCloneShallowObject(isolate(), length);
+      p.length() <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    Callable callable =
+        CodeFactory::FastCloneShallowObject(isolate(), p.length());
     ReplaceWithStubCall(node, callable, flags);
   } else {
     ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
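
The REPLACE_STUB_CALL macro introduced above takes over from the hand-written LowerJSToBoolean/LowerJSToNumber/... functions removed earlier in this file's diff. As a hedged sketch of the preprocessor expansion (derived from the macro body in this hunk), REPLACE_STUB_CALL(Add) produces roughly:

    void JSGenericLowering::LowerJSAdd(Node* node) {
      CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
      Callable callable = CodeFactory::Add(isolate());
      ReplaceWithStubCall(node, callable, flags);
    }

which matches the shape of the removed per-operator functions, except that the kPatchableCallSite flag formerly passed by LowerJSToBoolean is no longer added.
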
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index 132dec6..d8c9f17 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -171,16 +171,8 @@
       Node* check =
           graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
                            jsgraph()->Constant(property_cell_value));
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
       break;
     }
     case PropertyCellType::kConstantType: {
@@ -191,16 +183,8 @@
       Type* property_cell_value_type = Type::TaggedSigned();
       if (property_cell_value->IsHeapObject()) {
         // Deoptimize if the {value} is a Smi.
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                        check, control);
-        Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-        Node* deoptimize =
-            graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                             frame_state, effect, if_true);
-        // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-        NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-        Revisit(graph()->end());
-        control = graph()->NewNode(common()->IfFalse(), branch);
+        control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                                   effect, control);
 
         // Load the {value} map check against the {property_cell} map.
         Node* value_map = effect =
@@ -213,16 +197,8 @@
             jsgraph()->HeapConstant(property_cell_value_map));
         property_cell_value_type = Type::TaggedPointer();
       }
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
       effect = graph()->NewNode(
           simplified()->StoreField(
               AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index e938798..98ca7aa 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -29,6 +29,10 @@
                 HeapConstant(factory()->empty_fixed_array()));
 }
 
+Node* JSGraph::OptimizedOutConstant() {
+  return CACHED(kOptimizedOutConstant,
+                HeapConstant(factory()->optimized_out()));
+}
 
 Node* JSGraph::UndefinedConstant() {
   return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 5a25ed0..06e8030 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -41,6 +41,7 @@
   // Canonicalized global constants.
   Node* CEntryStubConstant(int result_size);
   Node* EmptyFixedArrayConstant();
+  Node* OptimizedOutConstant();
   Node* UndefinedConstant();
   Node* TheHoleConstant();
   Node* TrueConstant();
@@ -136,6 +137,7 @@
   enum CachedNode {
     kCEntryStubConstant,
     kEmptyFixedArrayConstant,
+    kOptimizedOutConstant,
     kUndefinedConstant,
     kTheHoleConstant,
     kTrueConstant,
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index cd5637b..0e0508b 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -121,7 +121,10 @@
 
 bool JSInliningHeuristic::CandidateCompare::operator()(
     const Candidate& left, const Candidate& right) const {
-  return left.node != right.node && left.calls >= right.calls;
+  if (left.calls != right.calls) {
+    return left.calls > right.calls;
+  }
+  return left.node < right.node;
 }
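
This comparator fix matters because the heuristic keeps its candidates in an ordered set, which requires a strict weak ordering; the old predicate (left.node != right.node && left.calls >= right.calls) lets two distinct candidates with equal call counts each compare "less than" the other. A hedged, self-contained sketch of the new ordering (the struct and names here are illustrative stand-ins for the real Candidate):

    #include <cstdint>

    struct Candidate {
      uintptr_t node_id;  // stands in for the Node* identity used in the real code
      int calls;          // estimated call frequency of the candidate
    };

    // Order primarily by call count, descending; break ties deterministically by
    // node identity so the relation is irreflexive and transitive.
    bool CandidateLess(const Candidate& left, const Candidate& right) {
      if (left.calls != right.calls) return left.calls > right.calls;
      return left.node_id < right.node_id;
    }
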
 
 
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 2244f9b..e3254bd 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -263,6 +263,35 @@
                                     node->InputAt(0), outer_frame_state);
 }
 
+Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
+  FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+  Handle<SharedFunctionInfo> shared;
+  frame_info.shared_info().ToHandle(&shared);
+
+  Node* function = frame_state->InputAt(kFrameStateFunctionInput);
+
+  // If we are inlining a tail call, drop the caller's frame state and the
+  // arguments adaptor frame state if one exists.
+  frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+  if (frame_state->opcode() == IrOpcode::kFrameState) {
+    FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+    if (frame_info.type() == FrameStateType::kArgumentsAdaptor) {
+      frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+    }
+  }
+
+  const FrameStateFunctionInfo* state_info =
+      jsgraph_->common()->CreateFrameStateFunctionInfo(
+          FrameStateType::kTailCallerFunction, 0, 0, shared);
+
+  const Operator* op = jsgraph_->common()->FrameState(
+      BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
+  const Operator* op0 = jsgraph_->common()->StateValues(0);
+  Node* node0 = jsgraph_->graph()->NewNode(op0);
+  return jsgraph_->graph()->NewNode(op, node0, node0, node0,
+                                    jsgraph_->UndefinedConstant(), function,
+                                    frame_state);
+}
 
 namespace {
 
@@ -271,7 +300,10 @@
   DisallowHeapAllocation no_gc;
   Isolate* const isolate = shared_info->GetIsolate();
   Code* const construct_stub = shared_info->construct_stub();
-  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub();
+  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
+         construct_stub !=
+             *isolate->builtins()->JSBuiltinsConstructStubForDerived() &&
+         construct_stub != *isolate->builtins()->JSConstructStubApi();
 }
 
 bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
@@ -380,7 +412,7 @@
     return NoChange();
   }
 
-  Zone zone;
+  Zone zone(info_->isolate()->allocator());
   ParseInfo parse_info(&zone, function);
   CompilationInfo info(&parse_info);
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
@@ -395,17 +427,6 @@
     return NoChange();
   }
 
-  // In strong mode, in case of too few arguments we need to throw a TypeError
-  // so we must not inline this call.
-  int parameter_count = info.literal()->parameter_count();
-  if (is_strong(info.language_mode()) &&
-      call.formal_arguments() < parameter_count) {
-    TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    return NoChange();
-  }
-
   if (!Compiler::EnsureDeoptimizationSupport(&info)) {
     TRACE("Not inlining %s into %s because deoptimization support failed\n",
           shared_info->DebugName()->ToCString().get(),
@@ -508,10 +529,25 @@
     NodeProperties::ReplaceEffectInput(node, convert);
   }
 
+  // If we are inlining a JS call at tail position, we have to pop the current
+  // frame state and its potential arguments adaptor frame state in order to
+  // keep the call stack consistent with the non-inlined case.
+  // After that we add a tail caller frame state, which lets the deoptimizer
+  // handle the case where the outermost function inlines a tail call (it should
+  // remove the potential arguments adaptor frame that belongs to the outermost
+  // function when a deopt happens).
+  if (node->opcode() == IrOpcode::kJSCallFunction) {
+    const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+    if (p.tail_call_mode() == TailCallMode::kAllow) {
+      frame_state = CreateTailCallerFrameState(node, frame_state);
+    }
+  }
+
   // Insert an arguments adaptor frame if required. The callee's formal parameter
   // count (i.e. value outputs of the start node minus target, receiver, new
   // target, arguments count and context) has to match the number of arguments
   // passed to the call.
+  int parameter_count = info.literal()->parameter_count();
   DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index 99eff96..d0ab7c0 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -45,6 +45,8 @@
                                    FrameStateType frame_state_type,
                                    Handle<SharedFunctionInfo> shared);
 
+  Node* CreateTailCallerFrameState(Node* node, Node* outer_frame_state);
+
   Reduction InlineCall(Node* call, Node* new_target, Node* context,
                        Node* frame_state, Node* start, Node* end);
 };
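
CreateTailCallerFrameState, declared above and defined in js-inlining.cc, rewrites the frame state chain when an inlined call sits at tail position: it pops the caller's own frame state (and its arguments adaptor frame state, if present) and pushes a kTailCallerFunction marker instead. A hedged toy model of that chain manipulation, with frame states reduced to a linked list (all names here are illustrative, not V8 API):

    #include <cstring>

    struct FrameStateLink {
      const char* type;       // e.g. "JS", "ArgumentsAdaptor", "TailCaller"
      FrameStateLink* outer;  // next outer frame state, or nullptr
    };

    FrameStateLink* TailCallerFrameStateModel(FrameStateLink* fs) {
      FrameStateLink* outer = fs->outer;  // drop the tail caller's own frame state
      if (outer != nullptr && std::strcmp(outer->type, "ArgumentsAdaptor") == 0) {
        outer = outer->outer;             // ... and its arguments adaptor, if any
      }
      // Mark the spot so the deoptimizer can drop the caller's adaptor frame.
      return new FrameStateLink{"TailCaller", outer};
    }
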
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index abeb110..034ee6f 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -15,7 +15,6 @@
 #include "src/compiler/operator-properties.h"
 #include "src/counters.h"
 #include "src/objects-inl.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -23,11 +22,7 @@
 
 JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
                                          DeoptimizationMode mode)
-    : AdvancedReducer(editor),
-      jsgraph_(jsgraph),
-      mode_(mode),
-      type_cache_(TypeCache::Get()) {}
-
+    : AdvancedReducer(editor), jsgraph_(jsgraph), mode_(mode) {}
 
 Reduction JSIntrinsicLowering::Reduce(Node* node) {
   if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
@@ -45,8 +40,6 @@
       return ReduceDoubleHi(node);
     case Runtime::kInlineDoubleLo:
       return ReduceDoubleLo(node);
-    case Runtime::kInlineIncrementStatsCounter:
-      return ReduceIncrementStatsCounter(node);
     case Runtime::kInlineIsArray:
       return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
     case Runtime::kInlineIsTypedArray:
@@ -57,12 +50,6 @@
       return ReduceIsJSReceiver(node);
     case Runtime::kInlineIsSmi:
       return ReduceIsSmi(node);
-    case Runtime::kInlineMathClz32:
-      return ReduceMathClz32(node);
-    case Runtime::kInlineMathFloor:
-      return ReduceMathFloor(node);
-    case Runtime::kInlineMathSqrt:
-      return ReduceMathSqrt(node);
     case Runtime::kInlineValueOf:
       return ReduceValueOf(node);
     case Runtime::kInlineFixedArrayGet:
@@ -95,10 +82,12 @@
       return ReduceToString(node);
     case Runtime::kInlineCall:
       return ReduceCall(node);
-    case Runtime::kInlineTailCall:
-      return ReduceTailCall(node);
+    case Runtime::kInlineNewObject:
+      return ReduceNewObject(node);
     case Runtime::kInlineGetSuperConstructor:
       return ReduceGetSuperConstructor(node);
+    case Runtime::kInlineGetOrdinaryHasInstance:
+      return ReduceGetOrdinaryHasInstance(node);
     default:
       break;
   }
@@ -149,40 +138,23 @@
 
 
 Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
+  // Tell the compiler to assume number input.
+  Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+                                   node->InputAt(0), graph()->start());
+  node->ReplaceInput(0, renamed);
   return Change(node, machine()->Float64ExtractHighWord32());
 }
 
 
 Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
+  // Tell the compiler to assume number input.
+  Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+                                   node->InputAt(0), graph()->start());
+  node->ReplaceInput(0, renamed);
   return Change(node, machine()->Float64ExtractLowWord32());
 }
 
 
-Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
-  if (!FLAG_native_code_counters) return ChangeToUndefined(node);
-  HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
-  if (!m.HasValue() || !m.Value()->IsString()) {
-    return ChangeToUndefined(node);
-  }
-  base::SmartArrayPointer<char> name =
-      Handle<String>::cast(m.Value())->ToCString();
-  StatsCounter counter(jsgraph()->isolate(), name.get());
-  if (!counter.Enabled()) return ChangeToUndefined(node);
-
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  FieldAccess access = AccessBuilder::ForStatsCounter();
-  Node* cnt = jsgraph()->ExternalConstant(ExternalReference(&counter));
-  Node* load =
-      graph()->NewNode(simplified()->LoadField(access), cnt, effect, control);
-  Node* inc =
-      graph()->NewNode(machine()->Int32Add(), load, jsgraph()->OneConstant());
-  Node* store = graph()->NewNode(simplified()->StoreField(access), cnt, inc,
-                                 load, control);
-  return ChangeToUndefined(node, store);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceIsInstanceType(
     Node* node, InstanceType instance_type) {
   // if (%_IsSmi(value)) {
@@ -232,22 +204,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
-  return Change(node, machine()->Word32Clz());
-}
-
-
-Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
-  if (!machine()->Float64RoundDown().IsSupported()) return NoChange();
-  return Change(node, machine()->Float64RoundDown().op());
-}
-
-
-Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
-  return Change(node, machine()->Float64Sqrt());
-}
-
-
 Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
   // if (%_IsSmi(value)) {
   //   return value;
@@ -385,44 +341,8 @@
 
 
 Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // ToInteger is a no-op on integer values and -0.
-  Type* value_type = NodeProperties::GetType(value);
-  if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-
-  Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = value;
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    vfalse = efalse =
-        graph()->NewNode(javascript()->CallRuntime(Runtime::kToInteger), value,
-                         context, frame_state, efalse, if_false);
-    if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue, vfalse, control);
-  // TODO(bmeurer, mstarzinger): Rewire IfException inputs to {vfalse}.
-  ReplaceWithValue(node, value, effect, control);
-  return Changed(value);
+  NodeProperties::ChangeOp(node, javascript()->ToInteger());
+  return Changed(node);
 }
 
 
@@ -439,38 +359,8 @@
 
 
 Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Type* value_type = NodeProperties::GetType(value);
-  if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
-    if (value_type->Max() <= 0.0) {
-      value = jsgraph()->ZeroConstant();
-    } else if (value_type->Min() >= kMaxSafeInteger) {
-      value = jsgraph()->Constant(kMaxSafeInteger);
-    } else {
-      if (value_type->Min() <= 0.0) {
-        value = graph()->NewNode(
-            common()->Select(MachineRepresentation::kTagged),
-            graph()->NewNode(simplified()->NumberLessThanOrEqual(), value,
-                             jsgraph()->ZeroConstant()),
-            jsgraph()->ZeroConstant(), value);
-        value_type = Type::Range(0.0, value_type->Max(), graph()->zone());
-        NodeProperties::SetType(value, value_type);
-      }
-      if (value_type->Max() > kMaxSafeInteger) {
-        value = graph()->NewNode(
-            common()->Select(MachineRepresentation::kTagged),
-            graph()->NewNode(simplified()->NumberLessThanOrEqual(),
-                             jsgraph()->Constant(kMaxSafeInteger), value),
-            jsgraph()->Constant(kMaxSafeInteger), value);
-        value_type =
-            Type::Range(value_type->Min(), kMaxSafeInteger, graph()->zone());
-        NodeProperties::SetType(value, value_type);
-      }
-    }
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-  return Change(node, CodeFactory::ToLength(isolate()), 0);
+  NodeProperties::ChangeOp(node, javascript()->ToLength());
+  return Changed(node);
 }
 
 
@@ -506,17 +396,18 @@
   return Changed(node);
 }
 
-
-Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
-  size_t const arity = CallRuntimeParametersOf(node->op()).arity();
-  NodeProperties::ChangeOp(node,
-                           javascript()->CallFunction(arity, VectorSlotPair(),
-                                                      ConvertReceiverMode::kAny,
-                                                      TailCallMode::kAllow));
-  return Changed(node);
+Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
+  Node* constructor = NodeProperties::GetValueInput(node, 0);
+  Node* new_target = NodeProperties::GetValueInput(node, 1);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* value = graph()->NewNode(javascript()->Create(), constructor,
+                                 new_target, context, frame_state, effect);
+  ReplaceWithValue(node, value, value);
+  return Replace(value);
 }
 
-
 Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
   Node* active_function = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -528,6 +419,16 @@
                 active_function_map, effect, control);
 }
 
+Reduction JSIntrinsicLowering::ReduceGetOrdinaryHasInstance(Node* node) {
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  return Change(node, javascript()->LoadContext(
+                          0, Context::ORDINARY_HAS_INSTANCE_INDEX, true),
+                native_context, context, effect);
+}
 
 Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
                                       Node* b) {
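
Earlier in this file, ReduceDoubleHi and ReduceDoubleLo now insert a Guard(Type::Number()) on their input before lowering to Float64ExtractHighWord32 and Float64ExtractLowWord32. As a rough reference for what those machine operators compute on a concrete double (a hedged sketch, not V8 code):

    #include <cstdint>
    #include <cstring>

    // High 32 bits of the IEEE-754 representation (sign, exponent, top mantissa).
    static uint32_t Float64ExtractHighWord32Ref(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    // Low 32 bits of the IEEE-754 representation (remaining mantissa bits).
    static uint32_t Float64ExtractLowWord32Ref(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      return static_cast<uint32_t>(bits);
    }
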
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index d8e1102..a43ed01 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -13,7 +13,6 @@
 
 // Forward declarations.
 class Callable;
-class TypeCache;
 
 
 namespace compiler {
@@ -43,13 +42,9 @@
   Reduction ReduceDeoptimizeNow(Node* node);
   Reduction ReduceDoubleHi(Node* node);
   Reduction ReduceDoubleLo(Node* node);
-  Reduction ReduceIncrementStatsCounter(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
   Reduction ReduceIsJSReceiver(Node* node);
   Reduction ReduceIsSmi(Node* node);
-  Reduction ReduceMathClz32(Node* node);
-  Reduction ReduceMathFloor(Node* node);
-  Reduction ReduceMathSqrt(Node* node);
   Reduction ReduceValueOf(Node* node);
   Reduction ReduceFixedArrayGet(Node* node);
   Reduction ReduceFixedArraySet(Node* node);
@@ -66,8 +61,9 @@
   Reduction ReduceToPrimitive(Node* node);
   Reduction ReduceToString(Node* node);
   Reduction ReduceCall(Node* node);
-  Reduction ReduceTailCall(Node* node);
+  Reduction ReduceNewObject(Node* node);
   Reduction ReduceGetSuperConstructor(Node* node);
+  Reduction ReduceGetOrdinaryHasInstance(Node* node);
 
   Reduction Change(Node* node, const Operator* op);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
@@ -86,11 +82,9 @@
   MachineOperatorBuilder* machine() const;
   SimplifiedOperatorBuilder* simplified() const;
   DeoptimizationMode mode() const { return mode_; }
-  TypeCache const& type_cache() const { return type_cache_; }
 
   JSGraph* const jsgraph_;
   DeoptimizationMode const mode_;
-  TypeCache const& type_cache_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index 2c11794..d1353d2 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -108,27 +108,36 @@
   ZoneVector<Node*> effects(zone());
   ZoneVector<Node*> controls(zone());
 
-  // The list of "exiting" controls, which currently go to a single deoptimize.
-  // TODO(bmeurer): Consider using an IC as fallback.
-  Node* const exit_effect = effect;
-  ZoneVector<Node*> exit_controls(zone());
-
   // Ensure that {index} matches the specified {name} (if {index} is given).
   if (index != nullptr) {
     Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
                                    index, jsgraph()->HeapConstant(name));
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-    exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-    control = graph()->NewNode(common()->IfTrue(), branch);
+    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               effect, control);
+  }
+
+  // Check if {receiver} may be a number.
+  bool receiverissmi_possible = false;
+  for (PropertyAccessInfo const& access_info : access_infos) {
+    if (access_info.receiver_type()->Is(Type::Number())) {
+      receiverissmi_possible = true;
+      break;
+    }
   }
 
   // Ensure that {receiver} is a heap object.
   Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
-  Node* branch = graph()->NewNode(common()->Branch(), check, control);
-  control = graph()->NewNode(common()->IfFalse(), branch);
-  Node* receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+  Node* receiverissmi_control = nullptr;
   Node* receiverissmi_effect = effect;
+  if (receiverissmi_possible) {
+    Node* branch = graph()->NewNode(common()->Branch(), check, control);
+    control = graph()->NewNode(common()->IfFalse(), branch);
+    receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+    receiverissmi_effect = effect;
+  } else {
+    control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               effect, control);
+  }
 
   // Load the {receiver} map. The resulting effect is the dominating effect for
   // all (polymorphic) branches.
@@ -138,7 +147,8 @@
 
   // Generate code for the various different property access patterns.
   Node* fallthrough_control = control;
-  for (PropertyAccessInfo const& access_info : access_infos) {
+  for (size_t j = 0; j < access_infos.size(); ++j) {
+    PropertyAccessInfo const& access_info = access_infos[j];
     Node* this_value = value;
     Node* this_receiver = receiver;
     Node* this_effect = effect;
@@ -154,37 +164,52 @@
       Node* check =
           graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
                            jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
-      Node* branch =
-          graph()->NewNode(common()->Branch(), check, fallthrough_control);
-      fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      if (j == access_infos.size() - 1) {
+        this_control =
+            graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                             this_effect, fallthrough_control);
+        fallthrough_control = nullptr;
+      } else {
+        Node* branch =
+            graph()->NewNode(common()->Branch(), check, fallthrough_control);
+        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+        this_control = graph()->NewNode(common()->IfTrue(), branch);
+      }
     } else {
       // Emit a (sequence of) map checks for other {receiver}s.
       ZoneVector<Node*> this_controls(zone());
       ZoneVector<Node*> this_effects(zone());
+      int num_classes = access_info.receiver_type()->NumClasses();
       for (auto i = access_info.receiver_type()->Classes(); !i.Done();
            i.Advance()) {
+        DCHECK_LT(0, num_classes);
         Handle<Map> map = i.Current();
         Node* check =
             graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
                              receiver_map, jsgraph()->Constant(map));
-        Node* branch =
-            graph()->NewNode(common()->Branch(), check, fallthrough_control);
-        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
-        this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-        this_effects.push_back(this_effect);
+        if (--num_classes == 0 && j == access_infos.size() - 1) {
+          this_controls.push_back(
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               this_effect, fallthrough_control));
+          this_effects.push_back(this_effect);
+          fallthrough_control = nullptr;
+        } else {
+          Node* branch =
+              graph()->NewNode(common()->Branch(), check, fallthrough_control);
+          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+          this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+          this_effects.push_back(this_effect);
+        }
       }
 
       // The Number case requires special treatment to also deal with Smis.
       if (receiver_type->Is(Type::Number())) {
-        // Join this check with the "receiver is smi" check above, and mark the
-        // "receiver is smi" check as "consumed" so that we don't deoptimize if
-        // the {receiver} is actually a Smi.
-        if (receiverissmi_control != nullptr) {
-          this_controls.push_back(receiverissmi_control);
-          this_effects.push_back(receiverissmi_effect);
-          receiverissmi_control = receiverissmi_effect = nullptr;
-        }
+        // Join this check with the "receiver is smi" check above.
+        DCHECK_NOT_NULL(receiverissmi_effect);
+        DCHECK_NOT_NULL(receiverissmi_control);
+        this_effects.push_back(receiverissmi_effect);
+        this_controls.push_back(receiverissmi_control);
+        receiverissmi_effect = receiverissmi_control = nullptr;
       }
 
       // Create dominating Merge+EffectPhi for this {receiver} type.
@@ -212,23 +237,14 @@
     // Generate the actual property access.
     if (access_info.IsNotFound()) {
       DCHECK_EQ(AccessMode::kLoad, access_mode);
-      if (is_strong(language_mode)) {
-        // TODO(bmeurer/mstarzinger): Add support for lowering inside try
-        // blocks rewiring the IfException edge to a runtime call/throw.
-        exit_controls.push_back(this_control);
-        continue;
-      } else {
-        this_value = jsgraph()->UndefinedConstant();
-      }
+      this_value = jsgraph()->UndefinedConstant();
     } else if (access_info.IsDataConstant()) {
       this_value = jsgraph()->Constant(access_info.constant());
       if (access_mode == AccessMode::kStore) {
         Node* check = graph()->NewNode(
             simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-        exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-        this_control = graph()->NewNode(common()->IfTrue(), branch);
+        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, this_effect, this_control);
       }
     } else {
       DCHECK(access_info.IsDataField());
@@ -253,10 +269,9 @@
                                jsgraph()->Int32Constant(
                                    1 << JSArrayBuffer::WasNeutered::kShift)),
               jsgraph()->Int32Constant(0));
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                          check, this_control);
-          exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-          this_control = graph()->NewNode(common()->IfFalse(), branch);
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
           break;
         }
       }
@@ -292,11 +307,9 @@
         if (field_type->Is(Type::UntaggedFloat64())) {
           Node* check =
               graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                          check, this_control);
-          exit_controls.push_back(
-              graph()->NewNode(common()->IfFalse(), branch));
-          this_control = graph()->NewNode(common()->IfTrue(), branch);
+          this_control =
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               this_effect, this_control);
           this_value = graph()->NewNode(common()->Guard(Type::Number()),
                                         this_value, this_control);
 
@@ -335,46 +348,30 @@
         } else if (field_type->Is(Type::TaggedSigned())) {
           Node* check =
               graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                          check, this_control);
-          exit_controls.push_back(
-              graph()->NewNode(common()->IfFalse(), branch));
-          this_control = graph()->NewNode(common()->IfTrue(), branch);
+          this_control =
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               this_effect, this_control);
           this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
                                         this_value, this_control);
         } else if (field_type->Is(Type::TaggedPointer())) {
           Node* check =
               graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                          check, this_control);
-          exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-          this_control = graph()->NewNode(common()->IfFalse(), branch);
-          if (field_type->NumClasses() > 0) {
-            // Emit a (sequence of) map checks for the value.
-            ZoneVector<Node*> this_controls(zone());
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
+          if (field_type->NumClasses() == 1) {
+            // Emit a map check for the value.
             Node* this_value_map = this_effect = graph()->NewNode(
                 simplified()->LoadField(AccessBuilder::ForMap()), this_value,
                 this_effect, this_control);
-            for (auto i = field_type->Classes(); !i.Done(); i.Advance()) {
-              Handle<Map> field_map(i.Current());
-              check = graph()->NewNode(
-                  simplified()->ReferenceEqual(Type::Internal()),
-                  this_value_map, jsgraph()->Constant(field_map));
-              branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-              this_control = graph()->NewNode(common()->IfFalse(), branch);
-              this_controls.push_back(
-                  graph()->NewNode(common()->IfTrue(), branch));
-            }
-            exit_controls.push_back(this_control);
-            int const this_control_count =
-                static_cast<int>(this_controls.size());
+            Node* check = graph()->NewNode(
+                simplified()->ReferenceEqual(Type::Internal()), this_value_map,
+                jsgraph()->Constant(field_type->Classes().Current()));
             this_control =
-                (this_control_count == 1)
-                    ? this_controls.front()
-                    : graph()->NewNode(common()->Merge(this_control_count),
-                                       this_control_count,
-                                       &this_controls.front());
+                graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, this_effect, this_control);
+          } else {
+            DCHECK_EQ(0, field_type->NumClasses());
           }
         } else {
           DCHECK(field_type->Is(Type::Tagged()));
@@ -403,39 +400,7 @@
     controls.push_back(this_control);
   }
 
-  // Collect the fallthrough control as final "exit" control.
-  if (fallthrough_control != control) {
-    // Mark the last fallthrough branch as deferred.
-    MarkAsDeferred(fallthrough_control);
-  }
-  exit_controls.push_back(fallthrough_control);
-
-  // Also collect the "receiver is smi" control if we didn't handle the case of
-  // Number primitives in the polymorphic branches above.
-  if (receiverissmi_control != nullptr) {
-    // Mark the "receiver is smi" case as deferred.
-    MarkAsDeferred(receiverissmi_control);
-    DCHECK_EQ(exit_effect, receiverissmi_effect);
-    exit_controls.push_back(receiverissmi_control);
-  }
-
-  // Generate the single "exit" point, where we get if either all map/instance
-  // type checks failed, or one of the assumptions inside one of the cases
-  // failes (i.e. failing prototype chain check).
-  // TODO(bmeurer): Consider falling back to IC here if deoptimization is
-  // disabled.
-  int const exit_control_count = static_cast<int>(exit_controls.size());
-  Node* exit_control =
-      (exit_control_count == 1)
-          ? exit_controls.front()
-          : graph()->NewNode(common()->Merge(exit_control_count),
-                             exit_control_count, &exit_controls.front());
-  Node* deoptimize =
-      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                       frame_state, exit_effect, exit_control);
-  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-  Revisit(graph()->end());
+  DCHECK_NULL(fallthrough_control);
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -562,17 +527,10 @@
   ZoneVector<Node*> effects(zone());
   ZoneVector<Node*> controls(zone());
 
-  // The list of "exiting" controls, which currently go to a single deoptimize.
-  // TODO(bmeurer): Consider using an IC as fallback.
-  Node* const exit_effect = effect;
-  ZoneVector<Node*> exit_controls(zone());
-
   // Ensure that {receiver} is a heap object.
   Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-  exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-  control = graph()->NewNode(common()->IfFalse(), branch);
+  control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                             effect, control);
 
   // Load the {receiver} map. The resulting effect is the dominating effect for
   // all (polymorphic) branches.
@@ -582,7 +540,8 @@
 
   // Generate code for the various different element access patterns.
   Node* fallthrough_control = control;
-  for (ElementAccessInfo const& access_info : access_infos) {
+  for (size_t j = 0; j < access_infos.size(); ++j) {
+    ElementAccessInfo const& access_info = access_infos[j];
     Node* this_receiver = receiver;
     Node* this_value = value;
     Node* this_index = index;
@@ -595,35 +554,61 @@
     {
       ZoneVector<Node*> this_controls(zone());
       ZoneVector<Node*> this_effects(zone());
+      size_t num_transitions = access_info.transitions().size();
+      int num_classes = access_info.receiver_type()->NumClasses();
       for (auto i = access_info.receiver_type()->Classes(); !i.Done();
            i.Advance()) {
+        DCHECK_LT(0, num_classes);
         Handle<Map> map = i.Current();
         Node* check =
             graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
                              receiver_map, jsgraph()->Constant(map));
-        Node* branch =
-            graph()->NewNode(common()->Branch(), check, fallthrough_control);
-        this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+        if (--num_classes == 0 && num_transitions == 0 &&
+            j == access_infos.size() - 1) {
+          // Last map check on the fallthrough control path, do a conditional
+          // eager deoptimization exit here.
+          // TODO(turbofan): This is ugly as hell! We should probably introduce
+          // macro-ish operators for property access that encapsulate this whole
+          // mess.
+          this_controls.push_back(graph()->NewNode(common()->DeoptimizeUnless(),
+                                                   check, frame_state, effect,
+                                                   fallthrough_control));
+          fallthrough_control = nullptr;
+        } else {
+          Node* branch =
+              graph()->NewNode(common()->Branch(), check, fallthrough_control);
+          this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+        }
         this_effects.push_back(effect);
-        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
         if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
       }
 
       // Generate possible elements kind transitions.
       for (auto transition : access_info.transitions()) {
+        DCHECK_LT(0u, num_transitions);
         Handle<Map> transition_source = transition.first;
         Handle<Map> transition_target = transition.second;
+        Node* transition_control;
+        Node* transition_effect = effect;
 
         // Check if {receiver} has the specified {transition_source} map.
         Node* check = graph()->NewNode(
             simplified()->ReferenceEqual(Type::Any()), receiver_map,
             jsgraph()->HeapConstant(transition_source));
-        Node* branch =
-            graph()->NewNode(common()->Branch(), check, fallthrough_control);
+        if (--num_transitions == 0 && j == access_infos.size() - 1) {
+          transition_control =
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               transition_effect, fallthrough_control);
+          fallthrough_control = nullptr;
+        } else {
+          Node* branch =
+              graph()->NewNode(common()->Branch(), check, fallthrough_control);
+          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+          transition_control = graph()->NewNode(common()->IfTrue(), branch);
+        }
 
         // Migrate {receiver} from {transition_source} to {transition_target}.
-        Node* transition_control = graph()->NewNode(common()->IfTrue(), branch);
-        Node* transition_effect = effect;
         if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
                                         transition_target->elements_kind())) {
           // In-place migration, just store the {transition_target} map.
@@ -647,8 +632,6 @@
         }
         this_controls.push_back(transition_control);
         this_effects.push_back(transition_effect);
-
-        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
       }
 
       // Create single chokepoint for the control.
@@ -679,10 +662,8 @@
     if (!NumberMatcher(this_index).HasValue()) {
       Node* check =
           graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
-      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                      check, this_control);
-      exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, this_effect, this_control);
       this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
                                     this_control);
     }
@@ -694,10 +675,8 @@
           graph()->NewNode(simplified()->NumberToUint32(), this_index);
       Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
                                      this_index);
-      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                      check, this_control);
-      exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, this_effect, this_control);
       this_index = this_index32;
     }
 
@@ -716,13 +695,11 @@
       Node* this_elements_map = this_effect =
           graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
                            this_elements, this_effect, this_control);
-      check = graph()->NewNode(
+      Node* check = graph()->NewNode(
           simplified()->ReferenceEqual(Type::Any()), this_elements_map,
           jsgraph()->HeapConstant(factory()->fixed_array_map()));
-      branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
-                                this_control);
-      exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, this_effect, this_control);
     }
 
     // Load the length of the {receiver}.
@@ -739,10 +716,8 @@
     // Check that the {index} is in the valid range for the {receiver}.
     Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
                                    this_length);
-    Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
-                                    this_control);
-    exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-    this_control = graph()->NewNode(common()->IfTrue(), branch);
+    this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                    frame_state, this_effect, this_control);
 
     // Compute the element access.
     Type* element_type = Type::Any();
@@ -781,16 +756,16 @@
         Node* check =
             graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
                              this_value, jsgraph()->TheHoleConstant());
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                        check, this_control);
-        Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-        Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
         // Check if we are allowed to turn the hole into undefined.
         Type* initial_holey_array_type = Type::Class(
             handle(isolate()->get_initial_js_array_map(elements_kind)),
             graph()->zone());
         if (receiver_type->NowIs(initial_holey_array_type) &&
             isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                          check, this_control);
+          Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+          Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
           // Add a code dependency on the array protector cell.
           AssumePrototypesStable(receiver_type, native_context,
                                  isolate()->initial_object_prototype());
@@ -805,8 +780,9 @@
               Type::Union(element_type, Type::Undefined(), graph()->zone());
         } else {
           // Deoptimize in case of the hole.
-          exit_controls.push_back(if_true);
-          this_control = if_false;
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
         }
         // Rename the result to represent the actual type (not polluted by the
         // hole).
@@ -833,29 +809,24 @@
               check, jsgraph()->UndefinedConstant(), this_value);
         } else {
           // Deoptimize in case of the hole.
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                          check, this_control);
-          this_control = graph()->NewNode(common()->IfFalse(), branch);
-          exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
         }
       }
     } else {
       DCHECK_EQ(AccessMode::kStore, access_mode);
       if (IsFastSmiElementsKind(elements_kind)) {
         Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-        exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-        this_control = graph()->NewNode(common()->IfTrue(), branch);
+        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, this_effect, this_control);
         this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
                                       this_value, this_control);
       } else if (IsFastDoubleElementsKind(elements_kind)) {
         Node* check =
             graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-        exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-        this_control = graph()->NewNode(common()->IfTrue(), branch);
+        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, this_effect, this_control);
         this_value = graph()->NewNode(common()->Guard(Type::Number()),
                                       this_value, this_control);
       }
@@ -870,30 +841,7 @@
     controls.push_back(this_control);
   }
 
-  // Collect the fallthrough control as final "exit" control.
-  if (fallthrough_control != control) {
-    // Mark the last fallthrough branch as deferred.
-    MarkAsDeferred(fallthrough_control);
-  }
-  exit_controls.push_back(fallthrough_control);
-
-  // Generate the single "exit" point, where we get if either all map/instance
-  // type checks failed, or one of the assumptions inside one of the cases
-  // fails (i.e. failing prototype chain check).
-  // TODO(bmeurer): Consider falling back to IC here if deoptimization is
-  // disabled.
-  int const exit_control_count = static_cast<int>(exit_controls.size());
-  Node* exit_control =
-      (exit_control_count == 1)
-          ? exit_controls.front()
-          : graph()->NewNode(common()->Merge(exit_control_count),
-                             exit_control_count, &exit_controls.front());
-  Node* deoptimize =
-      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                       frame_state, exit_effect, exit_control);
-  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-  Revisit(graph()->end());
+  DCHECK_NULL(fallthrough_control);
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -1048,18 +996,6 @@
 }
 
 
-void JSNativeContextSpecialization::MarkAsDeferred(Node* if_projection) {
-  Node* branch = NodeProperties::GetControlInput(if_projection);
-  DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
-  if (if_projection->opcode() == IrOpcode::kIfTrue) {
-    NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kFalse));
-  } else {
-    DCHECK_EQ(IrOpcode::kIfFalse, if_projection->opcode());
-    NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kTrue));
-  }
-}
-
-
 MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
     Node* node) {
   Node* const context = NodeProperties::GetContextInput(node);
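The access lowerings above no longer collect failing control edges into an exit_controls list that is merged into one eager Deoptimize node at the end; each check now produces its own deoptimization exit through the DeoptimizeIf / DeoptimizeUnless common operators, which take the check, the frame state, the current effect and control, and return the surviving control edge. A minimal sketch of the two shapes, using only node-creation calls that already appear in the hunks above:

    // Old shape: branch, remember the failing edge, merge all failing edges
    // into a single Deoptimize node at the end of the lowering.
    Node* branch =
        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
    exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
    control = graph()->NewNode(common()->IfTrue(), branch);

    // New shape: deoptimize eagerly right at the check; the node's output is
    // the control edge on which the check held, so no exit bookkeeping is
    // needed and the final DCHECK_NULL(fallthrough_control) can hold.
    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                               effect, control);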
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 4251d72..5562c6e 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -85,10 +85,6 @@
                               Handle<Context> native_context,
                               Handle<JSObject> holder);
 
-  // Assuming that {if_projection} is either IfTrue or IfFalse, adds a hint on
-  // the dominating Branch that {if_projection} is the unlikely (deferred) case.
-  void MarkAsDeferred(Node* if_projection);
-
   // Retrieve the native context from the given {node} if known.
   MaybeHandle<Context> GetNativeContext(Node* node);
 
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 5fcd519..98e090b 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -346,7 +346,8 @@
 bool operator==(CreateLiteralParameters const& lhs,
                 CreateLiteralParameters const& rhs) {
   return lhs.constant().location() == rhs.constant().location() &&
-         lhs.flags() == rhs.flags() && lhs.index() == rhs.index();
+         lhs.length() == rhs.length() && lhs.flags() == rhs.flags() &&
+         lhs.index() == rhs.index();
 }
 
 
@@ -357,12 +358,14 @@
 
 
 size_t hash_value(CreateLiteralParameters const& p) {
-  return base::hash_combine(p.constant().location(), p.flags(), p.index());
+  return base::hash_combine(p.constant().location(), p.length(), p.flags(),
+                            p.index());
 }
 
 
 std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
-  return os << Brief(*p.constant()) << ", " << p.flags() << ", " << p.index();
+  return os << Brief(*p.constant()) << ", " << p.length() << ", " << p.flags()
+            << ", " << p.index();
 }
 
 
@@ -382,10 +385,12 @@
   V(GreaterThan, Operator::kNoProperties, 2, 1)            \
   V(LessThanOrEqual, Operator::kNoProperties, 2, 1)        \
   V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)     \
-  V(ToNumber, Operator::kNoProperties, 1, 1)               \
-  V(ToString, Operator::kNoProperties, 1, 1)               \
+  V(ToInteger, Operator::kNoProperties, 1, 1)              \
+  V(ToLength, Operator::kNoProperties, 1, 1)               \
   V(ToName, Operator::kNoProperties, 1, 1)                 \
+  V(ToNumber, Operator::kNoProperties, 1, 1)               \
   V(ToObject, Operator::kNoProperties, 1, 1)               \
+  V(ToString, Operator::kNoProperties, 1, 1)               \
   V(Yield, Operator::kNoProperties, 1, 1)                  \
   V(Create, Operator::kEliminatable, 2, 1)                 \
   V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
@@ -733,12 +738,11 @@
       parameters);                                         // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateLiteralArray(
-    Handle<FixedArray> constant_elements, int literal_flags,
-    int literal_index) {
-  CreateLiteralParameters parameters(constant_elements, literal_flags,
-                                     literal_index);
+    Handle<FixedArray> constant_elements, int literal_flags, int literal_index,
+    int number_of_elements) {
+  CreateLiteralParameters parameters(constant_elements, number_of_elements,
+                                     literal_flags, literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(        // --
       IrOpcode::kJSCreateLiteralArray, Operator::kNoProperties,  // opcode
       "JSCreateLiteralArray",                                    // name
@@ -746,12 +750,11 @@
       parameters);                                               // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateLiteralObject(
     Handle<FixedArray> constant_properties, int literal_flags,
-    int literal_index) {
-  CreateLiteralParameters parameters(constant_properties, literal_flags,
-                                     literal_index);
+    int literal_index, int number_of_properties) {
+  CreateLiteralParameters parameters(constant_properties, number_of_properties,
+                                     literal_flags, literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(         // --
       IrOpcode::kJSCreateLiteralObject, Operator::kNoProperties,  // opcode
       "JSCreateLiteralObject",                                    // name
@@ -762,7 +765,7 @@
 
 const Operator* JSOperatorBuilder::CreateLiteralRegExp(
     Handle<String> constant_pattern, int literal_flags, int literal_index) {
-  CreateLiteralParameters parameters(constant_pattern, literal_flags,
+  CreateLiteralParameters parameters(constant_pattern, -1, literal_flags,
                                      literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(         // --
       IrOpcode::kJSCreateLiteralRegExp, Operator::kNoProperties,  // opcode
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 070e71e..eb323c9 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -350,15 +350,18 @@
 // JSCreateLiteralRegExp operators.
 class CreateLiteralParameters final {
  public:
-  CreateLiteralParameters(Handle<HeapObject> constant, int flags, int index)
-      : constant_(constant), flags_(flags), index_(index) {}
+  CreateLiteralParameters(Handle<HeapObject> constant, int length, int flags,
+                          int index)
+      : constant_(constant), length_(length), flags_(flags), index_(index) {}
 
   Handle<HeapObject> constant() const { return constant_; }
+  int length() const { return length_; }
   int flags() const { return flags_; }
   int index() const { return index_; }
 
  private:
   Handle<HeapObject> const constant_;
+  int const length_;
   int const flags_;
   int const index_;
 };
@@ -401,10 +404,12 @@
   const Operator* Modulus(BinaryOperationHints hints);
 
   const Operator* ToBoolean(ToBooleanHints hints);
-  const Operator* ToNumber();
-  const Operator* ToString();
+  const Operator* ToInteger();
+  const Operator* ToLength();
   const Operator* ToName();
+  const Operator* ToNumber();
   const Operator* ToObject();
+  const Operator* ToString();
   const Operator* Yield();
 
   const Operator* Create();
@@ -414,9 +419,11 @@
                                 PretenureFlag pretenure);
   const Operator* CreateIterResultObject();
   const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
-                                     int literal_flags, int literal_index);
+                                     int literal_flags, int literal_index,
+                                     int number_of_elements);
   const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
-                                      int literal_flags, int literal_index);
+                                      int literal_flags, int literal_index,
+                                      int number_of_properties);
   const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
                                       int literal_flags, int literal_index);
 
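CreateLiteralParameters now carries the number of boilerplate elements or properties next to the flags and literal index, and equality, hashing and printing all include the new field; CreateLiteralRegExp has no element count and passes -1. A hedged usage sketch (the javascript() accessor and the concrete values are illustrative assumptions, not taken from this patch):

    // Array literal with three boilerplate elements at literal index 0.
    const Operator* op = javascript()->CreateLiteralArray(
        constant_elements, /* literal_flags */ 0, /* literal_index */ 0,
        /* number_of_elements */ 3);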
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 11ae3a9..7e1a0dc 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -533,15 +533,11 @@
     return r.ChangeToPureOperator(
         simplified()->ReferenceEqual(Type::Receiver()), invert);
   }
-  if (r.OneInputIs(Type::NullOrUndefined())) {
-    Callable const callable = CodeFactory::CompareNilIC(isolate(), kNullValue);
-    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNeedsFrameState, node->op()->properties());
-    node->RemoveInput(r.LeftInputIs(Type::NullOrUndefined()) ? 0 : 1);
-    node->InsertInput(graph()->zone(), 0,
-                      jsgraph()->HeapConstant(callable.code()));
-    NodeProperties::ChangeOp(node, common()->Call(desc));
+  if (r.OneInputIs(Type::Undetectable())) {
+    RelaxEffectsAndControls(node);
+    node->RemoveInput(r.LeftInputIs(Type::Undetectable()) ? 0 : 1);
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->ObjectIsUndetectable());
     if (invert) {
       // Insert a boolean not to invert the value.
       Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
@@ -648,6 +644,51 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceJSToInteger(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kIntegerOrMinusZero)) {
+    // JSToInteger(x:integer) => x
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  Type* input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kIntegerOrMinusZero)) {
+    if (input_type->Max() <= 0.0) {
+      input = jsgraph()->ZeroConstant();
+    } else if (input_type->Min() >= kMaxSafeInteger) {
+      input = jsgraph()->Constant(kMaxSafeInteger);
+    } else {
+      if (input_type->Min() <= 0.0) {
+        input = graph()->NewNode(
+            common()->Select(MachineRepresentation::kTagged),
+            graph()->NewNode(simplified()->NumberLessThanOrEqual(), input,
+                             jsgraph()->ZeroConstant()),
+            jsgraph()->ZeroConstant(), input);
+        input_type = Type::Range(0.0, input_type->Max(), graph()->zone());
+        NodeProperties::SetType(input, input_type);
+      }
+      if (input_type->Max() > kMaxSafeInteger) {
+        input = graph()->NewNode(
+            common()->Select(MachineRepresentation::kTagged),
+            graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+                             jsgraph()->Constant(kMaxSafeInteger), input),
+            jsgraph()->Constant(kMaxSafeInteger), input);
+        input_type =
+            Type::Range(input_type->Min(), kMaxSafeInteger, graph()->zone());
+        NodeProperties::SetType(input, input_type);
+      }
+    }
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
 
 Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
   if (input->opcode() == IrOpcode::kJSToNumber) {
@@ -699,7 +740,10 @@
     // JSToNumber(x:boolean) => BooleanToNumber(x)
     return Replace(graph()->NewNode(simplified()->BooleanToNumber(), input));
   }
-  // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
+  if (input_type->Is(Type::String())) {
+    // JSToNumber(x:string) => StringToNumber(x)
+    return Replace(graph()->NewNode(simplified()->StringToNumber(), input));
+  }
   return NoChange();
 }
 
@@ -1684,6 +1728,10 @@
       return ReduceJSModulus(node);
     case IrOpcode::kJSToBoolean:
       return ReduceJSToBoolean(node);
+    case IrOpcode::kJSToInteger:
+      return ReduceJSToInteger(node);
+    case IrOpcode::kJSToLength:
+      return ReduceJSToLength(node);
     case IrOpcode::kJSToNumber:
       return ReduceJSToNumber(node);
     case IrOpcode::kJSToString:
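ReduceJSToInteger and ReduceJSToLength above only fire when the input is already typed as an integer (or minus zero); ToInteger is then the identity, and ToLength folds the spec clamp to [0, 2^53 - 1] into Select nodes while tightening the input type after each clamp. A plain C++ reference for the clamp those Selects implement (a sketch, not V8 code):

    // ToLength for an input already known to be integral.
    double ToLengthOfInteger(double x) {
      const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      if (x <= 0.0) return 0.0;
      if (x >= kMaxSafeInteger) return kMaxSafeInteger;
      return x;
    }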
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 4621a45..1517871 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -62,6 +62,8 @@
   Reduction ReduceJSEqual(Node* node, bool invert);
   Reduction ReduceJSStrictEqual(Node* node, bool invert);
   Reduction ReduceJSToBoolean(Node* node);
+  Reduction ReduceJSToInteger(Node* node);
+  Reduction ReduceJSToLength(Node* node);
   Reduction ReduceJSToNumberInput(Node* input);
   Reduction ReduceJSToNumber(Node* node);
   Reduction ReduceJSToStringInput(Node* input);
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index 5abd346..5554282 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -96,7 +96,9 @@
           // the frame at start. So we should move the decision of whether
           // to build a frame or not in the register allocator, and trickle it
           // here and to the code generator.
-          if (frame_at_start || !block->must_deconstruct_frame()) {
+          if (frame_at_start ||
+              !(block->must_deconstruct_frame() ||
+                block->must_construct_frame())) {
             fw = code->InputRpo(instr, 0);
           }
           fallthru = false;
@@ -141,7 +143,7 @@
                                     InstructionSequence* code) {
   if (!FLAG_turbo_jt) return;
 
-  Zone local_zone;
+  Zone local_zone(code->isolate()->allocator());
   ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
 
   // Skip empty blocks when the previous block doesn't fall through.
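Zone no longer has a default constructor; temporary zones are created from the isolate's accounting allocator (presumably so zone memory is accounted against the isolate), which is why this local zone, and the one in loop-peeling below, now receive the allocator explicitly. The pattern as used at this call site:

    Zone local_zone(code->isolate()->allocator());
    ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);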
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index d4a3665..105bd35 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -160,12 +160,17 @@
     case Runtime::kPushCatchContext:
     case Runtime::kReThrow:
     case Runtime::kStringCompare:
-    case Runtime::kStringEquals:
-    case Runtime::kToFastProperties:  // TODO(jarin): Is it safe?
+    case Runtime::kStringEqual:
+    case Runtime::kStringNotEqual:
+    case Runtime::kStringLessThan:
+    case Runtime::kStringLessThanOrEqual:
+    case Runtime::kStringGreaterThan:
+    case Runtime::kStringGreaterThanOrEqual:
     case Runtime::kTraceEnter:
     case Runtime::kTraceExit:
       return 0;
     case Runtime::kInlineGetPrototype:
+    case Runtime::kInlineNewObject:
     case Runtime::kInlineRegExpConstructResult:
     case Runtime::kInlineRegExpExec:
     case Runtime::kInlineSubString:
@@ -174,13 +179,12 @@
     case Runtime::kInlineToName:
     case Runtime::kInlineToNumber:
     case Runtime::kInlineToObject:
+    case Runtime::kInlineToPrimitive:
     case Runtime::kInlineToPrimitive_Number:
     case Runtime::kInlineToPrimitive_String:
-    case Runtime::kInlineToPrimitive:
     case Runtime::kInlineToString:
       return 1;
     case Runtime::kInlineCall:
-    case Runtime::kInlineTailCall:
     case Runtime::kInlineDeoptimizeNow:
     case Runtime::kInlineThrowNotDateError:
       return 2;
@@ -319,8 +323,9 @@
   MachineType target_type = MachineType::AnyTagged();
   // When entering into an OSR function from unoptimized code the JSFunction
   // is not in a register, but it is on the stack in the marker spill slot.
-  LinkageLocation target_loc = is_osr ? LinkageLocation::ForSavedCallerMarker()
-                                      : regloc(kJSFunctionRegister);
+  LinkageLocation target_loc = is_osr
+                                   ? LinkageLocation::ForSavedCallerFunction()
+                                   : regloc(kJSFunctionRegister);
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallJSFunction,  // kind
       target_type,                      // target MachineType
@@ -401,7 +406,8 @@
       properties,                       // properties
       kNoCalleeSaved,                   // callee-saved registers
       kNoCalleeSaved,                   // callee-saved fp
-      flags,                            // flags
+      CallDescriptor::kCanUseRoots |    // flags
+          flags,                        // flags
       descriptor.DebugName(isolate));
 }
 
@@ -431,7 +437,7 @@
 
 
 bool Linkage::ParameterHasSecondaryLocation(int index) const {
-  if (incoming_->kind() != CallDescriptor::kCallJSFunction) return false;
+  if (!incoming_->IsJSFunctionCall()) return false;
   LinkageLocation loc = GetParameterLocation(index);
   return (loc == regloc(kJSFunctionRegister) ||
           loc == regloc(kContextRegister));
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 3012f56..a0434f8 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -76,9 +76,9 @@
                               kPointerSize);
   }
 
-  static LinkageLocation ForSavedCallerMarker() {
+  static LinkageLocation ForSavedCallerFunction() {
     return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
-                               StandardFrameConstants::kMarkerOffset) /
+                               StandardFrameConstants::kFunctionOffset) /
                               kPointerSize);
   }
 
@@ -160,10 +160,11 @@
     kCanUseRoots = 1u << 6,
     // (arm64 only) native stack should be used for arguments.
     kUseNativeStack = 1u << 7,
-    // (arm64 only) call instruction has to restore JSSP.
+    // (arm64 only) call instruction has to restore JSSP or CSP.
     kRestoreJSSP = 1u << 8,
+    kRestoreCSP = 1u << 9,
     // Causes the code generator to initialize the root register.
-    kInitializeRootRegister = 1u << 9,
+    kInitializeRootRegister = 1u << 10,
     kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
   };
   typedef base::Flags<Flag> Flags;
@@ -367,6 +368,11 @@
   // Get the location where an incoming OSR value is stored.
   LinkageLocation GetOsrValueLocation(int index) const;
 
+  // A special {Parameter} index for Stub Calls that represents context.
+  static int GetStubCallContextParamIndex(int parameter_count) {
+    return parameter_count + 0;  // Parameter (arity + 0) is special.
+  }
+
   // A special {Parameter} index for JSCalls that represents the new target.
   static int GetJSCallNewTargetParamIndex(int parameter_count) {
     return parameter_count + 0;  // Parameter (arity + 0) is special.
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index 97f1ab0..e19368d 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -4,8 +4,11 @@
 
 #include "src/compiler/load-elimination.h"
 
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -13,7 +16,6 @@
 
 LoadElimination::~LoadElimination() {}
 
-
 Reduction LoadElimination::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kLoadField:
@@ -24,7 +26,6 @@
   return NoChange();
 }
 
-
 Reduction LoadElimination::ReduceLoadField(Node* node) {
   DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
   FieldAccess const access = FieldAccessOf(node->op());
@@ -45,8 +46,22 @@
         if (access == FieldAccessOf(effect->op())) {
           if (object == NodeProperties::GetValueInput(effect, 0)) {
             Node* const value = NodeProperties::GetValueInput(effect, 1);
-            ReplaceWithValue(node, value);
-            return Replace(value);
+            Type* stored_value_type = NodeProperties::GetType(value);
+            Type* load_type = NodeProperties::GetType(node);
+            // Make sure the replacement's type is a subtype of the node's
+            // type. Otherwise we could confuse optimizations that were
+            // based on the original type.
+            if (stored_value_type->Is(load_type)) {
+              ReplaceWithValue(node, value);
+              return Replace(value);
+            } else {
+              Node* renamed = graph()->NewNode(
+                  common()->Guard(Type::Intersect(stored_value_type, load_type,
+                                                  graph()->zone())),
+                  value, NodeProperties::GetControlInput(node));
+              ReplaceWithValue(node, renamed);
+              return Replace(renamed);
+            }
           }
           // TODO(turbofan): Alias analysis to the rescue?
           return NoChange();
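Load elimination now forwards a stored value only when its static type is a subtype of the load's type; otherwise the value is renamed through a Guard node typed with the intersection of the two, so later phases never see the load's type widen. The reducer therefore needs the graph and the common-operator builder, which the header change below threads through the constructor. A small sketch of the guard construction with illustrative types (the concrete types are assumptions, not from this patch):

    Type* stored_value_type = Type::Number();   // type of the stored value
    Type* load_type = Type::SignedSmall();      // type recorded on the load
    Node* renamed = graph()->NewNode(
        common()->Guard(
            Type::Intersect(stored_value_type, load_type, graph()->zone())),
        value, NodeProperties::GetControlInput(node));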
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index db87d9a..92c6dd0 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -11,15 +11,25 @@
 namespace internal {
 namespace compiler {
 
+class CommonOperatorBuilder;
+class Graph;
+
 class LoadElimination final : public AdvancedReducer {
  public:
-  explicit LoadElimination(Editor* editor) : AdvancedReducer(editor) {}
+  explicit LoadElimination(Editor* editor, Graph* graph,
+                           CommonOperatorBuilder* common)
+      : AdvancedReducer(editor), graph_(graph), common_(common) {}
   ~LoadElimination() final;
 
   Reduction Reduce(Node* node) final;
 
  private:
+  CommonOperatorBuilder* common() const { return common_; }
+  Graph* graph() { return graph_; }
+
   Reduction ReduceLoadField(Node* node);
+  Graph* graph_;
+  CommonOperatorBuilder* common_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h
index 2ed5bc2..b8bc395 100644
--- a/src/compiler/loop-analysis.h
+++ b/src/compiler/loop-analysis.h
@@ -116,6 +116,8 @@
     return nullptr;
   }
 
+  Zone* zone() const { return zone_; }
+
  private:
   friend class LoopFinderImpl;
 
diff --git a/src/compiler/loop-peeling.cc b/src/compiler/loop-peeling.cc
index b553a9f..5379596 100644
--- a/src/compiler/loop-peeling.cc
+++ b/src/compiler/loop-peeling.cc
@@ -184,7 +184,7 @@
 
 
 bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
-  Zone zone;
+  Zone zone(loop_tree->zone()->allocator());
   NodeVector exits(&zone);
   NodeVector rets(&zone);
   FindLoopExits(loop_tree, loop, exits, rets);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 3b6f21b..6a506d2 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -148,6 +148,7 @@
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
   V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                \
   V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
   V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)                \
   V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)              \
@@ -195,7 +196,13 @@
   V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)                \
   V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                       \
   V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                       \
-  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)
+  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)                 \
+  V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2)                           \
+  V(Int32PairSub, Operator::kNoProperties, 4, 0, 2)                           \
+  V(Int32PairMul, Operator::kNoProperties, 4, 0, 2)                           \
+  V(Word32PairShl, Operator::kNoProperties, 3, 0, 2)                          \
+  V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                          \
+  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
@@ -467,6 +474,19 @@
   return nullptr;
 }
 
+// On 32-bit platforms we need to get a reference to optional operators of
+// 64-bit instructions for later Int64Lowering, even though 32-bit platforms
+// don't support the original 64-bit instruction.
+const Operator* MachineOperatorBuilder::Word64PopcntPlaceholder() {
+  return &cache_.kWord64Popcnt;
+}
+
+// On 32-bit platforms we need to get a reference to optional operators of
+// 64-bit instructions for later Int64Lowering, even though 32-bit platforms
+// don't support the original 64-bit instruction.
+const Operator* MachineOperatorBuilder::Word64CtzPlaceholder() {
+  return &cache_.kWord64Ctz;
+}
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
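Word64PopcntPlaceholder and Word64CtzPlaceholder expose the cached 64-bit operators even on 32-bit targets so that a later Int64Lowering pass can rewrite them; the new Int32Pair* / Word32Pair* operators are the 32-bit building blocks that lowering targets (an arithmetic pair op takes four 32-bit inputs and produces a low/high output pair, a pair shift takes three inputs). A plain C++ model of the shape Int32PairAdd computes (a sketch for reference, not the lowering itself):

    #include <cstdint>

    // 64-bit addition from 32-bit halves, producing a low/high output pair.
    void PairAdd(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
                 uint32_t* out_lo, uint32_t* out_hi) {
      uint64_t lo = static_cast<uint64_t>(a_lo) + b_lo;
      *out_lo = static_cast<uint32_t>(lo);
      *out_hi = a_hi + b_hi + static_cast<uint32_t>(lo >> 32);  // carry
    }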
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index c5a80aa..68e393a 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -164,6 +164,7 @@
   const OptionalOperator Word32Ctz();
   const OptionalOperator Word32Popcnt();
   const OptionalOperator Word64Popcnt();
+  const Operator* Word64PopcntPlaceholder();
   const OptionalOperator Word32ReverseBits();
   const OptionalOperator Word64ReverseBits();
   bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
@@ -177,8 +178,16 @@
   const Operator* Word64Ror();
   const Operator* Word64Clz();
   const OptionalOperator Word64Ctz();
+  const Operator* Word64CtzPlaceholder();
   const Operator* Word64Equal();
 
+  const Operator* Int32PairAdd();
+  const Operator* Int32PairSub();
+  const Operator* Int32PairMul();
+  const Operator* Word32PairShl();
+  const Operator* Word32PairShr();
+  const Operator* Word32PairSar();
+
   const Operator* Int32Add();
   const Operator* Int32AddWithOverflow();
   const Operator* Int32Sub();
@@ -219,6 +228,7 @@
   const Operator* ChangeFloat32ToFloat64();
   const Operator* ChangeFloat64ToInt32();   // narrowing
   const Operator* ChangeFloat64ToUint32();  // narrowing
+  const Operator* TruncateFloat64ToUint32();
   const Operator* TruncateFloat32ToInt32();
   const Operator* TruncateFloat32ToUint32();
   const Operator* TryTruncateFloat32ToInt64();
@@ -340,6 +350,7 @@
   V(Word, Shr)            \
   V(Word, Sar)            \
   V(Word, Ror)            \
+  V(Word, Clz)            \
   V(Word, Equal)          \
   V(Int, Add)             \
   V(Int, Sub)             \
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index cdd7e34..9b0d706 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -120,8 +120,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -221,7 +224,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,7 +239,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore ra if the frame was elided.
       __ Push(ra);
     }
@@ -243,7 +247,7 @@
                          remembered_set_action, save_fp_mode);
     __ Addu(scratch1_, object_, index_);
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(ra);
     }
   }
@@ -255,6 +259,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -467,6 +472,13 @@
     __ bind(&done);                                                           \
   }
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(sp, fp);
+  __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -482,20 +494,44 @@
     __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Branch(&done, ne, scratch1,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ lw(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   MipsOperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
-
-  switch (ArchOpcodeField::decode(opcode)) {
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -509,9 +545,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -537,6 +579,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -547,6 +590,11 @@
 
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
@@ -606,7 +654,7 @@
       __ mov(i.OutputRegister(), fp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ lw(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -875,6 +923,14 @@
     case kMipsCmpD:
       // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
       break;
+    case kMipsMulPair: {
+      __ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
+              i.InputRegister(2));
+      __ mul(kScratchReg, i.InputRegister(0), i.InputRegister(3));
+      __ mul(kScratchReg2, i.InputRegister(1), i.InputRegister(2));
+      __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg);
+      __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg2);
+    } break;
     case kMipsAddD:
       // TODO(plind): add special case: combine mult & add.
       __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1525,17 +1581,16 @@
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_shrink_slots = frame()->GetSpillSlotCount();
-  if (descriptor->IsCFunctionCall()) {
-    __ Push(ra, fp);
-    __ mov(fp, sp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ Push(ra, fp);
+      __ mov(fp, sp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1597,17 +1652,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(sp, fp);
-    __ Pop(ra, fp);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ Branch(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(sp, fp);
-      __ Pop(ra, fp);
+      AssembleDeconstructFrame();
     }
   }
   if (pop_count != 0) {
@@ -1665,9 +1718,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ lw(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ lw(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 64aecd0..d85c2a7 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -59,6 +59,7 @@
   V(MipsSqrtD)                     \
   V(MipsMaxD)                      \
   V(MipsMinD)                      \
+  V(MipsMulPair)                   \
   V(MipsFloat32RoundDown)          \
   V(MipsFloat32RoundTruncate)      \
   V(MipsFloat32RoundUp)            \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index df972f7..f86ffe7 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -114,8 +114,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -189,9 +194,7 @@
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
     inputs[input_count++] = g.UseUniqueRegister(index);
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -392,6 +395,27 @@
   VisitRRO(this, kMipsSar, node);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  MipsOperandGenerator g(this);
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseUniqueRegister(node->InputAt(3))};
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+  Emit(kMipsMulPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitRRO(this, kMipsRor, node);
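VisitInt32PairMul above emits kMipsMulPair; the code-generator sequence earlier in this patch (Mulu of the two low words, then two cross-product mul/Addu steps into the high word) computes the low 64 bits of a product of two 64-bit values held as 32-bit register pairs. A plain C++ model of what that sequence computes (a sketch for reference, not V8 code):

    #include <cstdint>

    uint64_t MulPair(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi) {
      uint64_t lo_product = static_cast<uint64_t>(a_lo) * b_lo;  // Mulu: hi:lo
      uint32_t out_lo = static_cast<uint32_t>(lo_product);
      uint32_t out_hi = static_cast<uint32_t>(lo_product >> 32) +
                        a_lo * b_hi +  // mul kScratchReg,  input 0, input 3
                        a_hi * b_lo;   // mul kScratchReg2, input 1, input 2
      return (static_cast<uint64_t>(out_hi) << 32) | out_lo;
    }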
@@ -612,6 +636,9 @@
   VisitRR(this, kMipsTruncUwD, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kMipsTruncUwD, node);
+}
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   MipsOperandGenerator g(this);
@@ -878,6 +905,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -981,6 +1009,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1086,9 +1117,6 @@
   VisitWordCompare(selector, node, kMipsCmp, cont, false);
 }
 
-}  // namespace
-
-
 // Shared routine for word comparisons against zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
@@ -1177,12 +1205,17 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
+                             g.TempImmediate(0), cont->frame_state());
   } else {
+    DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
   }
 }
 
+}  // namespace
 
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
@@ -1190,6 +1223,17 @@
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   MipsOperandGenerator g(this);
@@ -1220,7 +1264,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1230,32 +1274,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMipsAddOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1265,7 +1311,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMipsSubOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1274,37 +1320,39 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 373a1a6..c6341b1 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -120,8 +120,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -221,7 +224,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,7 +239,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore ra if the frame was elided.
       __ Push(ra);
     }
@@ -243,7 +247,7 @@
                          remembered_set_action, save_fp_mode);
     __ Daddu(scratch1_, object_, index_);
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(ra);
     }
   }
@@ -255,6 +259,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -479,6 +484,13 @@
     __ bind(&done);                                                           \
   }
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(sp, fp);
+  __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -494,20 +506,44 @@
     __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Branch(&done, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Load arguments count from current arguments adaptor frame (note, it
+  // does not include receiver).
+  Register caller_args_count_reg = scratch1;
+  __ ld(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   MipsOperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
-
-  switch (ArchOpcodeField::decode(opcode)) {
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -521,9 +557,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -548,6 +590,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -557,6 +600,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
@@ -616,7 +664,7 @@
       __ mov(i.OutputRegister(), fp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ ld(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -1831,17 +1879,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ Push(ra, fp);
-    __ mov(fp, sp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ Push(ra, fp);
+      __ mov(fp, sp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
@@ -1900,17 +1947,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(sp, fp);
-    __ Pop(ra, fp);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ Branch(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(sp, fp);
-      __ Pop(ra, fp);
+      AssembleDeconstructFrame();
     }
   }
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
@@ -1969,9 +2014,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ ld(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ ld(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
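
AssemblePopArgumentsAdaptorFrame (added above, and again in the ppc backend below) covers tail calls made by a function that was itself invoked through an arguments adaptor frame: in that case the number of argument slots on the stack is the actual count recorded in the adaptor frame, not the function's formal parameter count, and that count (plus the receiver) is what must be dropped before jumping to the new target; the actual shuffling is done by PrepareForTailCall. A toy model of the count selection, purely for illustration:

#include <cassert>

// Toy model only: how many stack slots the tail-calling function must drop.
int ArgumentSlotsToDrop(bool under_arguments_adaptor,
                        int formal_parameter_count,
                        int adaptor_length /* actual args, without receiver */) {
  int args = under_arguments_adaptor ? adaptor_length : formal_parameter_count;
  return args + 1;  // include the receiver slot
}

int main() {
  // Invoked via f.apply(receiver, [1, 2, 3, 4, 5]) but declared with 2 formals:
  assert(ArgumentSlotsToDrop(true, 2, 5) == 6);
  // Invoked normally: the formal count matches what is on the stack.
  assert(ArgumentSlotsToDrop(false, 2, 5) == 3);
  return 0;
}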
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 44a5470..5e2b5f2 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -119,8 +119,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -196,9 +201,7 @@
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
     inputs[input_count++] = g.UseUniqueRegister(index);
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -922,6 +925,9 @@
   VisitRR(this, kMips64TruncUwD, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kMips64TruncUwD, node);
+}
 
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   Mips64OperandGenerator g(this);
@@ -1320,6 +1326,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -1431,6 +1438,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1542,7 +1552,6 @@
   VisitWordCompare(selector, node, kMips64Cmp, cont, false);
 }
 
-}  // namespace
 
 
 void EmitWordCompareZero(InstructionSelector* selector, Node* value,
@@ -1553,6 +1562,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
+                             g.TempImmediate(0), cont->frame_state());
   } else {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
@@ -1677,6 +1689,7 @@
   EmitWordCompareZero(selector, value, cont);
 }
 
+}  // namespace
 
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
@@ -1684,6 +1697,17 @@
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   Mips64OperandGenerator g(this);
@@ -1714,7 +1738,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1725,32 +1749,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64Dadd, &cont);
   }
   FlagsContinuation cont;
@@ -1760,7 +1786,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64Dsub, &cont);
   }
   FlagsContinuation cont;
@@ -1770,7 +1796,7 @@
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64DaddOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1780,7 +1806,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64DsubOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1789,7 +1815,7 @@
 
 
 void InstructionSelector::VisitWord64Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1800,61 +1826,65 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index c78e15e..b038d15 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -21,6 +21,8 @@
   V(IfDefault)             \
   V(Merge)                 \
   V(Deoptimize)            \
+  V(DeoptimizeIf)          \
+  V(DeoptimizeUnless)      \
   V(Return)                \
   V(TailCall)              \
   V(Terminate)             \
@@ -94,10 +96,12 @@
 
 #define JS_CONVERSION_UNOP_LIST(V) \
   V(JSToBoolean)                   \
-  V(JSToNumber)                    \
-  V(JSToString)                    \
+  V(JSToInteger)                   \
+  V(JSToLength)                    \
   V(JSToName)                      \
-  V(JSToObject)
+  V(JSToNumber)                    \
+  V(JSToObject)                    \
+  V(JSToString)
 
 #define JS_OTHER_UNOP_LIST(V) \
   V(JSTypeOf)
@@ -181,10 +185,17 @@
   V(NumberShiftLeft)               \
   V(NumberShiftRight)              \
   V(NumberShiftRightLogical)       \
+  V(NumberImul)                    \
+  V(NumberClz32)                   \
+  V(NumberCeil)                    \
+  V(NumberFloor)                   \
+  V(NumberRound)                   \
+  V(NumberTrunc)                   \
   V(NumberToInt32)                 \
   V(NumberToUint32)                \
   V(NumberIsHoleNaN)               \
   V(PlainPrimitiveToNumber)        \
+  V(StringToNumber)                \
   V(ChangeTaggedToInt32)           \
   V(ChangeTaggedToUint32)          \
   V(ChangeTaggedToFloat64)         \
@@ -202,7 +213,8 @@
   V(StoreElement)                  \
   V(ObjectIsNumber)                \
   V(ObjectIsReceiver)              \
-  V(ObjectIsSmi)
+  V(ObjectIsSmi)                   \
+  V(ObjectIsUndetectable)
 
 // Opcodes for Machine-level operators.
 #define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -273,6 +285,7 @@
   V(ChangeFloat32ToFloat64)     \
   V(ChangeFloat64ToInt32)       \
   V(ChangeFloat64ToUint32)      \
+  V(TruncateFloat64ToUint32)    \
   V(TruncateFloat32ToInt32)     \
   V(TruncateFloat32ToUint32)    \
   V(TryTruncateFloat32ToInt64)  \
@@ -330,7 +343,13 @@
   V(LoadFramePointer)           \
   V(LoadParentFramePointer)     \
   V(CheckedLoad)                \
-  V(CheckedStore)
+  V(CheckedStore)               \
+  V(Int32PairAdd)               \
+  V(Int32PairSub)               \
+  V(Int32PairMul)               \
+  V(Word32PairShl)              \
+  V(Word32PairShr)              \
+  V(Word32PairSar)
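
These opcode tables are X-macro lists: every V(Name) entry is expanded by whatever macro the including file supplies, which is how the new DeoptimizeIf, DeoptimizeUnless, and Int32Pair*/Word32Pair* entries become IrOpcode enum values and switch cases elsewhere without further edits. A self-contained sketch of the expansion technique (list and names hypothetical):

#include <cstdio>

#define SAMPLE_OP_LIST(V) \
  V(DeoptimizeIf)         \
  V(DeoptimizeUnless)     \
  V(Word32PairShl)

enum SampleOpcode {
#define DECLARE_OPCODE(Name) kSample##Name,
  SAMPLE_OP_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

const char* SampleOpcodeName(SampleOpcode opcode) {
  switch (opcode) {
#define OPCODE_CASE(Name) \
  case kSample##Name:     \
    return #Name;
    SAMPLE_OP_LIST(OPCODE_CASE)
#undef OPCODE_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", SampleOpcodeName(kSampleDeoptimizeIf)); }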
 
 #define VALUE_OP_LIST(V) \
   COMMON_OP_LIST(V)      \
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 1ee31d5..7f38ca7 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -58,6 +58,8 @@
     case IrOpcode::kJSCreateScriptContext:
 
     // Conversions
+    case IrOpcode::kJSToInteger:
+    case IrOpcode::kJSToLength:
     case IrOpcode::kJSToName:
     case IrOpcode::kJSToNumber:
     case IrOpcode::kJSToObject:
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 21c34fc..1d7e967 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -267,23 +267,26 @@
     register_allocation_data_ = nullptr;
   }
 
-  void InitializeInstructionSequence() {
+  void InitializeInstructionSequence(const CallDescriptor* descriptor) {
     DCHECK(sequence_ == nullptr);
     InstructionBlocks* instruction_blocks =
         InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                   schedule());
     sequence_ = new (instruction_zone()) InstructionSequence(
         info()->isolate(), instruction_zone(), instruction_blocks);
+    if (descriptor && descriptor->RequiresFrameAsIncoming()) {
+      sequence_->instruction_blocks()[0]->mark_needs_frame();
+    } else {
+      DCHECK_EQ(0, descriptor->CalleeSavedFPRegisters());
+      DCHECK_EQ(0, descriptor->CalleeSavedRegisters());
+    }
   }
 
   void InitializeFrameData(CallDescriptor* descriptor) {
     DCHECK(frame_ == nullptr);
     int fixed_frame_size = 0;
     if (descriptor != nullptr) {
-      fixed_frame_size = (descriptor->IsCFunctionCall())
-                             ? StandardFrameConstants::kFixedSlotCountAboveFp +
-                                   StandardFrameConstants::kCPSlotCount
-                             : StandardFrameConstants::kFixedSlotCount;
+      fixed_frame_size = CalculateFixedFrameSize(descriptor);
     }
     frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
   }
@@ -338,6 +341,16 @@
   Zone* register_allocation_zone_;
   RegisterAllocationData* register_allocation_data_;
 
+  int CalculateFixedFrameSize(CallDescriptor* descriptor) {
+    if (descriptor->IsJSFunctionCall()) {
+      return StandardFrameConstants::kFixedSlotCount;
+    }
+    return descriptor->IsCFunctionCall()
+               ? (CommonFrameConstants::kFixedSlotCountAboveFp +
+                  CommonFrameConstants::kCPSlotCount)
+               : TypedFrameConstants::kFixedSlotCount;
+  }
+
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
 
@@ -539,7 +552,7 @@
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
-    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+    JSCallReducer call_reducer(data->jsgraph(),
                                data->info()->is_deoptimization_enabled()
                                    ? JSCallReducer::kDeoptimizationEnabled
                                    : JSCallReducer::kNoFlags,
@@ -615,7 +628,8 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
-    LoadElimination load_elimination(&graph_reducer);
+    LoadElimination load_elimination(&graph_reducer, data->graph(),
+                                     data->common());
     JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
     MaybeHandle<LiteralsArray> literals_array =
         data->info()->is_native_context_specializing()
@@ -639,6 +653,7 @@
         data->info()->is_deoptimization_enabled()
             ? JSIntrinsicLowering::kDeoptimizationEnabled
             : JSIntrinsicLowering::kDeoptimizationDisabled);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -649,6 +664,7 @@
     AddReducer(data, &graph_reducer, &typed_lowering);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &load_elimination);
+    AddReducer(data, &graph_reducer, &simple_reducer);
     AddReducer(data, &graph_reducer, &common_reducer);
     graph_reducer.ReduceGraph();
   }
@@ -1079,7 +1095,7 @@
 
 
 Handle<Code> Pipeline::GenerateCode() {
-  ZonePool zone_pool;
+  ZonePool zone_pool(isolate()->allocator());
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
 
   if (FLAG_turbo_stats) {
@@ -1240,7 +1256,7 @@
   CompilationInfo info(debug_name, isolate, graph->zone(), flags);
 
   // Construct a pipeline for scheduling and code generation.
-  ZonePool zone_pool;
+  ZonePool zone_pool(isolate->allocator());
   PipelineData data(&zone_pool, &info, graph, schedule);
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats) {
@@ -1281,7 +1297,7 @@
                                               Graph* graph,
                                               Schedule* schedule) {
   // Construct a pipeline for scheduling and code generation.
-  ZonePool zone_pool;
+  ZonePool zone_pool(info->isolate()->allocator());
   PipelineData data(&zone_pool, info, graph, schedule);
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats) {
@@ -1304,7 +1320,7 @@
                                            InstructionSequence* sequence,
                                            bool run_verifier) {
   CompilationInfo info("testing", sequence->isolate(), sequence->zone());
-  ZonePool zone_pool;
+  ZonePool zone_pool(sequence->isolate()->allocator());
   PipelineData data(&zone_pool, &info, sequence);
   Pipeline pipeline(&info);
   pipeline.data_ = &data;
@@ -1329,7 +1345,7 @@
                                                        data->schedule());
   }
 
-  data->InitializeInstructionSequence();
+  data->InitializeInstructionSequence(call_descriptor);
 
   data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
@@ -1358,6 +1374,7 @@
   AllocateRegisters(
       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
       call_descriptor, run_verifier);
+  Run<FrameElisionPhase>();
   if (data->compilation_failed()) {
     info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
     return Handle<Code>();
@@ -1366,11 +1383,7 @@
   BeginPhaseKind("code generation");
   // TODO(mtrofin): move this off to the register allocator.
   bool generate_frame_at_start =
-      !FLAG_turbo_frame_elision || !data_->info()->IsStub() ||
-      !data_->frame()->needs_frame() ||
-      data_->sequence()->instruction_blocks().front()->needs_frame() ||
-      linkage.GetIncomingDescriptor()->CalleeSavedFPRegisters() != 0 ||
-      linkage.GetIncomingDescriptor()->CalleeSavedRegisters() != 0;
+      data_->sequence()->instruction_blocks().front()->must_construct_frame();
   // Optimize jumps.
   if (FLAG_turbo_jt) {
     Run<JumpThreadingPhase>(generate_frame_at_start);
@@ -1430,7 +1443,7 @@
   base::SmartPointer<Zone> verifier_zone;
   RegisterAllocatorVerifier* verifier = nullptr;
   if (run_verifier) {
-    verifier_zone.Reset(new Zone());
+    verifier_zone.Reset(new Zone(isolate()->allocator()));
     verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
         verifier_zone.get(), config, data->sequence());
   }
@@ -1438,6 +1451,8 @@
   base::SmartArrayPointer<char> debug_name;
 #ifdef DEBUG
   debug_name = info()->GetDebugName();
+  data_->sequence()->ValidateEdgeSplitForm();
+  data_->sequence()->ValidateDeferredBlockExitPaths();
 #endif
 
   data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
@@ -1477,12 +1492,6 @@
     Run<MergeSplintersPhase>();
   }
 
-  // We plan to enable frame elision only for stubs and bytecode handlers.
-  if (FLAG_turbo_frame_elision && info()->IsStub()) {
-    Run<LocateSpillSlotsPhase>();
-    Run<FrameElisionPhase>();
-  }
-
   Run<AssignSpillSlotsPhase>();
 
   Run<CommitAssignmentPhase>();
@@ -1493,6 +1502,8 @@
     Run<OptimizeMovesPhase>();
   }
 
+  Run<LocateSpillSlotsPhase>();
+
   if (FLAG_trace_turbo_graph) {
     OFStream os(stdout);
     PrintableInstructionSequence printable = {config, data->sequence()};
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 7fc6dd9..6f1e588 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -32,6 +32,7 @@
   RCBit OutputRCBit() const {
     switch (instr_->flags_mode()) {
       case kFlags_branch:
+      case kFlags_deoptimize:
       case kFlags_set:
         return SetRC;
       case kFlags_none:
@@ -103,8 +104,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -183,7 +187,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -197,7 +202,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ mflr(scratch1_);
       __ Push(scratch1_);
@@ -211,7 +216,7 @@
       __ add(scratch1_, object_, offset_);
     }
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Pop(scratch1_);
       __ mtlr(scratch1_);
@@ -226,6 +231,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -293,20 +299,24 @@
 
 }  // namespace
 
-#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr)                            \
+#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round)                     \
   do {                                                               \
     __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                  i.OutputRCBit());                                   \
+    if (round) {                                                     \
+      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
+    }                                                                \
   } while (0)
 
-
-#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr)                           \
+#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round)                    \
   do {                                                               \
     __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                  i.InputDoubleRegister(1), i.OutputRCBit());         \
+    if (round) {                                                     \
+      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
+    }                                                                \
   } while (0)
 
-
 #define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
   do {                                                         \
     if (HasRegisterInput(instr, 1)) {                          \
@@ -662,6 +672,11 @@
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
   } while (0)
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -678,12 +693,36 @@
     __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ RestoreFrameStateForTailCall();
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&done);
+
+  // Load arguments count from current arguments adaptor frame (note, it
+  // does not include receiver).
+  Register caller_args_count_reg = scratch1;
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
@@ -708,9 +747,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (HasRegisterInput(instr, 0)) {
         __ addi(ip, i.InputRegister(0),
                 Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -745,6 +790,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -756,6 +802,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -823,7 +874,7 @@
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mr(i.OutputRegister(), fp);
@@ -928,6 +979,71 @@
       ASSEMBLE_BINOP_INT_RC(srad, sradi);
       break;
 #endif
+#if !V8_TARGET_ARCH_PPC64
+    case kPPC_AddPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ addc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+      __ adde(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_SubPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ subc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+      __ sube(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_MulPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ mullw(i.TempRegister(0), i.InputRegister(0), i.InputRegister(3));
+      __ mullw(i.TempRegister(1), i.InputRegister(2), i.InputRegister(1));
+      __ add(i.TempRegister(0), i.TempRegister(0), i.TempRegister(1));
+      __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+      __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
+      __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
+      break;
+    case kPPC_ShiftLeftPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1),
+                         i.InputInt32(2));
+      } else {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                         i.InputRegister(2));
+      }
+      break;
+    case kPPC_ShiftRightPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1),
+                          i.InputInt32(2));
+      } else {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                          i.InputRegister(2));
+      }
+      break;
+    case kPPC_ShiftRightAlgPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+                             i.InputRegister(0), i.InputRegister(1),
+                             i.InputInt32(2));
+      } else {
+        __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+                             i.InputRegister(0), i.InputRegister(1),
+                             kScratchReg, i.InputRegister(2));
+      }
+      break;
+#endif
     case kPPC_RotRight32:
       if (HasRegisterInput(instr, 1)) {
         __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
@@ -992,7 +1108,7 @@
       ASSEMBLE_ADD_WITH_OVERFLOW32();
       break;
     case kPPC_AddDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fadd);
+      ASSEMBLE_FLOAT_BINOP_RC(fadd, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Sub:
 #if V8_TARGET_ARCH_PPC64
@@ -1015,7 +1131,7 @@
       ASSEMBLE_SUB_WITH_OVERFLOW32();
       break;
     case kPPC_SubDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fsub);
+      ASSEMBLE_FLOAT_BINOP_RC(fsub, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Mul32:
       __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1036,7 +1152,7 @@
                 i.OutputRCBit());
       break;
     case kPPC_MulDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fmul);
+      ASSEMBLE_FLOAT_BINOP_RC(fmul, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Div32:
       __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -1059,7 +1175,7 @@
       break;
 #endif
     case kPPC_DivDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fdiv);
+      ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Mod32:
       ASSEMBLE_MODULO(divw, mullw);
@@ -1092,25 +1208,25 @@
       ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
       break;
     case kPPC_AbsDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(fabs);
+      ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
       break;
     case kPPC_SqrtDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
+      ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode()));
       break;
     case kPPC_FloorDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(frim);
+      ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode()));
       break;
     case kPPC_CeilDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(frip);
+      ASSEMBLE_FLOAT_UNOP_RC(frip, MiscField::decode(instr->opcode()));
       break;
     case kPPC_TruncateDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(friz);
+      ASSEMBLE_FLOAT_UNOP_RC(friz, MiscField::decode(instr->opcode()));
       break;
     case kPPC_RoundDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(frin);
+      ASSEMBLE_FLOAT_UNOP_RC(frin, MiscField::decode(instr->opcode()));
       break;
     case kPPC_NegDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(fneg);
+      ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
       break;
     case kPPC_Cntlz32:
       __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
@@ -1316,7 +1432,7 @@
     }
 #endif
     case kPPC_DoubleToFloat32:
-      ASSEMBLE_FLOAT_UNOP_RC(frsp);
+      ASSEMBLE_FLOAT_UNOP_RC(frsp, 0);
       break;
     case kPPC_Float32ToDouble:
       // Nothing to do.
@@ -1589,36 +1705,36 @@
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  // TODO(turbofan): We should be able to generate better code by sharing the
+  // actual final call site and just bl'ing to it here, similar to what we do
+  // in the lithium backend.
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
 }
 
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ function_descriptor();
-    __ mflr(r0);
-    if (FLAG_enable_embedded_constant_pool) {
-      __ Push(r0, fp, kConstantPoolRegister);
-      // Adjust FP to point to saved FP.
-      __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ function_descriptor();
+      __ mflr(r0);
+      if (FLAG_enable_embedded_constant_pool) {
+        __ Push(r0, fp, kConstantPoolRegister);
+        // Adjust FP to point to saved FP.
+        __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+      } else {
+        __ Push(r0, fp);
+        __ mr(fp, sp);
+      }
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
     } else {
-      __ Push(r0, fp);
-      __ mr(fp, sp);
+      StackFrame::Type type = info()->GetOutputStackFrameType();
+      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
+      // efficient initialization of the constant pool pointer register).
+      __ StubPrologue(type);
     }
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
-  } else if (frame()->needs_frame()) {
-    if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
-      // TODO(mbrandy): Restrict only to the wasm wrapper case.
-      __ StubPrologue();
-    } else {
-      __ StubPrologue(ip);
-    }
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
@@ -1687,20 +1803,18 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ b(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+      AssembleDeconstructFrame();
     }
-  } else {
-    __ Drop(pop_count);
   }
-  __ Ret();
+  __ Ret(pop_count);
 }
 
 
@@ -1753,9 +1867,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ LoadP(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ LoadP(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
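
The kPPC_AddPair and kPPC_SubPair cases added above implement 64-bit arithmetic on 32-bit PPC as two dependent instructions: addc adds the low words and records the carry, and adde folds that carry into the sum of the high words (kPPC_MulPair builds the 64-bit product from mullw/mulhwu partial products in the same spirit). A small self-contained model of the add case, checking the carry handling with plain integer arithmetic:

#include <cassert>
#include <cstdint>

// Models the addc/adde sequence: add the low halves, capture the carry out,
// then fold the carry into the sum of the high halves.
void AddPair(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
             uint32_t* out_lo, uint32_t* out_hi) {
  uint32_t lo = a_lo + b_lo;           // addc
  uint32_t carry = lo < a_lo ? 1 : 0;  // carry out of the low-word addition
  *out_lo = lo;
  *out_hi = a_hi + b_hi + carry;       // adde
}

int main() {
  uint32_t lo = 0, hi = 0;
  // 0x00000001FFFFFFFF + 1 == 0x0000000200000000
  AddPair(0xFFFFFFFFu, 0x1u, 0x1u, 0x0u, &lo, &hi);
  assert(lo == 0x0u);
  assert(hi == 0x2u);
  return 0;
}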
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 877ebb5..66c2e99 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -19,10 +19,13 @@
   V(PPC_Xor)                       \
   V(PPC_ShiftLeft32)               \
   V(PPC_ShiftLeft64)               \
+  V(PPC_ShiftLeftPair)             \
   V(PPC_ShiftRight32)              \
   V(PPC_ShiftRight64)              \
+  V(PPC_ShiftRightPair)            \
   V(PPC_ShiftRightAlg32)           \
   V(PPC_ShiftRightAlg64)           \
+  V(PPC_ShiftRightAlgPair)         \
   V(PPC_RotRight32)                \
   V(PPC_RotRight64)                \
   V(PPC_Not)                       \
@@ -32,14 +35,17 @@
   V(PPC_RotLeftAndClearRight64)    \
   V(PPC_Add)                       \
   V(PPC_AddWithOverflow32)         \
+  V(PPC_AddPair)                   \
   V(PPC_AddDouble)                 \
   V(PPC_Sub)                       \
   V(PPC_SubWithOverflow32)         \
+  V(PPC_SubPair)                   \
   V(PPC_SubDouble)                 \
   V(PPC_Mul32)                     \
   V(PPC_Mul64)                     \
   V(PPC_MulHigh32)                 \
   V(PPC_MulHighU32)                \
+  V(PPC_MulPair)                   \
   V(PPC_MulDouble)                 \
   V(PPC_Div32)                     \
   V(PPC_Div64)                     \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index fd1df6a..e7d7719 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -21,10 +21,13 @@
     case kPPC_Xor:
     case kPPC_ShiftLeft32:
     case kPPC_ShiftLeft64:
+    case kPPC_ShiftLeftPair:
     case kPPC_ShiftRight32:
     case kPPC_ShiftRight64:
+    case kPPC_ShiftRightPair:
     case kPPC_ShiftRightAlg32:
     case kPPC_ShiftRightAlg64:
+    case kPPC_ShiftRightAlgPair:
     case kPPC_RotRight32:
     case kPPC_RotRight64:
     case kPPC_Not:
@@ -34,14 +37,17 @@
     case kPPC_RotLeftAndClearRight64:
     case kPPC_Add:
     case kPPC_AddWithOverflow32:
+    case kPPC_AddPair:
     case kPPC_AddDouble:
     case kPPC_Sub:
     case kPPC_SubWithOverflow32:
+    case kPPC_SubPair:
     case kPPC_SubDouble:
     case kPPC_Mul32:
     case kPPC_Mul64:
     case kPPC_MulHigh32:
     case kPPC_MulHighU32:
+    case kPPC_MulPair:
     case kPPC_MulDouble:
     case kPPC_Div32:
     case kPPC_Div64:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index 244e6f4..5abb5f1 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -71,22 +71,22 @@
 
 namespace {
 
-void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRR(InstructionSelector* selector, InstructionCode opcode,
+             Node* node) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)));
 }
 
-
-void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRR(InstructionSelector* selector, InstructionCode opcode,
+              Node* node) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
                  g.UseRegister(node->InputAt(1)));
 }
 
-
-void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+void VisitRRO(InstructionSelector* selector, InstructionCode opcode, Node* node,
               ImmediateMode operand_mode) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
@@ -96,8 +96,8 @@
 
 
 #if V8_TARGET_ARCH_PPC64
-void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
-                            Node* node) {
+void VisitTryTruncateDouble(InstructionSelector* selector,
+                            InstructionCode opcode, Node* node) {
   PPCOperandGenerator g(selector);
   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
   InstructionOperand outputs[2];
@@ -144,15 +144,20 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
 // Shared routine for multiple binary operations.
 template <typename Matcher>
-void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
-                ImmediateMode operand_mode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, ImmediateMode operand_mode) {
   FlagsContinuation cont;
   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
 }
@@ -247,9 +252,7 @@
       inputs[input_count++] = g.UseUniqueRegister(offset);
       addressing_mode = kMode_MRR;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -720,7 +723,6 @@
   VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
 }
 
-
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Shr(Node* node) {
   PPCOperandGenerator g(this);
@@ -782,9 +784,109 @@
   VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
 }
 
+#if !V8_TARGET_ARCH_PPC64
+void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
+                    Node* node) {
+  PPCOperandGenerator g(selector);
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  VisitPairBinop(this, kPPC_AddPair, node);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  VisitPairBinop(this, kPPC_SubPair, node);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  PPCOperandGenerator g(this);
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+
+  Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
+}
+
+void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
+                    Node* node) {
+  PPCOperandGenerator g(selector);
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitPairShift(this, kPPC_ShiftLeftPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitPairShift(this, kPPC_ShiftRightPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitPairShift(this, kPPC_ShiftRightAlgPair, node);
+}
+#endif
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Sar(Node* node) {
+  PPCOperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+      m.right().Is(32)) {
+    // Just load and sign-extend the interesting 4 bytes instead. This happens,
+    // for example, when we're loading and untagging SMIs.
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    if (mleft.matches() && mleft.index() == nullptr) {
+      int64_t offset = 0;
+      Node* displacement = mleft.displacement();
+      if (displacement != nullptr) {
+        Int64Matcher mdisplacement(displacement);
+        DCHECK(mdisplacement.HasValue());
+        offset = mdisplacement.Value();
+      }
+      offset = SmiWordOffset(offset);
+      if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
+        Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
+             g.DefineAsRegister(node), g.UseRegister(mleft.base()),
+             g.TempImmediate(offset));
+        return;
+      }
+    }
+  }
   VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
 }
 #endif
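
The new matching in VisitWord64Sar folds the common Smi-untagging pattern -- load a 64-bit word, then arithmetic-shift right by 32 -- into a single 4-byte sign-extending load (kPPC_LoadWordS32) of the bytes that hold the value, with SmiWordOffset adjusting the displacement for endianness. A self-contained check that the two forms agree; the "+ 4" below assumes a little-endian host and stands in for what SmiWordOffset computes:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // On 64-bit V8 a Smi keeps its 32-bit value in the upper half of the word.
  int32_t value = -42;
  uint64_t tagged = static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;

  // Original pattern: full-width load followed by an arithmetic shift.
  int64_t via_sar = static_cast<int64_t>(tagged) >> 32;

  // Folded pattern: load only the 4 bytes holding the value, sign-extended.
  int32_t narrow;
  std::memcpy(&narrow, reinterpret_cast<const char*>(&tagged) + 4, sizeof(narrow));
  int64_t via_narrow_load = narrow;

  assert(via_sar == value);
  assert(via_narrow_load == via_sar);
  return 0;
}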
@@ -861,7 +963,6 @@
 }
 #endif
 
-
 void InstructionSelector::VisitInt32Sub(Node* node) {
   PPCOperandGenerator g(this);
   Int32BinopMatcher m(node);
@@ -994,6 +1095,9 @@
   VisitRR(this, kPPC_DoubleToUint32, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kPPC_DoubleToUint32, node);
+}
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
@@ -1108,7 +1212,7 @@
 
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
-  VisitRRR(this, kPPC_AddDouble, node);
+  VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node);
 }
 
 
@@ -1122,11 +1226,11 @@
   PPCOperandGenerator g(this);
   Float32BinopMatcher m(node);
   if (m.left().IsMinusZero()) {
-    Emit(kPPC_NegDouble, g.DefineAsRegister(node),
+    Emit(kPPC_NegDouble | MiscField::encode(1), g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
     return;
   }
-  VisitRRR(this, kPPC_SubDouble, node);
+  VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
 }
 
 
@@ -1157,7 +1261,7 @@
 
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRRR(this, kPPC_MulDouble, node);
+  VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
 }
 
 
@@ -1168,7 +1272,7 @@
 
 
 void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRRR(this, kPPC_DivDouble, node);
+  VisitRRR(this, kPPC_DivDouble | MiscField::encode(1), node);
 }
 
 
@@ -1198,7 +1302,7 @@
 
 
 void InstructionSelector::VisitFloat32Abs(Node* node) {
-  VisitRR(this, kPPC_AbsDouble, node);
+  VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node);
 }
 
 
@@ -1208,7 +1312,7 @@
 
 
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRR(this, kPPC_SqrtDouble, node);
+  VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
 }
 
 
@@ -1218,7 +1322,7 @@
 
 
 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, kPPC_FloorDouble, node);
+  VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node);
 }
 
 
@@ -1228,7 +1332,7 @@
 
 
 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, kPPC_CeilDouble, node);
+  VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node);
 }
 
 
@@ -1238,7 +1342,7 @@
 
 
 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, kPPC_TruncateDouble, node);
+  VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node);
 }
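
The MiscField::encode(1) added to the Float32 visitors in this file pairs with the ASSEMBLE_FLOAT_UNOP_RC/ASSEMBLE_FLOAT_BINOP_RC changes in the ppc code generator: the extra bit tells the backend to append frsp and round the double-precision result back to single precision. The mechanism is a small bit field packed into the instruction code; a simplified, self-contained round trip (field layout hypothetical):

#include <cassert>
#include <cstdint>

using InstructionCode = uint32_t;
constexpr int kArchOpcodeBits = 9;  // hypothetical width of the opcode field

constexpr InstructionCode EncodeMisc(InstructionCode opcode, uint32_t misc) {
  return opcode | (misc << kArchOpcodeBits);
}
constexpr uint32_t DecodeMisc(InstructionCode code) {
  return code >> kArchOpcodeBits;
}
constexpr InstructionCode DecodeArchOpcode(InstructionCode code) {
  return code & ((1u << kArchOpcodeBits) - 1);
}

int main() {
  const InstructionCode kAddDouble = 42;             // placeholder arch opcode
  InstructionCode code = EncodeMisc(kAddDouble, 1);  // Float32 add wants rounding
  assert(DecodeArchOpcode(code) == kAddDouble);
  assert(DecodeMisc(code) == 1);  // code generator appends frsp in this case
  return 0;
}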
 
 
@@ -1264,7 +1368,7 @@
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
                                          kInt16Imm, &cont);
   }
@@ -1276,7 +1380,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
                                          kInt16Imm_Negate, &cont);
   }
@@ -1289,7 +1393,7 @@
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
                                          &cont);
   }
@@ -1300,7 +1404,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
                                          &cont);
   }
@@ -1336,6 +1440,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1573,6 +1680,17 @@
   VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   PPCOperandGenerator g(this);
@@ -1603,7 +1721,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
@@ -1613,32 +1731,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
@@ -1648,62 +1768,66 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 #endif
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
@@ -1750,6 +1874,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
   PPCOperandGenerator g(this);
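
In the Float32 visitors above, the instruction selector reuses the double-precision opcodes and marks the single-precision variants with MiscField::encode(1) inside the packed instruction word. A minimal sketch of that bit-field packing pattern; the field positions and widths below are assumptions for illustration, not V8's actual layout:

    // Sketch of BitField-style opcode packing (layout values assumed).
    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    using ArchOpcodeField = BitField<int, 0, 9>;  // which machine operation
    using MiscField = BitField<int, 23, 9>;       // free per-backend bits

    uint32_t TagSinglePrecision(int arch_opcode) {
      // The "1" in MiscField distinguishes the float32 form of a shared
      // double-precision opcode.
      return ArchOpcodeField::encode(arch_opcode) | MiscField::encode(1);
    }
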
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 0d4b8cb..728d79a 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -39,6 +39,17 @@
 Schedule* RawMachineAssembler::Export() {
   // Compute the correct codegen order.
   DCHECK(schedule_->rpo_order()->empty());
+  OFStream os(stdout);
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- RAW SCHEDULE -------------------------------------------\n");
+    os << *schedule_;
+  }
+  schedule_->EnsureSplitEdgeForm();
+  schedule_->PropagateDeferredMark();
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
+    os << *schedule_;
+  }
   Scheduler::ComputeSpecialRPO(zone(), schedule_);
   // Invalidate RawMachineAssembler.
   Schedule* schedule = schedule_;
@@ -79,15 +90,17 @@
   BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
   for (size_t index = 0; index < case_count; ++index) {
     int32_t case_value = case_values[index];
-    BasicBlock* case_block = Use(case_labels[index]);
+    BasicBlock* case_block = schedule()->NewBasicBlock();
     Node* case_node =
         graph()->NewNode(common()->IfValue(case_value), switch_node);
     schedule()->AddNode(case_block, case_node);
+    schedule()->AddGoto(case_block, Use(case_labels[index]));
     succ_blocks[index] = case_block;
   }
-  BasicBlock* default_block = Use(default_label);
+  BasicBlock* default_block = schedule()->NewBasicBlock();
   Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
   schedule()->AddNode(default_block, default_node);
+  schedule()->AddGoto(default_block, Use(default_label));
   succ_blocks[case_count] = default_block;
   schedule()->AddSwitch(CurrentBlock(), switch_node, succ_blocks, succ_count);
   current_block_ = nullptr;
@@ -247,6 +260,27 @@
   return tail_call;
 }
 
+Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
+                                            Node* context) {
+  const int kArity = 0;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
+  Node* nodes[] = {centry, ref, arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
 
 Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
                                             Node* arg1, Node* context) {
@@ -407,6 +441,7 @@
   DCHECK(!label->bound_);
   label->bound_ = true;
   current_block_ = EnsureBlock(label);
+  current_block_->set_deferred(label->deferred_);
 }
 
 
@@ -459,11 +494,6 @@
   return graph()->NewNodeUnchecked(op, input_count, inputs);
 }
 
-
-RawMachineLabel::RawMachineLabel()
-    : block_(nullptr), used_(false), bound_(false) {}
-
-
 RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
 
 }  // namespace compiler
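
TailCallRuntime0 added above fills the gap below TailCallRuntime1 for runtime functions that take only the context. A hypothetical caller, purely for illustration; the helper name and the choice of Runtime::kStackGuard are assumptions, not taken from this patch:

    // Hypothetical illustration: end a RawMachineAssembler-built stub with a
    // tail call to a zero-argument runtime function.
    void FinishWithRuntimeTailCall(RawMachineAssembler* m, Node* context) {
      // After this the current block is closed; no further code is emitted.
      m->TailCallRuntime0(Runtime::kStackGuard, context);
    }
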
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index a0cb7a0..f3445ac 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -324,6 +324,24 @@
   Node* Uint64Mod(Node* a, Node* b) {
     return AddNode(machine()->Uint64Mod(), a, b);
   }
+  Node* Int32PairAdd(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+    return AddNode(machine()->Int32PairAdd(), a_low, a_high, b_low, b_high);
+  }
+  Node* Int32PairSub(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+    return AddNode(machine()->Int32PairSub(), a_low, a_high, b_low, b_high);
+  }
+  Node* Int32PairMul(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+    return AddNode(machine()->Int32PairMul(), a_low, a_high, b_low, b_high);
+  }
+  Node* Word32PairShl(Node* low_word, Node* high_word, Node* shift) {
+    return AddNode(machine()->Word32PairShl(), low_word, high_word, shift);
+  }
+  Node* Word32PairShr(Node* low_word, Node* high_word, Node* shift) {
+    return AddNode(machine()->Word32PairShr(), low_word, high_word, shift);
+  }
+  Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
+    return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
+  }
 
 #define INTPTR_BINOP(prefix, name)                     \
   Node* IntPtr##name(Node* a, Node* b) {               \
@@ -332,7 +350,9 @@
   }
 
   INTPTR_BINOP(Int, Add);
+  INTPTR_BINOP(Int, AddWithOverflow);
   INTPTR_BINOP(Int, Sub);
+  INTPTR_BINOP(Int, SubWithOverflow);
   INTPTR_BINOP(Int, LessThan);
   INTPTR_BINOP(Int, LessThanOrEqual);
   INTPTR_BINOP(Word, Equal);
@@ -374,6 +394,7 @@
     return AddNode(machine()->Float32Min().op(), a, b);
   }
   Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
+  Node* Float32Neg(Node* a) { return Float32Sub(Float32Constant(-0.0f), a); }
   Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
   Node* Float32Equal(Node* a, Node* b) {
     return AddNode(machine()->Float32Equal(), a, b);
@@ -414,6 +435,7 @@
     return AddNode(machine()->Float64Min().op(), a, b);
   }
   Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
+  Node* Float64Neg(Node* a) { return Float64Sub(Float64Constant(-0.0), a); }
   Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
   Node* Float64Equal(Node* a, Node* b) {
     return AddNode(machine()->Float64Equal(), a, b);
@@ -448,6 +470,9 @@
   Node* ChangeFloat64ToUint32(Node* a) {
     return AddNode(machine()->ChangeFloat64ToUint32(), a);
   }
+  Node* TruncateFloat64ToUint32(Node* a) {
+    return AddNode(machine()->TruncateFloat64ToUint32(), a);
+  }
   Node* TruncateFloat32ToInt32(Node* a) {
     return AddNode(machine()->TruncateFloat32ToInt32(), a);
   }
@@ -457,22 +482,12 @@
   Node* TryTruncateFloat32ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
   }
-  Node* TruncateFloat64ToInt64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
-  }
   Node* TryTruncateFloat64ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
   }
   Node* TryTruncateFloat32ToUint64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
   }
-  Node* TruncateFloat64ToUint64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
-  }
   Node* TryTruncateFloat64ToUint64(Node* a) {
     return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
   }
@@ -622,6 +637,8 @@
 
   // Tail call the given call descriptor and the given arguments.
   Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
+  // Tail call to a runtime function with zero arguments.
+  Node* TailCallRuntime0(Runtime::FunctionId function, Node* context);
   // Tail call to a runtime function with one argument.
   Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
                          Node* context);
@@ -708,13 +725,17 @@
 
 class RawMachineLabel final {
  public:
-  RawMachineLabel();
+  enum Type { kDeferred, kNonDeferred };
+
+  explicit RawMachineLabel(Type type = kNonDeferred)
+      : deferred_(type == kDeferred) {}
   ~RawMachineLabel();
 
  private:
-  BasicBlock* block_;
-  bool used_;
-  bool bound_;
+  BasicBlock* block_ = nullptr;
+  bool used_ = false;
+  bool bound_ = false;
+  bool deferred_;
   friend class RawMachineAssembler;
   DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
 };
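
The new Float32Neg/Float64Neg helpers above lower negation as a subtraction from -0.0 rather than +0.0; under IEEE-754 round-to-nearest that is what keeps the sign of a zero operand correct. A quick host-level check of the distinction:

    // Illustration only: why negation is built as (-0.0 - x), not (0.0 - x).
    #include <cassert>
    #include <cmath>

    int main() {
      double plus_zero = 0.0;
      assert(std::signbit(-0.0 - plus_zero));  // -(+0.0) should be -0.0
      assert(!std::signbit(0.0 - plus_zero));  // 0.0 - 0.0 yields +0.0 instead
      return 0;
    }
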
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 0b12e14..f2160f5 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -32,9 +32,9 @@
        i <= Instruction::LAST_GAP_POSITION; i++) {
     Instruction::GapPosition inner_pos =
         static_cast<Instruction::GapPosition>(i);
-    auto moves = instr->GetParallelMove(inner_pos);
+    const ParallelMove* moves = instr->GetParallelMove(inner_pos);
     if (moves == nullptr) continue;
-    for (auto move : *moves) {
+    for (const MoveOperands* move : *moves) {
       if (move->IsRedundant()) continue;
       CHECK(move->source().IsAllocated() || move->source().IsConstant());
       CHECK(move->destination().IsAllocated());
@@ -81,11 +81,12 @@
   // TODO(dcarney): model unique constraints.
   // Construct OperandConstraints for all InstructionOperands, eliminating
   // kSameAsFirst along the way.
-  for (const auto* instr : sequence->instructions()) {
+  for (const Instruction* instr : sequence->instructions()) {
     // All gaps should be totally unallocated at this point.
     VerifyEmptyGaps(instr);
     const size_t operand_count = OperandCount(instr);
-    auto* op_constraints = zone->NewArray<OperandConstraint>(operand_count);
+    OperandConstraint* op_constraints =
+        zone->NewArray<OperandConstraint>(operand_count);
     size_t count = 0;
     for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
       BuildConstraint(instr->InputAt(i), &op_constraints[count]);
@@ -115,11 +116,12 @@
   CHECK(sequence()->instructions().size() == constraints()->size());
   auto instr_it = sequence()->begin();
   for (const auto& instr_constraint : *constraints()) {
-    const auto* instr = instr_constraint.instruction_;
+    const Instruction* instr = instr_constraint.instruction_;
     // All gaps should be totally allocated at this point.
     VerifyAllocatedGaps(instr);
     const size_t operand_count = instr_constraint.operand_constaints_size_;
-    const auto* op_constraints = instr_constraint.operand_constraints_;
+    const OperandConstraint* op_constraints =
+        instr_constraint.operand_constraints_;
     CHECK_EQ(instr, *instr_it);
     CHECK(operand_count == OperandCount(instr));
     size_t count = 0;
@@ -148,14 +150,14 @@
   } else if (op->IsExplicit()) {
     constraint->type_ = kExplicit;
   } else if (op->IsImmediate()) {
-    auto imm = ImmediateOperand::cast(op);
+    const ImmediateOperand* imm = ImmediateOperand::cast(op);
     int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
                                                         : imm->indexed_value();
     constraint->type_ = kImmediate;
     constraint->value_ = value;
   } else {
     CHECK(op->IsUnallocated());
-    const auto* unallocated = UnallocatedOperand::cast(op);
+    const UnallocatedOperand* unallocated = UnallocatedOperand::cast(op);
     int vreg = unallocated->virtual_register();
     constraint->virtual_register_ = vreg;
     if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
@@ -213,7 +215,7 @@
       return;
     case kImmediate: {
       CHECK(op->IsImmediate());
-      auto imm = ImmediateOperand::cast(op);
+      const ImmediateOperand* imm = ImmediateOperand::cast(op);
       int value = imm->type() == ImmediateOperand::INLINE
                       ? imm->inline_value()
                       : imm->indexed_value();
@@ -324,7 +326,7 @@
       if (this->empty()) return;
       auto it = this->begin();
       OperandLess less;
-      for (const auto& o : other) {
+      for (const std::pair<const InstructionOperand*, MapValue*>& o : other) {
         while (less(it->first, o.first)) {
           this->erase(it++);
           if (it == this->end()) return;
@@ -346,7 +348,7 @@
   void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
     // Compute outgoing mappings.
     Map to_insert(zone);
-    for (auto move : *moves) {
+    for (const MoveOperands* move : *moves) {
       if (move->IsEliminated()) continue;
       auto cur = map().find(&move->source());
       CHECK(cur != map().end());
@@ -356,7 +358,7 @@
       CHECK(res.second);
     }
     // Drop current mappings.
-    for (auto move : *moves) {
+    for (const MoveOperands* move : *moves) {
       if (move->IsEliminated()) continue;
       auto cur = map().find(&move->destination());
       if (cur != map().end()) map().erase(cur);
@@ -368,8 +370,9 @@
   void RunGaps(Zone* zone, const Instruction* instr) {
     for (int i = Instruction::FIRST_GAP_POSITION;
          i <= Instruction::LAST_GAP_POSITION; i++) {
-      auto inner_pos = static_cast<Instruction::GapPosition>(i);
-      auto move = instr->GetParallelMove(inner_pos);
+      Instruction::GapPosition inner_pos =
+          static_cast<Instruction::GapPosition>(i);
+      const ParallelMove* move = instr->GetParallelMove(inner_pos);
       if (move == nullptr) continue;
       RunParallelMoves(zone, move);
     }
@@ -383,7 +386,7 @@
   void DropRegisters(const RegisterConfiguration* config) {
     // TODO(dcarney): sort map by kind and drop range.
     for (auto it = map().begin(); it != map().end();) {
-      auto op = it->first;
+      const InstructionOperand* op = it->first;
       if (op->IsRegister() || op->IsDoubleRegister()) {
         map().erase(it++);
       } else {
@@ -394,7 +397,7 @@
 
   MapValue* Define(Zone* zone, const InstructionOperand* op,
                    int virtual_register) {
-    auto value = new (zone) MapValue();
+    MapValue* value = new (zone) MapValue();
     value->define_vreg = virtual_register;
     auto res = map().insert(std::make_pair(op, value));
     if (!res.second) res.first->second = value;
@@ -404,7 +407,7 @@
   void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
     auto it = map().find(op);
     CHECK(it != map().end());
-    auto v = it->second;
+    MapValue* v = it->second;
     if (v->define_vreg != kInvalidVreg) {
       CHECK_EQ(v->define_vreg, use_vreg);
     }
@@ -445,7 +448,7 @@
               bool initial_pass) {
     auto it = map().find(op);
     CHECK(it != map().end());
-    auto v = it->second;
+    MapValue* v = it->second;
     int use_vreg = phi->virtual_register;
     // Phis are not defined.
     CHECK_EQ(kInvalidVreg, v->define_vreg);
@@ -473,7 +476,7 @@
         CHECK(v->define_vreg == phi->first_pred_vreg);
       } else if (v->use_vreg != phi->first_pred_vreg) {
         // Walk the phi chain, hunting for a matching phi use.
-        auto p = phi;
+        const PhiData* p = phi;
         for (; p != nullptr; p = p->first_pred_phi) {
           if (p->virtual_register == v->use_vreg) break;
         }
@@ -529,12 +532,12 @@
     BlockIds block_ids((BlockIds::key_compare()),
                        zone_allocator<size_t>(zone()));
     // First ensure that incoming contains only keys in all predecessors.
-    for (auto block : sequence()->instruction_blocks()) {
+    for (const InstructionBlock* block : sequence()->instruction_blocks()) {
       size_t index = block->rpo_number().ToSize();
       block_ids.insert(index);
-      auto& succ_map = incoming_maps_[index]->map();
+      OperandMap::Map& succ_map = incoming_maps_[index]->map();
       for (size_t i = 0; i < block->PredecessorCount(); ++i) {
-        auto pred_rpo = block->predecessors()[i];
+        RpoNumber pred_rpo = block->predecessors()[i];
         succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
       }
     }
@@ -545,8 +548,9 @@
       const size_t succ_index = *block_id_it;
       block_ids.erase(block_id_it);
       // Propagate uses back to their definition blocks using succ_vreg.
-      auto block = sequence()->instruction_blocks()[succ_index];
-      auto& succ_map = incoming_maps_[succ_index]->map();
+      const InstructionBlock* block =
+          sequence()->instruction_blocks()[succ_index];
+      OperandMap::Map& succ_map = incoming_maps_[succ_index]->map();
       for (size_t i = 0; i < block->PredecessorCount(); ++i) {
         for (auto& succ_val : succ_map) {
           // An incoming map contains no defines.
@@ -561,15 +565,15 @@
           if (succ_vreg == kInvalidVreg) continue;
           // May need to transition phi.
           if (IsPhi(succ_vreg)) {
-            auto phi = GetPhi(succ_vreg);
+            const PhiData* phi = GetPhi(succ_vreg);
             if (phi->definition_rpo.ToSize() == succ_index) {
               // phi definition block, transition to pred value.
               succ_vreg = phi->operands[i];
             }
           }
           // Push succ_vreg up to all predecessors.
-          auto pred_rpo = block->predecessors()[i];
-          auto& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
+          RpoNumber pred_rpo = block->predecessors()[i];
+          OperandMap::Map& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
           auto& pred_val = *pred_map.find(succ_val.first);
           if (pred_val.second->use_vreg != kInvalidVreg) {
             CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
@@ -606,7 +610,7 @@
       }
     }
     // Clear uses and back links for second pass.
-    for (auto operand_map : incoming_maps_) {
+    for (OperandMap* operand_map : incoming_maps_) {
       for (auto& succ_val : operand_map->map()) {
         succ_val.second->incoming = nullptr;
         succ_val.second->use_vreg = kInvalidVreg;
@@ -616,18 +620,19 @@
 
  private:
   OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
-    auto to_init = outgoing_maps_[block_index];
+    OperandMap* to_init = outgoing_maps_[block_index];
     CHECK(to_init->map().empty());
-    auto block = sequence()->instruction_blocks()[block_index];
+    const InstructionBlock* block =
+        sequence()->instruction_blocks()[block_index];
     if (block->predecessors().empty()) return to_init;
     size_t predecessor_index = block->predecessors()[0].ToSize();
     // Ensure not a backedge.
     CHECK(predecessor_index < block->rpo_number().ToSize());
-    auto incoming = outgoing_maps_[predecessor_index];
+    OperandMap* incoming = outgoing_maps_[predecessor_index];
     // Copy map and replace values.
     to_init->map() = incoming->map();
     for (auto& it : to_init->map()) {
-      auto incoming = it.second;
+      OperandMap::MapValue* incoming = it.second;
       it.second = new (zone()) OperandMap::MapValue();
       it.second->incoming = incoming;
     }
@@ -653,8 +658,9 @@
   void InitializePhis() {
     const size_t block_count = sequence()->instruction_blocks().size();
     for (size_t block_index = 0; block_index < block_count; ++block_index) {
-      const auto block = sequence()->instruction_blocks()[block_index];
-      for (auto phi : block->phis()) {
+      const InstructionBlock* block =
+          sequence()->instruction_blocks()[block_index];
+      for (const PhiInstruction* phi : block->phis()) {
         int first_pred_vreg = phi->operands()[0];
         const PhiData* first_pred_phi = nullptr;
         if (IsPhi(first_pred_vreg)) {
@@ -662,7 +668,7 @@
           first_pred_vreg = first_pred_phi->first_pred_vreg;
         }
         CHECK(!IsPhi(first_pred_vreg));
-        auto phi_data = new (zone()) PhiData(
+        PhiData* phi_data = new (zone()) PhiData(
             block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
         auto res =
             phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
@@ -700,14 +706,17 @@
                                                bool initial_pass) {
   const size_t block_count = sequence()->instruction_blocks().size();
   for (size_t block_index = 0; block_index < block_count; ++block_index) {
-    auto current = block_maps->InitializeIncoming(block_index, initial_pass);
-    const auto block = sequence()->instruction_blocks()[block_index];
+    OperandMap* current =
+        block_maps->InitializeIncoming(block_index, initial_pass);
+    const InstructionBlock* block =
+        sequence()->instruction_blocks()[block_index];
     for (int instr_index = block->code_start(); instr_index < block->code_end();
          ++instr_index) {
-      const auto& instr_constraint = constraints_[instr_index];
-      const auto instr = instr_constraint.instruction_;
+      const InstructionConstraint& instr_constraint = constraints_[instr_index];
+      const Instruction* instr = instr_constraint.instruction_;
       current->RunGaps(zone(), instr);
-      const auto op_constraints = instr_constraint.operand_constraints_;
+      const OperandConstraint* op_constraints =
+          instr_constraint.operand_constraints_;
       size_t count = 0;
       for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
         if (op_constraints[count].type_ == kImmediate ||
@@ -715,11 +724,11 @@
           continue;
         }
         int virtual_register = op_constraints[count].virtual_register_;
-        auto op = instr->InputAt(i);
+        const InstructionOperand* op = instr->InputAt(i);
         if (!block_maps->IsPhi(virtual_register)) {
           current->Use(op, virtual_register, initial_pass);
         } else {
-          auto phi = block_maps->GetPhi(virtual_register);
+          const PhiData* phi = block_maps->GetPhi(virtual_register);
           current->UsePhi(op, phi, initial_pass);
         }
       }
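
OperandMap::Intersect, partially visible in the hunk above, drops every key that is not present in the other (key-sorted) map by advancing one iterator in lockstep with the other map. A simplified, self-contained version of the same pattern over std::map, not the verifier's actual types:

    // Simplified sketch: keep only the keys of *self that also occur in other.
    #include <map>

    template <typename K, typename V>
    void IntersectKeys(std::map<K, V>* self, const std::map<K, V>& other) {
      auto it = self->begin();
      for (const auto& o : other) {
        // Keys smaller than the current key of `other` cannot be shared.
        while (it != self->end() && it->first < o.first) it = self->erase(it);
        if (it == self->end()) return;
        if (!(o.first < it->first)) ++it;  // equal keys: keep and move on
      }
      self->erase(it, self->end());  // keys beyond the last shared key
    }
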
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 02ba1f1..82faf75 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -25,7 +25,6 @@
   v->erase(it);
 }
 
-
 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
   return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
                                   : cfg->num_general_registers();
@@ -1561,7 +1560,6 @@
   return true;
 }
 
-
 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
     TopLevelLiveRange* range) {
   DCHECK(!range->HasSpillOperand());
@@ -2197,8 +2195,22 @@
     int phi_vreg = phi->virtual_register();
     live->Remove(phi_vreg);
     InstructionOperand* hint = nullptr;
-    Instruction* instr = GetLastInstruction(
-        code(), code()->InstructionBlockAt(block->predecessors()[0]));
+    const InstructionBlock::Predecessors& predecessors = block->predecessors();
+    const InstructionBlock* predecessor_block =
+        code()->InstructionBlockAt(predecessors[0]);
+    const Instruction* instr = GetLastInstruction(code(), predecessor_block);
+    if (predecessor_block->IsDeferred()) {
+      // "Prefer the hint from the first non-deferred predecessor, if any.
+      for (size_t i = 1; i < predecessors.size(); ++i) {
+        predecessor_block = code()->InstructionBlockAt(predecessors[i]);
+        if (!predecessor_block->IsDeferred()) {
+          instr = GetLastInstruction(code(), predecessor_block);
+          break;
+        }
+      }
+    }
+    DCHECK_NOT_NULL(instr);
+
     for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
       InstructionOperand& to = move->destination();
       if (to.IsUnallocated() &&
@@ -2322,11 +2334,79 @@
   for (auto& hint : phi_hints_) {
     CHECK(hint.second->IsResolved());
   }
-  for (TopLevelLiveRange* current : data()->live_ranges()) {
-    if (current != nullptr && !current->IsEmpty()) current->Verify();
+  for (const TopLevelLiveRange* current : data()->live_ranges()) {
+    if (current != nullptr && !current->IsEmpty()) {
+      // New LiveRanges should not be split.
+      CHECK_NULL(current->next());
+      // General integrity check.
+      current->Verify();
+      const UseInterval* first = current->first_interval();
+      if (first->next() == nullptr) continue;
+
+      // Consecutive intervals should not end and start in the same block,
+      // otherwise the intervals should have been joined, because the
+      // variable is live throughout that block.
+      CHECK(NextIntervalStartsInDifferentBlocks(first));
+
+      for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
+        // Except for the first interval, the other intervals must start at
+        // a block boundary, otherwise data wouldn't flow to them.
+        CHECK(IntervalStartsAtBlockBoundary(i));
+        // The last instruction of the predecessors of the block the interval
+        // starts must be covered by the range.
+        CHECK(IntervalPredecessorsCoveredByRange(i, current));
+        if (i->next() != nullptr) {
+          // Check the consecutive intervals property, except for the last
+          // interval, where it doesn't apply.
+          CHECK(NextIntervalStartsInDifferentBlocks(i));
+        }
+      }
+    }
   }
 }
 
+bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
+    const UseInterval* interval) const {
+  LifetimePosition start = interval->start();
+  if (!start.IsFullStart()) return false;
+  int instruction_index = start.ToInstructionIndex();
+  const InstructionBlock* block =
+      data()->code()->GetInstructionBlock(instruction_index);
+  return block->first_instruction_index() == instruction_index;
+}
+
+bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
+    const UseInterval* interval, const TopLevelLiveRange* range) const {
+  LifetimePosition start = interval->start();
+  int instruction_index = start.ToInstructionIndex();
+  const InstructionBlock* block =
+      data()->code()->GetInstructionBlock(instruction_index);
+  for (RpoNumber pred_index : block->predecessors()) {
+    const InstructionBlock* predecessor =
+        data()->code()->InstructionBlockAt(pred_index);
+    LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
+        predecessor->last_instruction_index());
+    last_pos = last_pos.NextStart().End();
+    if (!range->Covers(last_pos)) return false;
+  }
+  return true;
+}
+
+bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
+    const UseInterval* interval) const {
+  DCHECK_NOT_NULL(interval->next());
+  LifetimePosition end = interval->end();
+  LifetimePosition next_start = interval->next()->start();
+  // Since end is not covered, but the previous position is, move back one
+  // position.
+  end = end.IsStart() ? end.PrevStart().End() : end.Start();
+  int last_covered_index = end.ToInstructionIndex();
+  const InstructionBlock* block =
+      data()->code()->GetInstructionBlock(last_covered_index);
+  const InstructionBlock* next_block =
+      data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
+  return block->rpo_number() < next_block->rpo_number();
+}
 
 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
                                      RegisterKind kind)
@@ -3081,21 +3161,14 @@
   for (TopLevelLiveRange* range : data()->live_ranges()) {
     if (range == nullptr || range->IsEmpty()) continue;
     // We care only about ranges which spill in the frame.
-    if (!range->HasSpillRange()) continue;
-    if (range->IsSpilledOnlyInDeferredBlocks()) {
-      for (LiveRange* child = range; child != nullptr; child = child->next()) {
-        if (child->spilled()) {
-          code->GetInstructionBlock(child->Start().ToInstructionIndex())
-              ->mark_needs_frame();
-        }
-      }
-    } else {
-      TopLevelLiveRange::SpillMoveInsertionList* spills =
-          range->GetSpillMoveInsertionLocations();
-      DCHECK_NOT_NULL(spills);
-      for (; spills != nullptr; spills = spills->next) {
-        code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
-      }
+    if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
+      continue;
+    }
+    TopLevelLiveRange::SpillMoveInsertionList* spills =
+        range->GetSpillMoveInsertionLocations();
+    DCHECK_NOT_NULL(spills);
+    for (; spills != nullptr; spills = spills->next) {
+      code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
     }
   }
 }
@@ -3558,7 +3631,7 @@
     worklist.pop();
     if (done_blocks.Contains(block_id)) continue;
     done_blocks.Add(block_id);
-    const InstructionBlock* spill_block =
+    InstructionBlock* spill_block =
         code->InstructionBlockAt(RpoNumber::FromInt(block_id));
 
     for (const RpoNumber& pred : spill_block->predecessors()) {
@@ -3578,6 +3651,7 @@
         data()->AddGapMove(spill_block->first_instruction_index(),
                            Instruction::GapPosition::START, pred_op,
                            spill_operand);
+        spill_block->mark_needs_frame();
       }
     }
   }
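
The verification added to LiveRangeBuilder above enforces structural invariants on freshly built live ranges: later intervals start exactly at block boundaries, their predecessors' last instructions are covered, and two consecutive intervals never end and restart within the same block (they would have been a single interval). A schematic version of the last check over simplified types; this is not V8's LifetimePosition encoding:

    // Simplified illustration of the "consecutive intervals lie in different
    // blocks" invariant; instruction indices and block ids are plain ints.
    #include <vector>

    struct Interval { int start; int end; };  // half-open [start, end)

    bool ConsecutiveIntervalsInDifferentBlocks(
        const std::vector<Interval>& intervals,
        const std::vector<int>& block_of) {
      for (size_t i = 0; i + 1 < intervals.size(); ++i) {
        int last_covered = intervals[i].end - 1;  // end itself is not covered
        int next_start = intervals[i + 1].start;
        if (block_of[last_covered] >= block_of[next_start]) return false;
      }
      return true;
    }
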
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 38fad05..d6ed005 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -919,7 +919,12 @@
     return data()->live_in_sets();
   }
 
+  // Verification.
   void Verify() const;
+  bool IntervalStartsAtBlockBoundary(const UseInterval* interval) const;
+  bool IntervalPredecessorsCoveredByRange(const UseInterval* interval,
+                                          const TopLevelLiveRange* range) const;
+  bool NextIntervalStartsInDifferentBlocks(const UseInterval* interval) const;
 
   // Liveness analysis support.
   void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 2f7720b..f59c8bc 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -142,7 +142,8 @@
     case MachineRepresentation::kWord8:
     case MachineRepresentation::kWord16:
     case MachineRepresentation::kWord32:
-      return GetWord32RepresentationFor(node, output_rep, output_type);
+      return GetWord32RepresentationFor(node, output_rep, output_type,
+                                        truncation);
     case MachineRepresentation::kWord64:
       return GetWord64RepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kSimd128:  // Fall through.
@@ -234,30 +235,34 @@
       break;
   }
   // Select the correct X -> Float32 operator.
-  const Operator* op;
-  if (output_rep == MachineRepresentation::kBit) {
-    return TypeError(node, output_rep, output_type,
-                     MachineRepresentation::kFloat32);
-  } else if (IsWord(output_rep)) {
+  const Operator* op = nullptr;
+  if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
+      // int32 -> float64 -> float32
       op = machine()->ChangeInt32ToFloat64();
-    } else {
-      // Either the output is int32 or the uses only care about the
-      // low 32 bits (so we can pick int32 safely).
-      DCHECK(output_type->Is(Type::Unsigned32()) ||
-             truncation.TruncatesToWord32());
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
+    } else if (output_type->Is(Type::Unsigned32()) ||
+               truncation.TruncatesToWord32()) {
+      // Either the output is uint32 or the uses only care about the
+      // low 32 bits (so we can pick uint32 safely).
+
+      // uint32 -> float64 -> float32
       op = machine()->ChangeUint32ToFloat64();
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
     }
-    // int32 -> float64 -> float32
-    node = jsgraph()->graph()->NewNode(op, node);
-    op = machine()->TruncateFloat64ToFloat32();
   } else if (output_rep == MachineRepresentation::kTagged) {
-    op = simplified()->ChangeTaggedToFloat64();  // tagged -> float64 -> float32
-    node = jsgraph()->graph()->NewNode(op, node);
-    op = machine()->TruncateFloat64ToFloat32();
+    if (output_type->Is(Type::Number())) {
+      op = simplified()
+               ->ChangeTaggedToFloat64();  // tagged -> float64 -> float32
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
+    }
   } else if (output_rep == MachineRepresentation::kFloat64) {
     op = machine()->TruncateFloat64ToFloat32();
-  } else {
+  }
+  if (op == nullptr) {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kFloat32);
   }
@@ -289,25 +294,24 @@
       break;
   }
   // Select the correct X -> Float64 operator.
-  const Operator* op;
-  if (output_rep == MachineRepresentation::kBit) {
-    return TypeError(node, output_rep, output_type,
-                     MachineRepresentation::kFloat64);
-  } else if (IsWord(output_rep)) {
+  const Operator* op = nullptr;
+  if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeInt32ToFloat64();
-    } else {
-      // Either the output is int32 or the uses only care about the
-      // low 32 bits (so we can pick int32 safely).
-      DCHECK(output_type->Is(Type::Unsigned32()) ||
-             truncation.TruncatesToWord32());
+    } else if (output_type->Is(Type::Unsigned32()) ||
+               truncation.TruncatesToWord32()) {
+      // Either the output is uint32 or the uses only care about the
+      // low 32 bits (so we can pick uint32 safely).
       op = machine()->ChangeUint32ToFloat64();
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    op = simplified()->ChangeTaggedToFloat64();
+    if (output_type->Is(Type::Number())) {
+      op = simplified()->ChangeTaggedToFloat64();
+    }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     op = machine()->ChangeFloat32ToFloat64();
-  } else {
+  }
+  if (op == nullptr) {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kFloat64);
   }
@@ -319,9 +323,9 @@
   return jsgraph()->Int32Constant(DoubleToInt32(value));
 }
 
-
 Node* RepresentationChanger::GetWord32RepresentationFor(
-    Node* node, MachineRepresentation output_rep, Type* output_type) {
+    Node* node, MachineRepresentation output_rep, Type* output_type,
+    Truncation truncation) {
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
     case IrOpcode::kInt32Constant:
@@ -335,43 +339,37 @@
       break;
   }
   // Select the correct X -> Word32 operator.
-  const Operator* op;
-  Type* type = NodeProperties::GetType(node);
-
+  const Operator* op = nullptr;
   if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word32
   } else if (output_rep == MachineRepresentation::kFloat64) {
-    // TODO(jarin) Use only output_type here, once we intersect it with the
-    // type inferred by the typer.
-    if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+    if (output_type->Is(Type::Unsigned32())) {
       op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32()) ||
-               type->Is(Type::Signed32())) {
+    } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else {
+    } else if (truncation.TruncatesToWord32()) {
       op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
-    if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+    if (output_type->Is(Type::Unsigned32())) {
       op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32()) ||
-               type->Is(Type::Signed32())) {
+    } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else {
+    } else if (truncation.TruncatesToWord32()) {
       op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+    if (output_type->Is(Type::Unsigned32())) {
       op = simplified()->ChangeTaggedToUint32();
-    } else if (output_type->Is(Type::Signed32()) ||
-               type->Is(Type::Signed32())) {
+    } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
-    } else {
+    } else if (truncation.TruncatesToWord32()) {
       node = InsertChangeTaggedToFloat64(node);
       op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
     }
-  } else {
+  }
+  if (op == nullptr) {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kWord32);
   }
@@ -467,6 +465,10 @@
       return machine()->Uint32LessThan();
     case IrOpcode::kNumberLessThanOrEqual:
       return machine()->Uint32LessThanOrEqual();
+    case IrOpcode::kNumberClz32:
+      return machine()->Word32Clz();
+    case IrOpcode::kNumberImul:
+      return machine()->Int32Mul();
     default:
       UNREACHABLE();
       return nullptr;
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 62ea3b4..24e28f3 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -121,7 +121,7 @@
                                     MachineRepresentation output_rep,
                                     Type* output_type, Truncation truncation);
   Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
-                                   Type* output_type);
+                                   Type* output_type, Truncation truncation);
   Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
                                 Type* output_type);
   Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
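
The representation-change edits above convert word32 values to float32 by going through float64 and then truncating. Every 32-bit integer is exactly representable as a double, so the intermediate conversion is exact, the only rounding step is TruncateFloat64ToFloat32, and the result equals a direct, correctly rounded int32-to-float32 conversion. A small check of that reasoning:

    // Illustration: int32 -> float64 is exact, so float64 -> float32 is the
    // only rounding and matches a direct int32 -> float32 conversion.
    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t v = 16777217;  // 2^24 + 1, not representable as float32
      float via_double = static_cast<float>(static_cast<double>(v));
      float direct = static_cast<float>(v);
      assert(via_double == direct);     // same correctly rounded result
      assert(direct == 16777216.0f);    // rounded to nearest even
      return 0;
    }
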
diff --git a/src/compiler/s390/OWNERS b/src/compiler/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/compiler/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
new file mode 100644
index 0000000..68c1d9d
--- /dev/null
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -0,0 +1,2085 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/ast/scopes.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+#define kScratchReg ip
+
+// Adds S390-specific methods to convert InstructionOperands.
+class S390OperandConverter final : public InstructionOperandConverter {
+ public:
+  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  size_t OutputCount() { return instr_->OutputCount(); }
+
+  bool CompareLogical() const {
+    switch (instr_->flags_condition()) {
+      case kUnsignedLessThan:
+      case kUnsignedGreaterThanOrEqual:
+      case kUnsignedLessThanOrEqual:
+      case kUnsignedGreaterThan:
+        return true;
+      default:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+  Operand InputImmediate(size_t index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+#if V8_TARGET_ARCH_S390X
+        return Operand(constant.ToInt64());
+#endif
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+      case Constant::kRpoNumber:
+        break;
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
+    const size_t index = *first_index;
+    *mode = AddressingModeField::decode(instr_->opcode());
+    switch (*mode) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return MemOperand(r0);
+  }
+
+  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+    return MemoryOperand(mode, &first_index);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK_NOT_NULL(op);
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineLoadNAN32 final : public OutOfLineCode {
+ public:
+  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
+                         kScratchReg);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+class OutOfLineLoadNAN64 final : public OutOfLineCode {
+ public:
+  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
+                         kScratchReg);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+class OutOfLineLoadZero final : public OutOfLineCode {
+ public:
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
+
+ private:
+  Register const result_;
+};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        offset_(offset),
+        offset_immediate_(0),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        offset_(no_reg),
+        offset_immediate_(offset),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+
+  void Generate() final {
+    if (mode_ > RecordWriteMode::kValueIsPointer) {
+      __ JumpIfSmi(value_, exit());
+    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
+    SaveFPRegsMode const save_fp_mode =
+        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+    if (must_save_lr_) {
+      // We need to save and restore r14 if the frame was elided.
+      __ Push(r14);
+    }
+    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+                         remembered_set_action, save_fp_mode);
+    if (offset_.is(no_reg)) {
+      __ AddP(scratch1_, object_, Operand(offset_immediate_));
+    } else {
+      DCHECK_EQ(0, offset_immediate_);
+      __ AddP(scratch1_, object_, offset_);
+    }
+    __ CallStub(&stub);
+    if (must_save_lr_) {
+      // We need to save and restore r14 if the frame was elided.
+      __ Pop(r14);
+    }
+  }
+
+ private:
+  Register const object_;
+  Register const offset_;
+  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
+  Register const value_;
+  Register const scratch0_;
+  Register const scratch1_;
+  RecordWriteMode const mode_;
+  bool must_save_lr_;
+};
+
+Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
+  switch (condition) {
+    case kEqual:
+      return eq;
+    case kNotEqual:
+      return ne;
+    case kSignedLessThan:
+    case kUnsignedLessThan:
+      return lt;
+    case kSignedGreaterThanOrEqual:
+    case kUnsignedGreaterThanOrEqual:
+      return ge;
+    case kSignedLessThanOrEqual:
+    case kUnsignedLessThanOrEqual:
+      return le;
+    case kSignedGreaterThan:
+    case kUnsignedGreaterThan:
+      return gt;
+    case kOverflow:
+      // Overflow checked for AddP/SubP only.
+      switch (op) {
+#if V8_TARGET_ARCH_S390X
+        case kS390_Add:
+        case kS390_Sub:
+          return lt;
+#endif
+        case kS390_AddWithOverflow32:
+        case kS390_SubWithOverflow32:
+#if V8_TARGET_ARCH_S390X
+          return ne;
+#else
+          return lt;
+#endif
+        default:
+          break;
+      }
+      break;
+    case kNotOverflow:
+      switch (op) {
+#if V8_TARGET_ARCH_S390X
+        case kS390_Add:
+        case kS390_Sub:
+          return ge;
+#endif
+        case kS390_AddWithOverflow32:
+        case kS390_SubWithOverflow32:
+#if V8_TARGET_ARCH_S390X
+          return eq;
+#else
+          return ge;
+#endif
+        default:
+          break;
+      }
+      break;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return kNoCondition;
+}
+
+}  // namespace
+
+#define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
+  do {                                                                \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_BINOP(asm_instr)                              \
+  do {                                                               \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                 i.InputDoubleRegister(1));                          \
+  } while (0)
+
+#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
+  do {                                                         \
+    if (HasRegisterInput(instr, 1)) {                          \
+      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputRegister(1));                    \
+    } else {                                                   \
+      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputImmediate(1));                   \
+    }                                                          \
+  } while (0)
+
+#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm)       \
+  do {                                                         \
+    if (HasRegisterInput(instr, 1)) {                          \
+      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputRegister(1));                    \
+    } else {                                                   \
+      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputInt32(1));                       \
+    }                                                          \
+  } while (0)
+
+#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
+  do {                                                                  \
+    if (HasRegisterInput(instr, 1)) {                                   \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputRegister(1), kScratchReg, r0);   \
+    } else {                                                            \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputInt32(1), kScratchReg, r0);      \
+    }                                                                   \
+  } while (0)
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
+  do {                                                                  \
+    if (HasRegisterInput(instr, 1)) {                                   \
+      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputRegister(1), kScratchReg, r0);   \
+    } else {                                                            \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                -i.InputInt32(1), kScratchReg, r0);     \
+    }                                                                   \
+  } while (0)
+
+#if V8_TARGET_ARCH_S390X
+#define ASSEMBLE_ADD_WITH_OVERFLOW32()      \
+  do {                                      \
+    ASSEMBLE_BINOP(AddP, AddP);             \
+    __ TestIfInt32(i.OutputRegister(), r0); \
+  } while (0)
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW32()      \
+  do {                                      \
+    ASSEMBLE_BINOP(SubP, SubP);             \
+    __ TestIfInt32(i.OutputRegister(), r0); \
+  } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
+#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
+#endif
+
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                 \
+  do {                                                          \
+    if (HasRegisterInput(instr, 1)) {                           \
+      if (i.CompareLogical()) {                                 \
+        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));  \
+      } else {                                                  \
+        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));   \
+      }                                                         \
+    } else {                                                    \
+      if (i.CompareLogical()) {                                 \
+        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
+      } else {                                                  \
+        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));  \
+      }                                                         \
+    }                                                           \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                            \
+  do {                                                               \
+    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+  } while (0)
+
+// The divide instruction dr implicitly uses the register pair
+// r0 & r1 below.
+// R0:R1 = R1 / divisor; quotient in R1, remainder in R0.
+// Copy the remainder to the output register.
+#define ASSEMBLE_MODULO(div_instr, shift_instr) \
+  do {                                          \
+    __ LoadRR(r0, i.InputRegister(0));          \
+    __ shift_instr(r0, Operand(32));            \
+    __ div_instr(r0, i.InputRegister(1));       \
+    __ ltr(i.OutputRegister(), r0);             \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_MODULO()                                               \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
+                            i.InputDoubleRegister(1));                        \
+    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
+                     0, 2);                                                   \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
+  } while (0)
+
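+// Float max/min are expanded to a compare plus a two-way branch selecting one
+// of the inputs; the double_scratch_reg/general_scratch_reg parameters are
+// currently unused by these macros.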
+#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
+  do {                                                              \
+    Label ge, done;                                                 \
+    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
+    __ bge(&ge, Label::kNear);                                      \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
+    __ b(&done, Label::kNear);                                      \
+    __ bind(&ge);                                                   \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
+    __ bind(&done);                                                 \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
+  do {                                                              \
+    Label ge, done;                                                 \
+    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
+    __ bge(&ge, Label::kNear);                                      \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
+    __ b(&done, Label::kNear);                                      \
+    __ bind(&ge);                                                   \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
+    __ bind(&done);                                                 \
+  } while (0)
+
+// Only the MRI addressing mode is available for these instructions.
+#define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
+  do {                                                \
+    DoubleRegister result = i.OutputDoubleRegister(); \
+    AddressingMode mode = kMode_None;                 \
+    MemOperand operand = i.MemoryOperand(&mode);      \
+    __ asm_instr(result, operand);                    \
+  } while (0)
+
+#define ASSEMBLE_LOAD_INTEGER(asm_instr)         \
+  do {                                           \
+    Register result = i.OutputRegister();        \
+    AddressingMode mode = kMode_None;            \
+    MemOperand operand = i.MemoryOperand(&mode); \
+    __ asm_instr(result, operand);               \
+  } while (0)
+
+#define ASSEMBLE_STORE_FLOAT32()                         \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    __ StoreFloat32(value, operand);                     \
+  } while (0)
+
+#define ASSEMBLE_STORE_DOUBLE()                          \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    __ StoreDouble(value, operand);                      \
+  } while (0)
+
+#define ASSEMBLE_STORE_INTEGER(asm_instr)                \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    Register value = i.InputRegister(index);             \
+    __ asm_instr(value, operand);                        \
+  } while (0)
+
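+// The checked load/store macros bounds-check the sign-extended offset against
+// the length operand (input 2): out-of-bounds loads branch to an out-of-line
+// stub that produces NaN (float loads) or zero (integer loads), and
+// out-of-bounds stores are skipped.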
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width)              \
+  do {                                                             \
+    DoubleRegister result = i.OutputDoubleRegister();              \
+    size_t index = 0;                                              \
+    AddressingMode mode = kMode_None;                              \
+    MemOperand operand = i.MemoryOperand(&mode, index);            \
+    Register offset = operand.rb();                                \
+    __ lgfr(offset, offset);                                       \
+    if (HasRegisterInput(instr, 2)) {                              \
+      __ CmpLogical32(offset, i.InputRegister(2));                 \
+    } else {                                                       \
+      __ CmpLogical32(offset, i.InputImmediate(2));                \
+    }                                                              \
+    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
+    __ bge(ool->entry());                                          \
+    __ asm_instr(result, operand);                                 \
+    __ bind(ool->exit());                                          \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
+  do {                                                       \
+    Register result = i.OutputRegister();                    \
+    size_t index = 0;                                        \
+    AddressingMode mode = kMode_None;                        \
+    MemOperand operand = i.MemoryOperand(&mode, index);      \
+    Register offset = operand.rb();                          \
+    __ lgfr(offset, offset);                                 \
+    if (HasRegisterInput(instr, 2)) {                        \
+      __ CmpLogical32(offset, i.InputRegister(2));           \
+    } else {                                                 \
+      __ CmpLogical32(offset, i.InputImmediate(2));          \
+    }                                                        \
+    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+    __ bge(ool->entry());                                    \
+    __ asm_instr(result, operand);                           \
+    __ bind(ool->exit());                                    \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    Register offset = operand.rb();                     \
+    __ lgfr(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ CmpLogical32(offset, i.InputRegister(2));      \
+    } else {                                            \
+      __ CmpLogical32(offset, i.InputImmediate(2));     \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ StoreFloat32(value, operand);                    \
+    __ bind(&done);                                     \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    DCHECK_EQ(kMode_MRR, mode);                         \
+    Register offset = operand.rb();                     \
+    __ lgfr(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ CmpLogical32(offset, i.InputRegister(2));      \
+    } else {                                            \
+      __ CmpLogical32(offset, i.InputImmediate(2));     \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ StoreDouble(value, operand);                     \
+    __ bind(&done);                                     \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)       \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    Register offset = operand.rb();                     \
+    __ lgfr(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ CmpLogical32(offset, i.InputRegister(2));      \
+    } else {                                            \
+      __ CmpLogical32(offset, i.InputImmediate(2));     \
+    }                                                   \
+    __ bge(&done);                                      \
+    Register value = i.InputRegister(3);                \
+    __ asm_instr(value, operand);                       \
+    __ bind(&done);                                     \
+  } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+  if (sp_slot_delta > 0) {
+    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
+  }
+  frame_access_state()->SetFrameAccessToDefault();
+}
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+  if (sp_slot_delta < 0) {
+    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
+    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+  }
+  if (frame_access_state()->has_frame()) {
+    __ RestoreFrameStateForTailCall();
+  }
+  frame_access_state()->SetFrameAccessToSP();
+}
+
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&done);
+
+  // Load arguments count from current arguments adaptor frame (note, it
+  // does not include receiver).
+  Register caller_args_count_reg = scratch1;
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  S390OperandConverter i(this, instr);
+  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
+
+  switch (opcode) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (HasRegisterInput(instr, 0)) {
+        __ AddP(ip, i.InputRegister(0),
+                Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Call(ip);
+      } else {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      }
+      RecordCallPosition(instr);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchTailCallCodeObjectFromJSFunction:
+    case kArchTailCallCodeObject: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
+      if (HasRegisterInput(instr, 0)) {
+        __ AddP(ip, i.InputRegister(0),
+                Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Jump(ip);
+      } else {
+        // We cannot use the constant pool to load the target since
+        // we've already restored the caller's frame.
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      }
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ LoadP(kScratchReg,
+                 FieldMemOperand(func, JSFunction::kContextOffset));
+        __ CmpP(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(ip);
+      RecordCallPosition(instr);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchTailCallJSFunctionFromJSFunction:
+    case kArchTailCallJSFunction: {
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ LoadP(kScratchReg,
+                 FieldMemOperand(func, JSFunction::kContextOffset));
+        __ CmpP(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
+      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Jump(ip);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchPrepareCallCFunction: {
+      int const num_parameters = MiscField::decode(instr->opcode());
+      __ PrepareCallCFunction(num_parameters, kScratchReg);
+      // Frame alignment requires using FP-relative frame addressing.
+      frame_access_state()->SetFrameAccessToFP();
+      break;
+    }
+    case kArchPrepareTailCall:
+      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      break;
+    case kArchCallCFunction: {
+      int const num_parameters = MiscField::decode(instr->opcode());
+      if (instr->InputAt(0)->IsImmediate()) {
+        ExternalReference ref = i.InputExternalReference(0);
+        __ CallCFunction(ref, num_parameters);
+      } else {
+        Register func = i.InputRegister(0);
+        __ CallCFunction(func, num_parameters);
+      }
+      frame_access_state()->SetFrameAccessToDefault();
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchJmp:
+      AssembleArchJump(i.InputRpo(0));
+      break;
+    case kArchLookupSwitch:
+      AssembleArchLookupSwitch(instr);
+      break;
+    case kArchTableSwitch:
+      AssembleArchTableSwitch(instr);
+      break;
+    case kArchNop:
+    case kArchThrowTerminator:
+      // don't emit code for nops.
+      break;
+    case kArchDeoptimize: {
+      int deopt_state_id =
+          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+      Deoptimizer::BailoutType bailout_type =
+          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      break;
+    }
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchStackPointer:
+      __ LoadRR(i.OutputRegister(), sp);
+      break;
+    case kArchFramePointer:
+      __ LoadRR(i.OutputRegister(), fp);
+      break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->has_frame()) {
+        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ LoadRR(i.OutputRegister(), fp);
+      }
+      break;
+    case kArchTruncateDoubleToI:
+      // TODO(mbrandy): move slow call to stub out of line.
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArchStoreWithWriteBarrier: {
+      RecordWriteMode mode =
+          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+      Register object = i.InputRegister(0);
+      Register value = i.InputRegister(2);
+      Register scratch0 = i.TempRegister(0);
+      Register scratch1 = i.TempRegister(1);
+      OutOfLineRecordWrite* ool;
+
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
+      if (addressing_mode == kMode_MRI) {
+        int32_t offset = i.InputInt32(1);
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StoreP(value, MemOperand(object, offset));
+      } else {
+        DCHECK_EQ(kMode_MRR, addressing_mode);
+        Register offset(i.InputRegister(1));
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StoreP(value, MemOperand(object, offset));
+      }
+      __ CheckPageFlag(object, scratch0,
+                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+                       ool->entry());
+      __ bind(ool->exit());
+      break;
+    }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+              Operand(offset.offset()));
+      break;
+    }
+    case kS390_And:
+      ASSEMBLE_BINOP(AndP, AndP);
+      break;
+    case kS390_AndComplement:
+      __ NotP(i.InputRegister(1));
+      __ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kS390_Or:
+      ASSEMBLE_BINOP(OrP, OrP);
+      break;
+    case kS390_OrComplement:
+      __ NotP(i.InputRegister(1));
+      __ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kS390_Xor:
+      ASSEMBLE_BINOP(XorP, XorP);
+      break;
+    case kS390_ShiftLeft32:
+      if (HasRegisterInput(instr, 1)) {
+        if (i.OutputRegister().is(i.InputRegister(1)) &&
+            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+          __ LoadRR(kScratchReg, i.InputRegister(1));
+          __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+        } else {
+          ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+        }
+      } else {
+        ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+      }
+      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ShiftLeft64:
+      ASSEMBLE_BINOP(sllg, sllg);
+      break;
+#endif
+    case kS390_ShiftRight32:
+      if (HasRegisterInput(instr, 1)) {
+        if (i.OutputRegister().is(i.InputRegister(1)) &&
+            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+          __ LoadRR(kScratchReg, i.InputRegister(1));
+          __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+        } else {
+          ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+        }
+      } else {
+        ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+      }
+      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ShiftRight64:
+      ASSEMBLE_BINOP(srlg, srlg);
+      break;
+#endif
+    case kS390_ShiftRightArith32:
+      if (HasRegisterInput(instr, 1)) {
+        if (i.OutputRegister().is(i.InputRegister(1)) &&
+            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+          __ LoadRR(kScratchReg, i.InputRegister(1));
+          __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
+                             kScratchReg);
+        } else {
+          ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+        }
+      } else {
+        ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+      }
+      __ LoadlW(i.OutputRegister(), i.OutputRegister());
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ShiftRightArith64:
+      ASSEMBLE_BINOP(srag, srag);
+      break;
+#endif
+#if !V8_TARGET_ARCH_S390X
+    case kS390_AddPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
+                      i.InputRegister(2));
+      __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
+                               i.InputRegister(3));
+      break;
+    case kS390_SubPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
+                      i.InputRegister(2));
+      __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
+                                i.InputRegister(3));
+      break;
+    case kS390_MulPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
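+      // Build 64-bit values from the 32-bit halves, do a 64-bit multiply,
+      // then split the product back into low/high words.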
+      __ sllg(r0, i.InputRegister(1), Operand(32));
+      __ sllg(r1, i.InputRegister(3), Operand(32));
+      __ lr(r0, i.InputRegister(0));
+      __ lr(r1, i.InputRegister(2));
+      __ msgr(r1, r0);
+      __ lr(i.OutputRegister(0), r1);
+      __ srag(i.OutputRegister(1), r1, Operand(32));
+      break;
+    case kS390_ShiftLeftPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1),
+                         i.InputInt32(2));
+      } else {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                         i.InputRegister(2));
+      }
+      break;
+    case kS390_ShiftRightPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1),
+                          i.InputInt32(2));
+      } else {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                          i.InputRegister(2));
+      }
+      break;
+    case kS390_ShiftRightArithPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+                               i.InputRegister(0), i.InputRegister(1),
+                               i.InputInt32(2));
+      } else {
+        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+                               i.InputRegister(0), i.InputRegister(1),
+                               kScratchReg, i.InputRegister(2));
+      }
+      break;
+#endif
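+    // Rotate right is emitted as a rotate left by the complemented amount
+    // (32 - n for an immediate count, the negated count for a register).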
+    case kS390_RotRight32:
+      if (HasRegisterInput(instr, 1)) {
+        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+        __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+      } else {
+        __ rll(i.OutputRegister(), i.InputRegister(0),
+               Operand(32 - i.InputInt32(1)));
+      }
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_RotRight64:
+      if (HasRegisterInput(instr, 1)) {
+        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+        __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+      } else {
+        __ rllg(i.OutputRegister(), i.InputRegister(0),
+                Operand(64 - i.InputInt32(1)));
+      }
+      break;
+#endif
+    case kS390_Not:
+      __ LoadRR(i.OutputRegister(), i.InputRegister(0));
+      __ NotP(i.OutputRegister());
+      break;
+    case kS390_RotLeftAndMask32:
+      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+        int shiftAmount = i.InputInt32(1);
+        int endBit = 63 - i.InputInt32(3);
+        int startBit = 63 - i.InputInt32(2);
+        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
+                 Operand(endBit), Operand::Zero(), true);
+      } else {
+        int shiftAmount = i.InputInt32(1);
+        int clearBitLeft = 63 - i.InputInt32(2);
+        int clearBitRight = i.InputInt32(3);
+        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
+        __ srlg(i.OutputRegister(), i.OutputRegister(),
+                Operand((clearBitLeft + clearBitRight)));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
+      }
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_RotLeftAndClear64:
+      UNIMPLEMENTED();  // Find correct instruction
+      break;
+    case kS390_RotLeftAndClearLeft64:
+      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+        int shiftAmount = i.InputInt32(1);
+        int endBit = 63;
+        int startBit = 63 - i.InputInt32(2);
+        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+                 Operand(endBit), Operand(shiftAmount), true);
+      } else {
+        int shiftAmount = i.InputInt32(1);
+        int clearBit = 63 - i.InputInt32(2);
+        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+      }
+      break;
+    case kS390_RotLeftAndClearRight64:
+      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+        int shiftAmount = i.InputInt32(1);
+        int endBit = 63 - i.InputInt32(2);
+        int startBit = 0;
+        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+                 Operand(endBit), Operand(shiftAmount), true);
+      } else {
+        int shiftAmount = i.InputInt32(1);
+        int clearBit = i.InputInt32(2);
+        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+      }
+      break;
+#endif
+    case kS390_Add:
+#if V8_TARGET_ARCH_S390X
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_ADD_WITH_OVERFLOW();
+      } else {
+#endif
+        ASSEMBLE_BINOP(AddP, AddP);
+#if V8_TARGET_ARCH_S390X
+      }
+#endif
+      break;
+    case kS390_AddWithOverflow32:
+      ASSEMBLE_ADD_WITH_OVERFLOW32();
+      break;
+    case kS390_AddFloat:
+      // Ensure we don't clobber right/InputReg(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(aebr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_AddDouble:
+      // Ensure we don't clobber right/InputReg(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(adbr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_Sub:
+#if V8_TARGET_ARCH_S390X
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_SUB_WITH_OVERFLOW();
+      } else {
+#endif
+        ASSEMBLE_BINOP(SubP, SubP);
+#if V8_TARGET_ARCH_S390X
+      }
+#endif
+      break;
+    case kS390_SubWithOverflow32:
+      ASSEMBLE_SUB_WITH_OVERFLOW32();
+      break;
+    case kS390_SubFloat:
+      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        }
+        __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_SubDouble:
+      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        }
+        __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_Mul32:
+#if V8_TARGET_ARCH_S390X
+    case kS390_Mul64:
+#endif
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
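+    // MulHigh32/MulHighU32: the 32x32 multiply produces a 64-bit product in
+    // the r0:r1 register pair; the high 32 bits are read back from r0.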
+    case kS390_MulHigh32:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ mr_z(r0, i.InputRegister(1));
+      __ LoadW(i.OutputRegister(), r0);
+      break;
+    case kS390_MulHighU32:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ mlr(r0, i.InputRegister(1));
+      __ LoadlW(i.OutputRegister(), r0);
+      break;
+    case kS390_MulFloat:
+      // Ensure we don't clobber right
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(meebr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_MulDouble:
+      // Ensure we don't clobber right
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(mdbr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Div64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
+      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
+      break;
+#endif
+    case kS390_Div32:
+      __ LoadRR(r0, i.InputRegister(0));
+      __ srda(r0, Operand(32));
+      __ dr(r0, i.InputRegister(1));
+      __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
+                                r1);  // Copy R1: Quotient to output
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_DivU64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ LoadImmP(r0, Operand::Zero());
+      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
+      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
+      break;
+#endif
+    case kS390_DivU32:
+      __ LoadRR(r0, i.InputRegister(0));
+      __ srdl(r0, Operand(32));
+      __ dlr(r0, i.InputRegister(1));  // R0:R1: Dividend
+      __ LoadlW(i.OutputRegister(), r1);  // Copy R1: Quotient to output
+      __ LoadAndTestP_ExtendSrc(r1, r1);
+      break;
+
+    case kS390_DivFloat:
+      // OutputDoubleReg() = i.InputDoubleRegister(0) / i.InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_DivDouble:
+      // OutputDoubleReg() = i.InputDoubleRegister(0) / i.InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_Mod32:
+      ASSEMBLE_MODULO(dr, srda);
+      break;
+    case kS390_ModU32:
+      ASSEMBLE_MODULO(dlr, srdl);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Mod64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
+      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
+      break;
+    case kS390_ModU64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ LoadImmP(r0, Operand::Zero());
+      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
+      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
+      break;
+#endif
+    case kS390_AbsFloat:
+      __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_SqrtFloat:
+      ASSEMBLE_FLOAT_UNOP(sqebr);
+      break;
+    case kS390_FloorFloat:
+      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+      break;
+    case kS390_CeilFloat:
+      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+      break;
+    case kS390_TruncateFloat:
+      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+      break;
+    //  Double operations
+    case kS390_ModDouble:
+      ASSEMBLE_FLOAT_MODULO();
+      break;
+    case kS390_Neg:
+      __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_MaxDouble:
+      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
+      break;
+    case kS390_MinDouble:
+      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
+      break;
+    case kS390_AbsDouble:
+      __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_SqrtDouble:
+      ASSEMBLE_FLOAT_UNOP(sqdbr);
+      break;
+    case kS390_FloorDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+      break;
+    case kS390_CeilDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+      break;
+    case kS390_TruncateDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+      break;
+    case kS390_RoundDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
+      break;
+    case kS390_NegDouble:
+      ASSEMBLE_FLOAT_UNOP(lcdbr);
+      break;
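+    // Cntlz32: flogr operates on 64-bit values, so the input is first
+    // zero-extended and 32 is subtracted from the returned bit position.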
+    case kS390_Cntlz32: {
+      __ llgfr(i.OutputRegister(), i.InputRegister(0));
+      __ flogr(r0, i.OutputRegister());
+      __ LoadRR(i.OutputRegister(), r0);
+      __ SubP(i.OutputRegister(), Operand(32));
+    } break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Cntlz64: {
+      __ flogr(r0, i.InputRegister(0));
+      __ LoadRR(i.OutputRegister(), r0);
+    } break;
+#endif
+    case kS390_Popcnt32:
+      __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Popcnt64:
+      __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
+      break;
+#endif
+    case kS390_Cmp32:
+      ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Cmp64:
+      ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
+      break;
+#endif
+    case kS390_CmpFloat:
+      __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kS390_CmpDouble:
+      __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kS390_Tst32:
+      if (HasRegisterInput(instr, 1)) {
+        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+      } else {
+        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+      }
+      __ LoadAndTestP_ExtendSrc(r0, r0);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Tst64:
+      if (HasRegisterInput(instr, 1)) {
+        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+      } else {
+        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+      }
+      break;
+#endif
+    case kS390_Push:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+        __ lay(sp, MemOperand(sp, -kDoubleSize));
+        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+      } else {
+        __ Push(i.InputRegister(0));
+        frame_access_state()->IncreaseSPDelta(1);
+      }
+      break;
+    case kS390_PushFrame: {
+      int num_slots = i.InputInt32(1);
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ StoreDouble(i.InputDoubleRegister(0),
+                       MemOperand(sp, -num_slots * kPointerSize));
+      } else {
+        __ StoreP(i.InputRegister(0),
+                  MemOperand(sp, -num_slots * kPointerSize));
+      }
+      __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
+      break;
+    }
+    case kS390_StoreToStackSlot: {
+      int slot = i.InputInt32(1);
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ StoreDouble(i.InputDoubleRegister(0),
+                       MemOperand(sp, slot * kPointerSize));
+      } else {
+        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+      }
+      break;
+    }
+    case kS390_ExtendSignWord8:
+#if V8_TARGET_ARCH_S390X
+      __ lgbr(i.OutputRegister(), i.InputRegister(0));
+#else
+      __ lbr(i.OutputRegister(), i.InputRegister(0));
+#endif
+      break;
+    case kS390_ExtendSignWord16:
+#if V8_TARGET_ARCH_S390X
+      __ lghr(i.OutputRegister(), i.InputRegister(0));
+#else
+      __ lhr(i.OutputRegister(), i.InputRegister(0));
+#endif
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ExtendSignWord32:
+      __ lgfr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_Uint32ToUint64:
+      // Zero extend
+      __ llgfr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_Int64ToInt32:
+      // sign extend
+      __ lgfr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_Int64ToFloat32:
+      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Int64ToDouble:
+      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Uint64ToFloat32:
+      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
+                                     i.OutputDoubleRegister());
+      break;
+    case kS390_Uint64ToDouble:
+      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
+                                      i.OutputDoubleRegister());
+      break;
+#endif
+    case kS390_Int32ToFloat32:
+      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Int32ToDouble:
+      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Uint32ToFloat32:
+      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
+                                   i.OutputDoubleRegister());
+      break;
+    case kS390_Uint32ToDouble:
+      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
+                                    i.OutputDoubleRegister());
+      break;
+    case kS390_DoubleToInt32:
+    case kS390_DoubleToUint32:
+    case kS390_DoubleToInt64: {
+#if V8_TARGET_ARCH_S390X
+      bool check_conversion =
+          (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
+#endif
+      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_S390X
+                              kScratchReg,
+#endif
+                              i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_S390X
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+#endif
+      break;
+    }
+    case kS390_Float32ToInt32: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
+                               kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+    case kS390_Float32ToUint32: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
+                                       i.OutputRegister(0), kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+#if V8_TARGET_ARCH_S390X
+    case kS390_Float32ToUint64: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
+                                       i.OutputRegister(0), kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+#endif
+    case kS390_Float32ToInt64: {
+#if V8_TARGET_ARCH_S390X
+      bool check_conversion =
+          (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
+#endif
+      __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_S390X
+                               kScratchReg,
+#endif
+                               i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_S390X
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+#endif
+      break;
+    }
+#if V8_TARGET_ARCH_S390X
+    case kS390_DoubleToUint64: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
+                                      i.OutputRegister(0), kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+#endif
+    case kS390_DoubleToFloat32:
+      __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_Float32ToDouble:
+      __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_DoubleExtractLowWord32:
+      // TODO(john.yan): this can cause problems if interrupted;
+      //                 use a freg->greg instruction.
+      __ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+      __ LoadlW(i.OutputRegister(),
+                MemOperand(sp, -kDoubleSize + Register::kMantissaOffset));
+      break;
+    case kS390_DoubleExtractHighWord32:
+      // TODO(john.yan): this can cause problems if interrupted;
+      //                 use a freg->greg instruction.
+      __ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+      __ LoadlW(i.OutputRegister(),
+                MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
+      break;
+    case kS390_DoubleInsertLowWord32:
+      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1));
+      break;
+    case kS390_DoubleInsertHighWord32:
+      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1));
+      break;
+    case kS390_DoubleConstruct:
+// TODO(john.yan): this can cause problems if interrupted;
+//                 use a greg->freg instruction.
+#if V8_TARGET_LITTLE_ENDIAN
+      __ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize / 2));
+      __ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize));
+#else
+      __ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize / 2));
+      __ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize));
+#endif
+      __ ldy(i.OutputDoubleRegister(), MemOperand(sp, -kDoubleSize));
+      break;
+    case kS390_LoadWordS8:
+      ASSEMBLE_LOAD_INTEGER(LoadlB);
+#if V8_TARGET_ARCH_S390X
+      __ lgbr(i.OutputRegister(), i.OutputRegister());
+#else
+      __ lbr(i.OutputRegister(), i.OutputRegister());
+#endif
+      break;
+    case kS390_BitcastFloat32ToInt32:
+      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_BitcastInt32ToFloat32:
+      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_BitcastDoubleToInt64:
+      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_BitcastInt64ToDouble:
+      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+#endif
+    case kS390_LoadWordU8:
+      ASSEMBLE_LOAD_INTEGER(LoadlB);
+      break;
+    case kS390_LoadWordU16:
+      ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
+      break;
+    case kS390_LoadWordS16:
+      ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
+      break;
+    case kS390_LoadWordS32:
+      ASSEMBLE_LOAD_INTEGER(LoadW);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_LoadWord64:
+      ASSEMBLE_LOAD_INTEGER(lg);
+      break;
+#endif
+    case kS390_LoadFloat32:
+      ASSEMBLE_LOAD_FLOAT(LoadFloat32);
+      break;
+    case kS390_LoadDouble:
+      ASSEMBLE_LOAD_FLOAT(LoadDouble);
+      break;
+    case kS390_StoreWord8:
+      ASSEMBLE_STORE_INTEGER(StoreByte);
+      break;
+    case kS390_StoreWord16:
+      ASSEMBLE_STORE_INTEGER(StoreHalfWord);
+      break;
+    case kS390_StoreWord32:
+      ASSEMBLE_STORE_INTEGER(StoreW);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_StoreWord64:
+      ASSEMBLE_STORE_INTEGER(StoreP);
+      break;
+#endif
+    case kS390_StoreFloat32:
+      ASSEMBLE_STORE_FLOAT32();
+      break;
+    case kS390_StoreDouble:
+      ASSEMBLE_STORE_DOUBLE();
+      break;
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
+#if V8_TARGET_ARCH_S390X
+      __ lgbr(i.OutputRegister(), i.OutputRegister());
+#else
+      __ lbr(i.OutputRegister(), i.OutputRegister());
+#endif
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
+      break;
+    case kCheckedLoadWord64:
+#if V8_TARGET_ARCH_S390X
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
+#else
+      UNREACHABLE();
+#endif
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
+      break;
+    case kCheckedStoreWord64:
+#if V8_TARGET_ARCH_S390X
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
+#else
+      UNREACHABLE();
+#endif
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT32();
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_DOUBLE();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}  // NOLINT(readability/fn_size)
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  S390OperandConverter i(this, instr);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  ArchOpcode op = instr->arch_opcode();
+  FlagsCondition condition = branch->condition;
+
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kS390_CmpDouble) {
+    // Check for unordered (NaN) operands if necessary, branching to
+    // flabel/tlabel according to what the tests expect.
+    if (cond == le || cond == eq || cond == lt) {
+      __ bunordered(flabel);
+    } else if (cond == gt || cond == ne || cond == ge) {
+      __ bunordered(tlabel);
+    }
+  }
+  __ b(cond, tlabel);
+  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+}
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  S390OperandConverter i(this, instr);
+  Label done;
+  ArchOpcode op = instr->arch_opcode();
+  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
+
+  // Overflow checked for add/sub only.
+  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
+         (op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32) ||
+         (op == kS390_Add || op == kS390_Sub));
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  DCHECK_NE(0u, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cond = FlagsConditionToCondition(condition, op);
+  switch (cond) {
+    case ne:
+    case ge:
+    case gt:
+      if (check_unordered) {
+        __ LoadImmP(reg, Operand(1));
+        __ LoadImmP(kScratchReg, Operand::Zero());
+        __ bunordered(&done);
+        Label cond_true;
+        __ b(cond, &cond_true, Label::kNear);
+        __ LoadRR(reg, kScratchReg);
+        __ bind(&cond_true);
+      } else {
+        Label cond_true;
+        __ LoadImmP(reg, Operand(1));
+        __ b(cond, &cond_true, Label::kNear);
+        __ LoadImmP(reg, Operand::Zero());
+        __ bind(&cond_true);
+      }
+      break;
+    case eq:
+    case lt:
+    case le:
+      if (check_unordered) {
+        __ LoadImmP(reg, Operand::Zero());
+        __ LoadImmP(kScratchReg, Operand(1));
+        __ bunordered(&done);
+        Label cond_false;
+        __ b(NegateCondition(cond), &cond_false, Label::kNear);
+        __ LoadRR(reg, kScratchReg);
+        __ bind(&cond_false);
+      } else {
+        __ LoadImmP(reg, Operand::Zero());
+        Label cond_false;
+        __ b(NegateCondition(cond), &cond_false, Label::kNear);
+        __ LoadImmP(reg, Operand(1));
+        __ bind(&cond_false);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  __ bind(&done);
+}
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+  S390OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
+    __ CmpP(input, Operand(i.InputInt32(index + 0)));
+    __ beq(GetLabel(i.InputRpo(index + 1)));
+  }
+  AssembleArchJump(i.InputRpo(1));
+}
+
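+// The table switch bounds-checks the input against the case count (branching
+// to the default target otherwise), then loads the destination label address
+// from a jump table emitted via AddJumpTable and jumps through kScratchReg.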
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+  S390OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+  Label** cases = zone()->NewArray<Label*>(case_count);
+  for (int32_t index = 0; index < case_count; ++index) {
+    cases[index] = GetLabel(i.InputRpo(index + 2));
+  }
+  Label* const table = AddJumpTable(cases, case_count);
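+  // Bounds-check the input against the case count, then index into the jump
+  // table of pointer-sized entries and branch indirectly.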
+  __ CmpLogicalP(input, Operand(case_count));
+  __ bge(GetLabel(i.InputRpo(1)));
+  __ larl(kScratchReg, table);
+  __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
+  __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
+  __ Jump(kScratchReg);
+}
+
+void CodeGenerator::AssembleDeoptimizerCall(
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, bailout_type);
+  // TODO(turbofan): We should be able to generate better code by sharing the
+  // actual final call site and just branching to it here, similar to what we
+  // do in the lithium backend.
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ Push(r14, fp);
+      __ LoadRR(fp, sp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+    } else {
+      StackFrame::Type type = info()->GetOutputStackFrameType();
+      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
+      // efficient initialization of the constant pool pointer register).
+      __ StubPrologue(type);
+    }
+  }
+
+  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  if (info()->is_osr()) {
+    // TurboFan OSR-compiled functions cannot be entered directly.
+    __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+    // Unoptimized code jumps directly to this entrypoint while the unoptimized
+    // frame is still on the stack. Optimized code uses OSR values directly from
+    // the unoptimized frame. Thus, all that needs to be done is to allocate the
+    // remaining stack slots.
+    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+    osr_pc_offset_ = __ pc_offset();
+    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+  }
+
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+  if (double_saves != 0) {
+    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+  }
+  if (stack_shrink_slots > 0) {
+    __ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
+  }
+
+  // Save callee-saved Double registers.
+  if (double_saves != 0) {
+    __ MultiPushDoubles(double_saves);
+    DCHECK(kNumCalleeSavedDoubles ==
+           base::bits::CountPopulation32(double_saves));
+    frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+                                              (kDoubleSize / kPointerSize));
+  }
+
+  // Save callee-saved registers.
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    __ MultiPush(saves);
+    // The register save area does not include fp or the constant pool pointer.
+    const int num_saves =
+        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+    DCHECK(num_saves == base::bits::CountPopulation32(saves));
+    frame()->AllocateSavedCalleeRegisterSlots(num_saves);
+  }
+}
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+  // Restore registers.
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    __ MultiPop(saves);
+  }
+
+  // Restore double registers.
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+  if (double_saves != 0) {
+    __ MultiPopDoubles(double_saves);
+  }
+
+  if (descriptor->IsCFunctionCall()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now.
+    if (return_label_.is_bound()) {
+      __ b(&return_label_);
+      return;
+    } else {
+      __ bind(&return_label_);
+      AssembleDeconstructFrame();
+    }
+  }
+  __ Ret(pop_count);
+}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  S390OperandConverter g(this, nullptr);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ Move(g.ToRegister(destination), src);
+    } else {
+      __ StoreP(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ LoadP(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ LoadP(temp, src, r0);
+      __ StoreP(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ mov(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kInt64:
+          __ mov(dst, Operand(src.ToInt64()));
+          break;
+        case Constant::kFloat32:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ mov(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject: {
+          Handle<HeapObject> src_object = src.ToHeapObject();
+          Heap::RootListIndex index;
+          int slot;
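+          // Prefer rematerializing the constant from the current frame or the
+          // root list over embedding the heap object handle in the code.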
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ LoadP(dst, g.SlotToMemOperand(slot));
+          } else if (IsMaterializableFromRoot(src_object, &index)) {
+            __ LoadRoot(dst, index);
+          } else {
+            __ Move(dst, src_object);
+          }
+          break;
+        }
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
+          break;
+      }
+      if (destination->IsStackSlot()) {
+        __ StoreP(dst, g.ToMemOperand(destination), r0);
+      }
+    } else {
+      DoubleRegister dst = destination->IsDoubleRegister()
+                               ? g.ToDoubleRegister(destination)
+                               : kScratchDoubleReg;
+      if (src.type() == Constant::kFloat32) {
+        __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
+      } else {
+        __ LoadDoubleLiteral(dst, src.ToFloat64(), kScratchReg);
+      }
+
+      if (destination->IsDoubleStackSlot()) {
+        __ StoreDouble(dst, g.ToMemOperand(destination));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DoubleRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ StoreDouble(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ LoadDouble(g.ToDoubleRegister(destination), src);
+    } else {
+      DoubleRegister temp = kScratchDoubleReg;
+      __ LoadDouble(temp, src);
+      __ StoreDouble(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  S390OperandConverter g(this, nullptr);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ LoadRR(temp, src);
+      __ LoadRR(src, dst);
+      __ LoadRR(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ LoadRR(temp, src);
+      __ LoadP(src, dst);
+      __ StoreP(temp, dst);
+    }
+#if V8_TARGET_ARCH_S390X
+  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+#else
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+#endif
+    Register temp_0 = kScratchReg;
+    Register temp_1 = r0;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ LoadP(temp_0, src);
+    __ LoadP(temp_1, dst);
+    __ StoreP(temp_0, dst);
+    __ StoreP(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister temp = kScratchDoubleReg;
+    DoubleRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DoubleRegister dst = g.ToDoubleRegister(destination);
+      __ ldr(temp, src);
+      __ ldr(src, dst);
+      __ ldr(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ ldr(temp, src);
+      __ LoadDouble(src, dst);
+      __ StoreDouble(temp, dst);
+    }
+#if !V8_TARGET_ARCH_S390X
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    DoubleRegister temp_0 = kScratchDoubleReg;
+    DoubleRegister temp_1 = d0;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    // TODO(joransiu): MVC opportunity
+    __ LoadDouble(temp_0, src);
+    __ LoadDouble(temp_1, dst);
+    __ StoreDouble(temp_0, dst);
+    __ StoreDouble(temp_1, src);
+#endif
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+  for (size_t index = 0; index < target_count; ++index) {
+    __ emit_label_addr(targets[index]);
+  }
+}
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // We do not insert nops for inlined Smi code.
+}
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+    return;
+  }
+
+  int space_needed = Deoptimizer::patch_size();
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+    DCHECK_EQ(0, padding_size % 2);
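+    // The padding loop assumes each nop() emits exactly 2 bytes, the minimum
+    // S390 instruction length.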
+    while (padding_size > 0) {
+      __ nop();
+      padding_size -= 2;
+    }
+  }
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
new file mode 100644
index 0000000..a32f875
--- /dev/null
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -0,0 +1,160 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
+#define V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// S390-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(S390_And)                      \
+  V(S390_AndComplement)            \
+  V(S390_Or)                       \
+  V(S390_OrComplement)             \
+  V(S390_Xor)                      \
+  V(S390_ShiftLeft32)              \
+  V(S390_ShiftLeft64)              \
+  V(S390_ShiftLeftPair)            \
+  V(S390_ShiftRight32)             \
+  V(S390_ShiftRight64)             \
+  V(S390_ShiftRightPair)           \
+  V(S390_ShiftRightArith32)        \
+  V(S390_ShiftRightArith64)        \
+  V(S390_ShiftRightArithPair)      \
+  V(S390_RotRight32)               \
+  V(S390_RotRight64)               \
+  V(S390_Not)                      \
+  V(S390_RotLeftAndMask32)         \
+  V(S390_RotLeftAndClear64)        \
+  V(S390_RotLeftAndClearLeft64)    \
+  V(S390_RotLeftAndClearRight64)   \
+  V(S390_Add)                      \
+  V(S390_AddWithOverflow32)        \
+  V(S390_AddPair)                  \
+  V(S390_AddFloat)                 \
+  V(S390_AddDouble)                \
+  V(S390_Sub)                      \
+  V(S390_SubWithOverflow32)        \
+  V(S390_SubFloat)                 \
+  V(S390_SubDouble)                \
+  V(S390_SubPair)                  \
+  V(S390_MulPair)                  \
+  V(S390_Mul32)                    \
+  V(S390_Mul64)                    \
+  V(S390_MulHigh32)                \
+  V(S390_MulHighU32)               \
+  V(S390_MulFloat)                 \
+  V(S390_MulDouble)                \
+  V(S390_Div32)                    \
+  V(S390_Div64)                    \
+  V(S390_DivU32)                   \
+  V(S390_DivU64)                   \
+  V(S390_DivFloat)                 \
+  V(S390_DivDouble)                \
+  V(S390_Mod32)                    \
+  V(S390_Mod64)                    \
+  V(S390_ModU32)                   \
+  V(S390_ModU64)                   \
+  V(S390_ModDouble)                \
+  V(S390_Neg)                      \
+  V(S390_NegDouble)                \
+  V(S390_SqrtFloat)                \
+  V(S390_FloorFloat)               \
+  V(S390_CeilFloat)                \
+  V(S390_TruncateFloat)            \
+  V(S390_AbsFloat)                 \
+  V(S390_SqrtDouble)               \
+  V(S390_FloorDouble)              \
+  V(S390_CeilDouble)               \
+  V(S390_TruncateDouble)           \
+  V(S390_RoundDouble)              \
+  V(S390_MaxDouble)                \
+  V(S390_MinDouble)                \
+  V(S390_AbsDouble)                \
+  V(S390_Cntlz32)                  \
+  V(S390_Cntlz64)                  \
+  V(S390_Popcnt32)                 \
+  V(S390_Popcnt64)                 \
+  V(S390_Cmp32)                    \
+  V(S390_Cmp64)                    \
+  V(S390_CmpFloat)                 \
+  V(S390_CmpDouble)                \
+  V(S390_Tst32)                    \
+  V(S390_Tst64)                    \
+  V(S390_Push)                     \
+  V(S390_PushFrame)                \
+  V(S390_StoreToStackSlot)         \
+  V(S390_ExtendSignWord8)          \
+  V(S390_ExtendSignWord16)         \
+  V(S390_ExtendSignWord32)         \
+  V(S390_Uint32ToUint64)           \
+  V(S390_Int64ToInt32)             \
+  V(S390_Int64ToFloat32)           \
+  V(S390_Int64ToDouble)            \
+  V(S390_Uint64ToFloat32)          \
+  V(S390_Uint64ToDouble)           \
+  V(S390_Int32ToFloat32)           \
+  V(S390_Int32ToDouble)            \
+  V(S390_Uint32ToFloat32)          \
+  V(S390_Uint32ToDouble)           \
+  V(S390_Float32ToInt64)           \
+  V(S390_Float32ToUint64)          \
+  V(S390_Float32ToInt32)           \
+  V(S390_Float32ToUint32)          \
+  V(S390_Float32ToDouble)          \
+  V(S390_DoubleToInt32)            \
+  V(S390_DoubleToUint32)           \
+  V(S390_DoubleToInt64)            \
+  V(S390_DoubleToUint64)           \
+  V(S390_DoubleToFloat32)          \
+  V(S390_DoubleExtractLowWord32)   \
+  V(S390_DoubleExtractHighWord32)  \
+  V(S390_DoubleInsertLowWord32)    \
+  V(S390_DoubleInsertHighWord32)   \
+  V(S390_DoubleConstruct)          \
+  V(S390_BitcastInt32ToFloat32)    \
+  V(S390_BitcastFloat32ToInt32)    \
+  V(S390_BitcastInt64ToDouble)     \
+  V(S390_BitcastDoubleToInt64)     \
+  V(S390_LoadWordS8)               \
+  V(S390_LoadWordU8)               \
+  V(S390_LoadWordS16)              \
+  V(S390_LoadWordU16)              \
+  V(S390_LoadWordS32)              \
+  V(S390_LoadWord64)               \
+  V(S390_LoadFloat32)              \
+  V(S390_LoadDouble)               \
+  V(S390_StoreWord8)               \
+  V(S390_StoreWord16)              \
+  V(S390_StoreWord32)              \
+  V(S390_StoreWord64)              \
+  V(S390_StoreFloat32)             \
+  V(S390_StoreDouble)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
new file mode 100644
index 0000000..2d98e11
--- /dev/null
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -0,0 +1,163 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+    const Instruction* instr) const {
+  switch (instr->arch_opcode()) {
+    case kS390_And:
+    case kS390_AndComplement:
+    case kS390_Or:
+    case kS390_OrComplement:
+    case kS390_Xor:
+    case kS390_ShiftLeft32:
+    case kS390_ShiftLeft64:
+    case kS390_ShiftLeftPair:
+    case kS390_ShiftRight32:
+    case kS390_ShiftRight64:
+    case kS390_ShiftRightPair:
+    case kS390_ShiftRightArith32:
+    case kS390_ShiftRightArith64:
+    case kS390_ShiftRightArithPair:
+    case kS390_RotRight32:
+    case kS390_RotRight64:
+    case kS390_Not:
+    case kS390_RotLeftAndMask32:
+    case kS390_RotLeftAndClear64:
+    case kS390_RotLeftAndClearLeft64:
+    case kS390_RotLeftAndClearRight64:
+    case kS390_Add:
+    case kS390_AddWithOverflow32:
+    case kS390_AddPair:
+    case kS390_AddFloat:
+    case kS390_AddDouble:
+    case kS390_Sub:
+    case kS390_SubWithOverflow32:
+    case kS390_SubPair:
+    case kS390_MulPair:
+    case kS390_SubFloat:
+    case kS390_SubDouble:
+    case kS390_Mul32:
+    case kS390_Mul64:
+    case kS390_MulHigh32:
+    case kS390_MulHighU32:
+    case kS390_MulFloat:
+    case kS390_MulDouble:
+    case kS390_Div32:
+    case kS390_Div64:
+    case kS390_DivU32:
+    case kS390_DivU64:
+    case kS390_DivFloat:
+    case kS390_DivDouble:
+    case kS390_Mod32:
+    case kS390_Mod64:
+    case kS390_ModU32:
+    case kS390_ModU64:
+    case kS390_ModDouble:
+    case kS390_Neg:
+    case kS390_NegDouble:
+    case kS390_SqrtFloat:
+    case kS390_FloorFloat:
+    case kS390_CeilFloat:
+    case kS390_TruncateFloat:
+    case kS390_AbsFloat:
+    case kS390_SqrtDouble:
+    case kS390_FloorDouble:
+    case kS390_CeilDouble:
+    case kS390_TruncateDouble:
+    case kS390_RoundDouble:
+    case kS390_MaxDouble:
+    case kS390_MinDouble:
+    case kS390_AbsDouble:
+    case kS390_Cntlz32:
+    case kS390_Cntlz64:
+    case kS390_Popcnt32:
+    case kS390_Popcnt64:
+    case kS390_Cmp32:
+    case kS390_Cmp64:
+    case kS390_CmpFloat:
+    case kS390_CmpDouble:
+    case kS390_Tst32:
+    case kS390_Tst64:
+    case kS390_ExtendSignWord8:
+    case kS390_ExtendSignWord16:
+    case kS390_ExtendSignWord32:
+    case kS390_Uint32ToUint64:
+    case kS390_Int64ToInt32:
+    case kS390_Int64ToFloat32:
+    case kS390_Int64ToDouble:
+    case kS390_Uint64ToFloat32:
+    case kS390_Uint64ToDouble:
+    case kS390_Int32ToFloat32:
+    case kS390_Int32ToDouble:
+    case kS390_Uint32ToFloat32:
+    case kS390_Uint32ToDouble:
+    case kS390_Float32ToInt32:
+    case kS390_Float32ToUint32:
+    case kS390_Float32ToUint64:
+    case kS390_Float32ToDouble:
+    case kS390_DoubleToInt32:
+    case kS390_DoubleToUint32:
+    case kS390_Float32ToInt64:
+    case kS390_DoubleToInt64:
+    case kS390_DoubleToUint64:
+    case kS390_DoubleToFloat32:
+    case kS390_DoubleExtractLowWord32:
+    case kS390_DoubleExtractHighWord32:
+    case kS390_DoubleInsertLowWord32:
+    case kS390_DoubleInsertHighWord32:
+    case kS390_DoubleConstruct:
+    case kS390_BitcastInt32ToFloat32:
+    case kS390_BitcastFloat32ToInt32:
+    case kS390_BitcastInt64ToDouble:
+    case kS390_BitcastDoubleToInt64:
+      return kNoOpcodeFlags;
+
+    case kS390_LoadWordS8:
+    case kS390_LoadWordU8:
+    case kS390_LoadWordS16:
+    case kS390_LoadWordU16:
+    case kS390_LoadWordS32:
+    case kS390_LoadWord64:
+    case kS390_LoadFloat32:
+    case kS390_LoadDouble:
+      return kIsLoadOperation;
+
+    case kS390_StoreWord8:
+    case kS390_StoreWord16:
+    case kS390_StoreWord32:
+    case kS390_StoreWord64:
+    case kS390_StoreFloat32:
+    case kS390_StoreDouble:
+    case kS390_Push:
+    case kS390_PushFrame:
+    case kS390_StoreToStackSlot:
+      return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+      COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+      // Already covered in architecture independent code.
+      UNREACHABLE();
+  }
+
+  UNREACHABLE();
+  return kNoOpcodeFlags;
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+  // TODO(all): Add instruction cost modeling.
+  return 1;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
new file mode 100644
index 0000000..8a4af5e
--- /dev/null
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -0,0 +1,1769 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/s390/frames-s390.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+  kInt16Imm,
+  kInt16Imm_Unsigned,
+  kInt16Imm_Negate,
+  kInt16Imm_4ByteAligned,
+  kShift32Imm,
+  kShift64Imm,
+  kNoImmediate
+};
+
+// Adds S390-specific methods for generating operands.
+class S390OperandGenerator final : public OperandGenerator {
+ public:
+  explicit S390OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+    if (CanBeImmediate(node, mode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    return CanBeImmediate(value, mode);
+  }
+
+  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
+    switch (mode) {
+      case kInt16Imm:
+        return is_int16(value);
+      case kInt16Imm_Unsigned:
+        return is_uint16(value);
+      case kInt16Imm_Negate:
+        return is_int16(-value);
+      case kInt16Imm_4ByteAligned:
+        return is_int16(value) && !(value & 3);
+      case kShift32Imm:
+        return 0 <= value && value < 32;
+      case kShift64Imm:
+        return 0 <= value && value < 64;
+      case kNoImmediate:
+        return false;
+    }
+    return false;
+  }
+};
+
+namespace {
+
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  S390OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  S390OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+              ImmediateMode operand_mode) {
+  S390OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  S390OperandGenerator g(selector);
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+  outputs[output_count++] = g.DefineAsRegister(node);
+
+  Node* success_output = NodeProperties::FindProjection(node, 1);
+  if (success_output) {
+    outputs[output_count++] = g.DefineAsRegister(success_output);
+  }
+
+  selector->Emit(opcode, output_count, outputs, 1, inputs);
+}
+#endif
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, ImmediateMode operand_mode,
+                FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Matcher m(node);
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
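+  // A branch continuation consumes the true/false block labels as extra
+  // inputs; a set continuation defines an extra boolean output below.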
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0u, input_count);
+  DCHECK_NE(0u, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
+}
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+                ImmediateMode operand_mode) {
+  FlagsContinuation cont;
+  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+}  // namespace
+
+void InstructionSelector::VisitLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* offset = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  ImmediateMode mode = kInt16Imm;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      opcode = kS390_LoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kS390_LoadDouble;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
+      break;
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTagged:  // Fall through.
+#endif
+    case MachineRepresentation::kWord32:
+      opcode = kS390_LoadWordS32;
+#if V8_TARGET_ARCH_S390X
+      // TODO(john.yan): Remove this mode since s390 does not have this
+      // restriction.
+      mode = kInt16Imm_4ByteAligned;
+#endif
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kWord64:
+      opcode = kS390_LoadWord64;
+      mode = kInt16Imm_4ByteAligned;
+      break;
+#else
+    case MachineRepresentation::kWord64:    // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
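+  // Prefer the [register + immediate] form when either the offset or the base
+  // fits the immediate range; otherwise fall back to [register + register].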
+  if (g.CanBeImmediate(offset, mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+  } else if (g.CanBeImmediate(base, mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+  }
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+  S390OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* offset = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+  MachineRepresentation rep = store_rep.representation();
+
+  if (write_barrier_kind != kNoWriteBarrier) {
+    DCHECK_EQ(MachineRepresentation::kTagged, rep);
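+    // A write barrier is required; emit kArchStoreWithWriteBarrier and encode
+    // the record-write mode so the barrier can skip checks it does not need.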
+    AddressingMode addressing_mode;
+    InstructionOperand inputs[3];
+    size_t input_count = 0;
+    inputs[input_count++] = g.UseUniqueRegister(base);
+    // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
+    // for the store itself, so we must check compatibility with both.
+    if (g.CanBeImmediate(offset, kInt16Imm)
+#if V8_TARGET_ARCH_S390X
+        && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
+#endif
+            ) {
+      inputs[input_count++] = g.UseImmediate(offset);
+      addressing_mode = kMode_MRI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(offset);
+      addressing_mode = kMode_MRR;
+    }
+    inputs[input_count++] = g.UseUniqueRegister(value);
+    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+    switch (write_barrier_kind) {
+      case kNoWriteBarrier:
+        UNREACHABLE();
+        break;
+      case kMapWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsMap;
+        break;
+      case kPointerWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsPointer;
+        break;
+      case kFullWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsAny;
+        break;
+    }
+    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+    size_t const temp_count = arraysize(temps);
+    InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
+    code |= MiscField::encode(static_cast<int>(record_write_mode));
+    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+  } else {
+    ArchOpcode opcode = kArchNop;
+    ImmediateMode mode = kInt16Imm;
+    switch (rep) {
+      case MachineRepresentation::kFloat32:
+        opcode = kS390_StoreFloat32;
+        break;
+      case MachineRepresentation::kFloat64:
+        opcode = kS390_StoreDouble;
+        break;
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = kS390_StoreWord8;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = kS390_StoreWord16;
+        break;
+#if !V8_TARGET_ARCH_S390X
+      case MachineRepresentation::kTagged:  // Fall through.
+#endif
+      case MachineRepresentation::kWord32:
+        opcode = kS390_StoreWord32;
+        break;
+#if V8_TARGET_ARCH_S390X
+      case MachineRepresentation::kTagged:  // Fall through.
+      case MachineRepresentation::kWord64:
+        opcode = kS390_StoreWord64;
+        mode = kInt16Imm_4ByteAligned;
+        break;
+#else
+      case MachineRepresentation::kWord64:  // Fall through.
+#endif
+      case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kNone:
+        UNREACHABLE();
+        return;
+    }
+    if (g.CanBeImmediate(offset, mode)) {
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+    } else if (g.CanBeImmediate(base, mode)) {
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+    } else {
+      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+           g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+    }
+  }
+}
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:
+      opcode = kCheckedLoadWord64;
+      break;
+#endif
+    case MachineRepresentation::kFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
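+  // The checked load bounds-checks |offset| against |length| before
+  // performing the access.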
+  AddressingMode addressingMode = kMode_MRR;
+  Emit(opcode | AddressingModeField::encode(addressingMode),
+       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+       g.UseOperand(length, kInt16Imm_Unsigned));
+}
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:
+      opcode = kCheckedStoreWord64;
+      break;
+#endif
+    case MachineRepresentation::kFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+  AddressingMode addressingMode = kMode_MRR;
+  Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
+       g.UseRegister(base), g.UseRegister(offset),
+       g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
+}
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+                         ArchOpcode opcode, bool left_can_cover,
+                         bool right_can_cover, ImmediateMode imm_mode) {
+  S390OperandGenerator g(selector);
+
+  // Map instruction to equivalent operation with inverted right input.
+  ArchOpcode inv_opcode = opcode;
+  switch (opcode) {
+    case kS390_And:
+      inv_opcode = kS390_AndComplement;
+      break;
+    case kS390_Or:
+      inv_opcode = kS390_OrComplement;
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+    Matcher mleft(m->left().node());
+    if (mleft.right().Is(-1)) {
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->right().node()),
+                     g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+
+  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+      right_can_cover) {
+    Matcher mright(m->right().node());
+    if (mright.right().Is(-1)) {
+      // TODO(all): support shifted operand on right.
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->left().node()),
+                     g.UseRegister(mright.left().node()));
+      return;
+    }
+  }
+
+  VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+}
+
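+// Returns true if |value| consists of a single contiguous run of 1-bits and,
+// if so, reports its boundaries as bit indices counted from the least
+// significant bit: *mb is the most significant set bit, *me the least
+// significant one. For example, 0x00FF0000 yields *mb = 23 and *me = 16.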
+static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
+  int mask_width = base::bits::CountPopulation32(value);
+  int mask_msb = base::bits::CountLeadingZeros32(value);
+  int mask_lsb = base::bits::CountTrailingZeros32(value);
+  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
+    return false;
+  *mb = mask_lsb + mask_width - 1;
+  *me = mask_lsb;
+  return true;
+}
+
+#if V8_TARGET_ARCH_S390X
+static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
+  int mask_width = base::bits::CountPopulation64(value);
+  int mask_msb = base::bits::CountLeadingZeros64(value);
+  int mask_lsb = base::bits::CountTrailingZeros64(value);
+  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
+    return false;
+  *mb = mask_lsb + mask_width - 1;
+  *me = mask_lsb;
+  return true;
+}
+#endif
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  int mb = 0;
+  int me = 0;
+  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+    int sh = 0;
+    Node* left = m.left().node();
+    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
+        CanCover(node, left)) {
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 31)) {
+        left = mleft.left().node();
+        sh = mleft.right().Value();
+        if (m.left().IsWord32Shr()) {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (mb > 31 - sh) mb = 31 - sh;
+          sh = (32 - sh) & 0x1f;
+        } else {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (me < sh) me = sh;
+        }
+      }
+    }
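+    // Only fuse the shift and the mask into a single rotate-and-mask
+    // instruction if the adjusted mask is still non-empty (mb >= me).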
+    if (mb >= me) {
+      Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+           g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
+           g.TempImmediate(me));
+      return;
+    }
+  }
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kS390_And, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64And(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  int mb = 0;
+  int me = 0;
+  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+    int sh = 0;
+    Node* left = m.left().node();
+    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
+        CanCover(node, left)) {
+      Int64BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 63)) {
+        left = mleft.left().node();
+        sh = mleft.right().Value();
+        if (m.left().IsWord64Shr()) {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (mb > 63 - sh) mb = 63 - sh;
+          sh = (64 - sh) & 0x3f;
+        } else {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (me < sh) me = sh;
+        }
+      }
+    }
+    if (mb >= me) {
+      bool match = false;
+      ArchOpcode opcode;
+      int mask;
+      if (me == 0) {
+        match = true;
+        opcode = kS390_RotLeftAndClearLeft64;
+        mask = mb;
+      } else if (mb == 63) {
+        match = true;
+        opcode = kS390_RotLeftAndClearRight64;
+        mask = me;
+      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
+        match = true;
+        opcode = kS390_RotLeftAndClear64;
+        mask = mb;
+      }
+      if (match) {
+        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+             g.TempImmediate(sh), g.TempImmediate(mask));
+        return;
+      }
+    }
+  }
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kS390_And, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  Int32BinopMatcher m(node);
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kS390_Or, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Or(Node* node) {
+  Int64BinopMatcher m(node);
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kS390_Or, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+  }
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+  }
+}
+#endif
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    Int32BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (me < sh) me = sh;
+      if (mb >= me) {
+        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+             g.TempImmediate(mb), g.TempImmediate(me));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  // TODO(mbrandy): eliminate left sign extension if right >= 32
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    Int64BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (me < sh) me = sh;
+      if (mb >= me) {
+        bool match = false;
+        ArchOpcode opcode;
+        int mask;
+        if (me == 0) {
+          match = true;
+          opcode = kS390_RotLeftAndClearLeft64;
+          mask = mb;
+        } else if (mb == 63) {
+          match = true;
+          opcode = kS390_RotLeftAndClearRight64;
+          mask = me;
+        } else if (sh && me <= sh) {
+          match = true;
+          opcode = kS390_RotLeftAndClear64;
+          mask = mb;
+        }
+        if (match) {
+          Emit(opcode, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+               g.TempImmediate(mask));
+          return;
+        }
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    Int32BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (mb > 31 - sh) mb = 31 - sh;
+      sh = (32 - sh) & 0x1f;
+      if (mb >= me) {
+        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+             g.TempImmediate(mb), g.TempImmediate(me));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    Int64BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (mb > 63 - sh) mb = 63 - sh;
+      sh = (64 - sh) & 0x3f;
+      if (mb >= me) {
+        bool match = false;
+        ArchOpcode opcode;
+        int mask;
+        if (me == 0) {
+          match = true;
+          opcode = kS390_RotLeftAndClearLeft64;
+          mask = mb;
+        } else if (mb == 63) {
+          match = true;
+          opcode = kS390_RotLeftAndClearRight64;
+          mask = me;
+        }
+        if (match) {
+          Emit(opcode, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+               g.TempImmediate(mask));
+          return;
+        }
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // Replace with sign extension for (x << K) >> K where K is 16 or 24.
+  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+  VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
+}
+
+#if !V8_TARGET_ARCH_S390X
+void VisitPairBinop(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  S390OperandGenerator g(selector);
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  VisitPairBinop(this, kS390_AddPair, node);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  VisitPairBinop(this, kS390_SubPair, node);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  S390OperandGenerator g(this);
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kS390_MulPair, 2, outputs, 4, inputs);
+}
+
+void VisitPairShift(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  S390OperandGenerator g(selector);
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseRegister(node->InputAt(1)),
+                                 shift_operand};
+
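+  // The low-word output is constrained to reuse the first input's register
+  // (DefineSameAsFirst); the high word is defined in its own register.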
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitPairShift(this, kS390_ShiftLeftPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitPairShift(this, kS390_ShiftRightPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitPairShift(this, kS390_ShiftRightArithPair, node);
+}
+#endif
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Cntlz32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Clz(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Cntlz64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Popcnt32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Popcnt64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+#endif
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+#endif
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+}
+#endif
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+  }
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+  }
+}
+#endif
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitRRR(this, kS390_Mul32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitRRR(this, kS390_Mul64, node);
+}
+#endif
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_MulHigh32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_MulHighU32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitRRR(this, kS390_Div32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitRRR(this, kS390_Div64, node);
+}
+#endif
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+  VisitRRR(this, kS390_DivU32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitUint64Div(Node* node) {
+  VisitRRR(this, kS390_DivU64, node);
+}
+#endif
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitRRR(this, kS390_Mod32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitRRR(this, kS390_Mod64, node);
+}
+#endif
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+  VisitRRR(this, kS390_ModU32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitUint64Mod(Node* node) {
+  VisitRRR(this, kS390_ModU64, node);
+}
+#endif
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  VisitRR(this, kS390_Float32ToDouble, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kS390_Int32ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kS390_Uint32ToFloat32, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  VisitRR(this, kS390_Int32ToDouble, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  VisitRR(this, kS390_Uint32ToDouble, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  VisitRR(this, kS390_DoubleToInt32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  VisitRR(this, kS390_DoubleToUint32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kS390_DoubleToUint32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kS390_ExtendSignWord32, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kS390_Uint32ToUint64, node);
+}
+#endif
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  VisitRR(this, kS390_DoubleToFloat32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+  switch (TruncationModeOf(node->op())) {
+    case TruncationMode::kJavaScript:
+      return VisitRR(this, kArchTruncateDoubleToI, node);
+    case TruncationMode::kRoundToZero:
+      return VisitRR(this, kS390_DoubleToInt32, node);
+  }
+  UNREACHABLE();
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kS390_Float32ToInt32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kS390_Float32ToUint32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kS390_Int64ToInt32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+  VisitRR(this, kS390_Int64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+  VisitRR(this, kS390_Int64ToDouble, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+  VisitRR(this, kS390_Uint64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+  VisitRR(this, kS390_Uint64ToDouble, node);
+}
+#endif
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+  VisitRR(this, kS390_BitcastFloat32ToInt32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+  VisitRR(this, kS390_BitcastDoubleToInt64, node);
+}
+#endif
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+  VisitRR(this, kS390_BitcastInt32ToFloat32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+  VisitRR(this, kS390_BitcastInt64ToDouble, node);
+}
+#endif
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+  VisitRRR(this, kS390_AddFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  // TODO(mbrandy): detect multiply-add
+  VisitRRR(this, kS390_AddDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+  S390OperandGenerator g(this);
+  Float32BinopMatcher m(node);
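+  // Fold -0.0 - x into a negation.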
+  if (m.left().IsMinusZero()) {
+    Emit(kS390_NegDouble, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
+  VisitRRR(this, kS390_SubFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  // TODO(mbrandy): detect multiply-subtract
+  S390OperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.left().IsMinusZero()) {
+    if (m.right().IsFloat64RoundDown() &&
+        CanCover(m.node(), m.right().node())) {
+      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+          CanCover(m.right().node(), m.right().InputAt(0))) {
+        Float64BinopMatcher mright0(m.right().InputAt(0));
+        if (mright0.left().IsMinusZero()) {
+          // -floor(-x) = ceil(x)
+          Emit(kS390_CeilDouble, g.DefineAsRegister(node),
+               g.UseRegister(mright0.right().node()));
+          return;
+        }
+      }
+    }
+    Emit(kS390_NegDouble, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
+  VisitRRR(this, kS390_SubDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+  VisitRRR(this, kS390_MulFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  // TODO(mbrandy): detect negate
+  VisitRRR(this, kS390_MulDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+  VisitRRR(this, kS390_DivFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRR(this, kS390_DivDouble, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
+       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+  VisitRR(this, kS390_AbsFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+  VisitRR(this, kS390_AbsDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+  VisitRR(this, kS390_SqrtFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  VisitRR(this, kS390_SqrtDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+  VisitRR(this, kS390_FloorFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+  VisitRR(this, kS390_FloorDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+  VisitRR(this, kS390_CeilFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+  VisitRR(this, kS390_CeilDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+  VisitRR(this, kS390_TruncateFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRR(this, kS390_TruncateDouble, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  VisitRR(this, kS390_RoundDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+  UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+  UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32,
+                                         kInt16Imm, &cont);
+  }
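+  // The overflow projection is unused; emit the add without a flags
+  // continuation.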
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32, kInt16Imm,
+                                &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
+                                         kInt16Imm_Negate, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
+                                kInt16Imm_Negate, &cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm,
+                                         &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub,
+                                         kInt16Imm_Negate, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate, &cont);
+}
+#endif
+
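+// Returns true if the continuation's condition is an unsigned comparison,
+// in which case immediates must be encoded as unsigned 16-bit values.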
+static bool CompareLogical(FlagsContinuation* cont) {
+  switch (cont->condition()) {
+    case kUnsignedLessThan:
+    case kUnsignedGreaterThanOrEqual:
+    case kUnsignedLessThanOrEqual:
+    case kUnsignedGreaterThan:
+      return true;
+    default:
+      return false;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand left, InstructionOperand right,
+                  FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
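+  // Fold the continuation's flags mode and condition into the opcode.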
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont,
+                      bool commutative, ImmediateMode immediate_mode) {
+  S390OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, immediate_mode)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, immediate_mode)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
+}
+#endif
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, InstructionCode opcode,
+                          FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+#if V8_TARGET_ARCH_S390X
+      case IrOpcode::kWord64Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+#endif
+      case IrOpcode::kFloat32Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there is no use of
+          // the actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+          Node* const node = value->InputAt(0);
+          Node* const result = NodeProperties::FindProjection(node, 0);
+          if (result == nullptr || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kS390_AddWithOverflow32, kInt16Imm, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(selector, node,
+                                                     kS390_SubWithOverflow32,
+                                                     kInt16Imm_Negate, cont);
+#if V8_TARGET_ARCH_S390X
+              case IrOpcode::kInt64AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Add,
+                                                     kInt16Imm, cont);
+              case IrOpcode::kInt64SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Sub,
+                                                     kInt16Imm_Negate, cont);
+#endif
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kInt32Sub:
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
+                                kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt32Add:
+// case IrOpcode::kWord32Or:
+// case IrOpcode::kWord32Xor:
+// case IrOpcode::kWord32Sar:
+// case IrOpcode::kWord32Shl:
+// case IrOpcode::kWord32Shr:
+// case IrOpcode::kWord32Ror:
+#if V8_TARGET_ARCH_S390X
+      case IrOpcode::kInt64Sub:
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kWord64And:
+        return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
+                                kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt64Add:
+// case IrOpcode::kWord64Or:
+// case IrOpcode::kWord64Xor:
+// case IrOpcode::kWord64Sar:
+// case IrOpcode::kWord64Shl:
+// case IrOpcode::kWord64Shr:
+// case IrOpcode::kWord64Ror:
+#endif
+      default:
+        break;
+    }
+    break;
+  }
+
+  // Branch could not be combined with a compare; emit a compare against 0.
+  S390OperandGenerator g(selector);
+  VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+               cont);
+}
+
+void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
+  VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
+  VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
+}
+#endif
+
+}  // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+  S390OperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+  // Emit either ArchTableSwitch or ArchLookupSwitch.
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
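+  // Prefer a table switch when its space cost plus three times its time cost
+  // does not exceed that of a sequence of conditional jumps, and the minimum
+  // case value can be safely subtracted out.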
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min()) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kS390_Sub, index_operand, value_operand,
+           g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
+  }
+
+  // Generate a sequence of conditional jumps.
+  return EmitLookupSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+#endif
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::EmitPrepareArguments(
+    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+    Node* node) {
+  S390OperandGenerator g(this);
+
+  // Prepare for C function call.
+  if (descriptor->IsCFunctionCall()) {
+    Emit(kArchPrepareCallCFunction |
+             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+         0, nullptr, 0, nullptr);
+
+    // Poke any stack arguments.
+    int slot = kStackFrameExtraParamSlot;
+    for (PushParameter input : (*arguments)) {
+      Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+           g.TempImmediate(slot));
+      ++slot;
+    }
+  } else {
+    // Push any stack arguments.
+    int num_slots = static_cast<int>(descriptor->StackParameterCount());
+    int slot = 0;
+    for (PushParameter input : (*arguments)) {
+      if (slot == 0) {
+        DCHECK(input.node());
+        Emit(kS390_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
+             g.TempImmediate(num_slots));
+      } else {
+        // Skip any alignment holes in pushed nodes.
+        if (input.node()) {
+          Emit(kS390_StoreToStackSlot, g.NoOutput(),
+               g.UseRegister(input.node()), g.TempImmediate(slot));
+        }
+      }
+      ++slot;
+    }
+  }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
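+  // If the low-word insert consumes a high-word insert that it fully covers,
+  // combine the pair into a single DoubleConstruct of the two halves.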
+  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+      CanCover(node, left)) {
+    left = left->InputAt(1);
+    Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
+         g.UseRegister(right));
+    return;
+  }
+  Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
+       g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+      CanCover(node, left)) {
+    left = left->InputAt(1);
+    Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
+         g.UseRegister(left));
+    return;
+  }
+  Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
+       g.UseRegister(left), g.UseRegister(right));
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  return MachineOperatorBuilder::kFloat32RoundDown |
+         MachineOperatorBuilder::kFloat64RoundDown |
+         MachineOperatorBuilder::kFloat32RoundUp |
+         MachineOperatorBuilder::kFloat64RoundUp |
+         MachineOperatorBuilder::kFloat32RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTiesAway |
+         MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kWord64Popcnt;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index 455fcd1..4ac65e5 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -298,6 +298,64 @@
   SetControlInput(block, sw);
 }
 
+void Schedule::EnsureSplitEdgeForm() {
+  // Make a copy of all the blocks for the iteration, since adding the split
+  // edges will allocate new blocks.
+  BasicBlockVector all_blocks_copy(all_blocks_);
+
+  // Insert missing split edge blocks.
+  for (auto block : all_blocks_copy) {
+    if (block->PredecessorCount() > 1 && block != end_) {
+      for (auto current_pred = block->predecessors().begin();
+           current_pred != block->predecessors().end(); ++current_pred) {
+        BasicBlock* pred = *current_pred;
+        if (pred->SuccessorCount() > 1) {
+          // Found a predecessor block with multiple successors.
+          BasicBlock* split_edge_block = NewBasicBlock();
+          split_edge_block->set_control(BasicBlock::kGoto);
+          split_edge_block->successors().push_back(block);
+          split_edge_block->predecessors().push_back(pred);
+          split_edge_block->set_deferred(pred->deferred());
+          *current_pred = split_edge_block;
+          // Find the corresponding successor entry in the predecessor and
+          // replace it with the split edge block. Do this only once, since
+          // the predecessors of the current block are replaced one at a time.
+          for (auto successor = pred->successors().begin();
+               successor != pred->successors().end(); ++successor) {
+            if (*successor == block) {
+              *successor = split_edge_block;
+              break;
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+void Schedule::PropagateDeferredMark() {
+  // Push forward the deferred block marks through newly inserted blocks and
+  // other improperly marked blocks until a fixed point is reached.
+  // TODO(danno): optimize the propagation
+  bool done = false;
+  while (!done) {
+    done = true;
+    for (auto block : all_blocks_) {
+      if (!block->deferred()) {
+        bool deferred = block->PredecessorCount() > 0;
+        for (auto pred : block->predecessors()) {
+          if (!pred->deferred()) {
+            deferred = false;
+          }
+        }
+        if (deferred) {
+          block->set_deferred(true);
+          done = false;
+        }
+      }
+    }
+  }
+}
 
 void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
   block->AddSuccessor(succ);
@@ -331,15 +389,24 @@
 
 
 std::ostream& operator<<(std::ostream& os, const Schedule& s) {
-  for (BasicBlock* block : *s.rpo_order()) {
-    os << "--- BLOCK B" << block->rpo_number();
+  for (BasicBlock* block :
+       ((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) {
+    if (block->rpo_number() == -1) {
+      os << "--- BLOCK id:" << block->id().ToInt();
+    } else {
+      os << "--- BLOCK B" << block->rpo_number();
+    }
     if (block->deferred()) os << " (deferred)";
     if (block->PredecessorCount() != 0) os << " <- ";
     bool comma = false;
     for (BasicBlock const* predecessor : block->predecessors()) {
       if (comma) os << ", ";
       comma = true;
-      os << "B" << predecessor->rpo_number();
+      if (predecessor->rpo_number() == -1) {
+        os << "id:" << predecessor->id().ToInt();
+      } else {
+        os << "B" << predecessor->rpo_number();
+      }
     }
     os << " ---\n";
     for (Node* node : *block) {
@@ -364,7 +431,11 @@
       for (BasicBlock const* successor : block->successors()) {
         if (comma) os << ", ";
         comma = true;
-        os << "B" << successor->rpo_number();
+        if (successor->rpo_number() == -1) {
+          os << "id:" << successor->id().ToInt();
+        } else {
+          os << "B" << successor->rpo_number();
+        }
       }
       os << "\n";
     }
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 9624ff5..c99a0fc 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -243,6 +243,7 @@
     return AddSuccessor(block, succ);
   }
 
+  const BasicBlockVector* all_blocks() const { return &all_blocks_; }
   BasicBlockVector* rpo_order() { return &rpo_order_; }
   const BasicBlockVector* rpo_order() const { return &rpo_order_; }
 
@@ -254,6 +255,12 @@
  private:
   friend class Scheduler;
   friend class BasicBlockInstrumentor;
+  friend class RawMachineAssembler;
+
+  // Ensure split-edge form for a hand-assembled schedule.
+  void EnsureSplitEdgeForm();
+  // Copy deferred block markers down as far as possible
+  void PropagateDeferredMark();
 
   void AddSuccessor(BasicBlock* block, BasicBlock* succ);
   void MoveSuccessors(BasicBlock* from, BasicBlock* to);
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 80ce8b1..b04ba6f 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -1538,6 +1538,8 @@
   }
 
   BasicBlock* GetBlockForUse(Edge edge) {
+    // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
+    // Dead uses only occur if the graph is not trimmed before scheduling.
     Node* use = edge.from();
     if (IrOpcode::IsPhiOpcode(use->opcode())) {
       // If the use is from a coupled (i.e. floating) phi, compute the common
@@ -1545,7 +1547,8 @@
       if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
         TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
               use->op()->mnemonic());
-        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+        // TODO(titzer): reenable once above TODO is addressed.
+        //        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
         return GetCommonDominatorOfUses(use);
       }
       // If the use is from a fixed (i.e. non-floating) phi, we use the
diff --git a/src/compiler/select-lowering.cc b/src/compiler/select-lowering.cc
index 0e8b36f..b1a2309 100644
--- a/src/compiler/select-lowering.cc
+++ b/src/compiler/select-lowering.cc
@@ -15,10 +15,7 @@
 namespace compiler {
 
 SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
-    : common_(common),
-      graph_(graph),
-      merges_(Merges::key_compare(), Merges::allocator_type(graph->zone())) {}
-
+    : common_(common), graph_(graph) {}
 
 SelectLowering::~SelectLowering() {}
 
@@ -30,58 +27,16 @@
   Node* cond = node->InputAt(0);
   Node* vthen = node->InputAt(1);
   Node* velse = node->InputAt(2);
-  Node* merge = nullptr;
 
-  // Check if we already have a diamond for this condition.
-  auto range = merges_.equal_range(cond);
-  for (auto i = range.first;; ++i) {
-    if (i == range.second) {
-      // Create a new diamond for this condition and remember its merge node.
-      Diamond d(graph(), common(), cond, p.hint());
-      merges_.insert(std::make_pair(cond, d.merge));
-      merge = d.merge;
-      break;
-    }
-
-    // If the diamond is reachable from the Select, merging them would result in
-    // an unschedulable graph, so we cannot reuse the diamond in that case.
-    merge = i->second;
-    if (!ReachableFrom(merge, node)) {
-      break;
-    }
-  }
-
-  // Create a Phi hanging off the previously determined merge.
+  // Create a diamond and a phi.
+  Diamond d(graph(), common(), cond, p.hint());
   node->ReplaceInput(0, vthen);
   node->ReplaceInput(1, velse);
-  node->ReplaceInput(2, merge);
+  node->ReplaceInput(2, d.merge);
   NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
   return Changed(node);
 }
 
-
-bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
-  // TODO(turbofan): This is probably horribly expensive, and it should be moved
-  // into node.h or somewhere else?!
-  Zone zone;
-  std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
-  BoolVector visited(graph()->NodeCount(), false, &zone);
-  queue.push(source);
-  visited[source->id()] = true;
-  while (!queue.empty()) {
-    Node* current = queue.front();
-    if (current == sink) return true;
-    queue.pop();
-    for (auto input : current->inputs()) {
-      if (!visited[input->id()]) {
-        queue.push(input);
-        visited[input->id()] = true;
-      }
-    }
-  }
-  return false;
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/select-lowering.h b/src/compiler/select-lowering.h
index 5894d35..b882a31 100644
--- a/src/compiler/select-lowering.h
+++ b/src/compiler/select-lowering.h
@@ -5,10 +5,7 @@
 #ifndef V8_COMPILER_SELECT_LOWERING_H_
 #define V8_COMPILER_SELECT_LOWERING_H_
 
-#include <map>
-
 #include "src/compiler/graph-reducer.h"
-#include "src/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
@@ -28,17 +25,11 @@
   Reduction Reduce(Node* node) override;
 
  private:
-  typedef std::multimap<Node*, Node*, std::less<Node*>,
-                        zone_allocator<std::pair<Node* const, Node*>>> Merges;
-
-  bool ReachableFrom(Node* const sink, Node* const source);
-
   CommonOperatorBuilder* common() const { return common_; }
   Graph* graph() const { return graph_; }
 
   CommonOperatorBuilder* common_;
   Graph* graph_;
-  Merges merges_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index ed7fe9d..88931f5 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -104,15 +104,6 @@
   static UseInfo None() {
     return UseInfo(MachineRepresentation::kNone, Truncation::None());
   }
-
-  // Truncation to a representation that is smaller than the preferred
-  // one.
-  static UseInfo Float64TruncatingToWord32() {
-    return UseInfo(MachineRepresentation::kFloat64, Truncation::Word32());
-  }
-  static UseInfo Word64TruncatingToWord32() {
-    return UseInfo(MachineRepresentation::kWord64, Truncation::Word32());
-  }
   static UseInfo AnyTruncatingToBool() {
     return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
   }
@@ -242,71 +233,6 @@
 class RepresentationSelector {
  public:
   // Information for each node tracked during the fixpoint.
-  class NodeOutputInfo {
-   public:
-    NodeOutputInfo(MachineRepresentation representation, Type* type)
-        : type_(type), representation_(representation) {}
-    NodeOutputInfo()
-        : type_(Type::None()), representation_(MachineRepresentation::kNone) {}
-
-    MachineRepresentation representation() const { return representation_; }
-    Type* type() const { return type_; }
-
-    static NodeOutputInfo None() {
-      return NodeOutputInfo(MachineRepresentation::kNone, Type::None());
-    }
-
-    static NodeOutputInfo Float32() {
-      return NodeOutputInfo(MachineRepresentation::kFloat32, Type::Number());
-    }
-
-    static NodeOutputInfo Float64() {
-      return NodeOutputInfo(MachineRepresentation::kFloat64, Type::Number());
-    }
-
-    static NodeOutputInfo NumberTruncatedToWord32() {
-      return NodeOutputInfo(MachineRepresentation::kWord32, Type::Number());
-    }
-
-    static NodeOutputInfo Int32() {
-      return NodeOutputInfo(MachineRepresentation::kWord32, Type::Signed32());
-    }
-
-    static NodeOutputInfo Uint32() {
-      return NodeOutputInfo(MachineRepresentation::kWord32, Type::Unsigned32());
-    }
-
-    static NodeOutputInfo Bool() {
-      return NodeOutputInfo(MachineRepresentation::kBit, Type::Boolean());
-    }
-
-    static NodeOutputInfo Int64() {
-      // TODO(jarin) Fix once we have a real int64 type.
-      return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
-    }
-
-    static NodeOutputInfo Uint64() {
-      // TODO(jarin) Fix once we have a real uint64 type.
-      return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
-    }
-
-    static NodeOutputInfo AnyTagged() {
-      return NodeOutputInfo(MachineRepresentation::kTagged, Type::Any());
-    }
-
-    static NodeOutputInfo NumberTagged() {
-      return NodeOutputInfo(MachineRepresentation::kTagged, Type::Number());
-    }
-
-    static NodeOutputInfo Pointer() {
-      return NodeOutputInfo(MachineType::PointerRepresentation(), Type::Any());
-    }
-
-   private:
-    Type* type_;
-    MachineRepresentation representation_;
-  };
-
   class NodeInfo {
    public:
     // Adds new use to the node. Returns true if something has changed
@@ -322,17 +248,15 @@
     void set_visited() { visited_ = true; }
     bool visited() const { return visited_; }
     Truncation truncation() const { return truncation_; }
-    void set_output_type(NodeOutputInfo output) { output_ = output; }
+    void set_output(MachineRepresentation output) { representation_ = output; }
 
-    Type* output_type() const { return output_.type(); }
-    MachineRepresentation representation() const {
-      return output_.representation();
-    }
+    MachineRepresentation representation() const { return representation_; }
 
    private:
-    bool queued_ = false;                  // Bookkeeping for the traversal.
-    bool visited_ = false;                 // Bookkeeping for the traversal.
-    NodeOutputInfo output_;                // Output type and representation.
+    bool queued_ = false;   // Bookkeeping for the traversal.
+    bool visited_ = false;  // Bookkeeping for the traversal.
+    MachineRepresentation representation_ =
+        MachineRepresentation::kNone;             // Output representation.
     Truncation truncation_ = Truncation::None();  // Information about uses.
   };
 
@@ -467,76 +391,31 @@
     }
   }
 
-  void SetOutputFromMachineType(Node* node, MachineType machine_type) {
-    Type* type = Type::None();
-    switch (machine_type.semantic()) {
-      case MachineSemantic::kNone:
-        type = Type::None();
-        break;
-      case MachineSemantic::kBool:
-        type = Type::Boolean();
-        break;
-      case MachineSemantic::kInt32:
-        type = Type::Signed32();
-        break;
-      case MachineSemantic::kUint32:
-        type = Type::Unsigned32();
-        break;
-      case MachineSemantic::kInt64:
-        // TODO(jarin) Fix once we have proper int64.
-        type = Type::Internal();
-        break;
-      case MachineSemantic::kUint64:
-        // TODO(jarin) Fix once we have proper uint64.
-        type = Type::Internal();
-        break;
-      case MachineSemantic::kNumber:
-        type = Type::Number();
-        break;
-      case MachineSemantic::kAny:
-        type = Type::Any();
-        break;
-    }
-    return SetOutput(node, NodeOutputInfo(machine_type.representation(), type));
+  void SetOutput(Node* node, MachineRepresentation representation) {
+    NodeInfo* info = GetInfo(node);
+    DCHECK(
+        MachineRepresentationIsSubtype(info->representation(), representation));
+    info->set_output(representation);
   }
 
-  void SetOutput(Node* node, NodeOutputInfo output_info) {
-    // Every node should have at most one output representation. Note that
-    // phis can have 0, if they have not been used in a representation-inducing
-    // instruction.
-    Type* output_type = output_info.type();
-    if (NodeProperties::IsTyped(node)) {
-      output_type = Type::Intersect(NodeProperties::GetType(node),
-                                    output_info.type(), jsgraph_->zone());
-    }
-    NodeInfo* info = GetInfo(node);
-    DCHECK(info->output_type()->Is(output_type));
-    DCHECK(MachineRepresentationIsSubtype(info->representation(),
-                                          output_info.representation()));
-    if (!output_type->Is(info->output_type()) ||
-        output_info.representation() != info->representation()) {
-      EnqueueUses(node);
-    }
-    info->set_output_type(
-        NodeOutputInfo(output_info.representation(), output_type));
-  }
+  Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
 
   bool BothInputsAreSigned32(Node* node) {
     DCHECK_EQ(2, node->InputCount());
-    return GetInfo(node->InputAt(0))->output_type()->Is(Type::Signed32()) &&
-           GetInfo(node->InputAt(1))->output_type()->Is(Type::Signed32());
+    return GetUpperBound(node->InputAt(0))->Is(Type::Signed32()) &&
+           GetUpperBound(node->InputAt(1))->Is(Type::Signed32());
   }
 
   bool BothInputsAreUnsigned32(Node* node) {
     DCHECK_EQ(2, node->InputCount());
-    return GetInfo(node->InputAt(0))->output_type()->Is(Type::Unsigned32()) &&
-           GetInfo(node->InputAt(1))->output_type()->Is(Type::Unsigned32());
+    return GetUpperBound(node->InputAt(0))->Is(Type::Unsigned32()) &&
+           GetUpperBound(node->InputAt(1))->Is(Type::Unsigned32());
   }
 
   bool BothInputsAre(Node* node, Type* type) {
     DCHECK_EQ(2, node->InputCount());
-    return GetInfo(node->InputAt(0))->output_type()->Is(type) &&
-           GetInfo(node->InputAt(1))->output_type()->Is(type);
+    return GetUpperBound(node->InputAt(0))->Is(type) &&
+           GetUpperBound(node->InputAt(1))->Is(type);
   }
 
   void ConvertInput(Node* node, int index, UseInfo use) {
@@ -556,7 +435,7 @@
       PrintUseInfo(use);
       TRACE("\n");
       Node* n = changer_->GetRepresentationFor(
-          input, input_info->representation(), input_info->output_type(),
+          input, input_info->representation(), GetUpperBound(input),
           use.preferred(), use.truncation());
       node->ReplaceInput(index, n);
     }
@@ -602,7 +481,7 @@
 
   // Helper for binops of the R x L -> O variety.
   void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
-                  NodeOutputInfo output) {
+                  MachineRepresentation output) {
     DCHECK_EQ(2, node->op()->ValueInputCount());
     ProcessInput(node, 0, left_use);
     ProcessInput(node, 1, right_use);
@@ -613,80 +492,77 @@
   }
 
   // Helper for binops of the I x I -> O variety.
-  void VisitBinop(Node* node, UseInfo input_use, NodeOutputInfo output) {
+  void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output) {
     VisitBinop(node, input_use, input_use, output);
   }
 
   // Helper for unops of the I -> O variety.
-  void VisitUnop(Node* node, UseInfo input_use, NodeOutputInfo output) {
+  void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output) {
     DCHECK_EQ(1, node->InputCount());
     ProcessInput(node, 0, input_use);
     SetOutput(node, output);
   }
 
   // Helper for leaf nodes.
-  void VisitLeaf(Node* node, NodeOutputInfo output) {
+  void VisitLeaf(Node* node, MachineRepresentation output) {
     DCHECK_EQ(0, node->InputCount());
     SetOutput(node, output);
   }
 
   // Helpers for specific types of binops.
   void VisitFloat64Binop(Node* node) {
-    VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+    VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
   }
   void VisitInt32Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+    VisitBinop(node, UseInfo::TruncatingWord32(),
+               MachineRepresentation::kWord32);
   }
   void VisitWord32TruncatingBinop(Node* node) {
     VisitBinop(node, UseInfo::TruncatingWord32(),
-               NodeOutputInfo::NumberTruncatedToWord32());
+               MachineRepresentation::kWord32);
   }
   void VisitUint32Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+    VisitBinop(node, UseInfo::TruncatingWord32(),
+               MachineRepresentation::kWord32);
   }
   void VisitInt64Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Int64());
+    VisitBinop(node, UseInfo::TruncatingWord64(),
+               MachineRepresentation::kWord64);
   }
   void VisitUint64Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Uint64());
+    VisitBinop(node, UseInfo::TruncatingWord64(),
+               MachineRepresentation::kWord64);
   }
   void VisitFloat64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kBit);
   }
   void VisitInt32Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
   }
   void VisitUint32Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
   }
   void VisitInt64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
   }
   void VisitUint64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
   }
 
   // Infer representation for phi-like nodes.
-  NodeOutputInfo GetOutputInfoForPhi(Node* node, Truncation use) {
-    // Compute the type.
-    Type* type = GetInfo(node->InputAt(0))->output_type();
-    for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
-      type = Type::Union(type, GetInfo(node->InputAt(i))->output_type(),
-                         jsgraph_->zone());
-    }
-
+  MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use) {
     // Compute the representation.
-    MachineRepresentation rep = MachineRepresentation::kTagged;
+    Type* type = GetUpperBound(node);
     if (type->Is(Type::None())) {
-      rep = MachineRepresentation::kNone;
+      return MachineRepresentation::kNone;
     } else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
-      rep = MachineRepresentation::kWord32;
+      return MachineRepresentation::kWord32;
     } else if (use.TruncatesToWord32()) {
-      rep = MachineRepresentation::kWord32;
+      return MachineRepresentation::kWord32;
     } else if (type->Is(Type::Boolean())) {
-      rep = MachineRepresentation::kBit;
+      return MachineRepresentation::kBit;
     } else if (type->Is(Type::Number())) {
-      rep = MachineRepresentation::kFloat64;
+      return MachineRepresentation::kFloat64;
     } else if (type->Is(Type::Internal())) {
       // We mark (u)int64 as Type::Internal.
       // TODO(jarin) This is a workaround for our lack of (u)int64
@@ -702,10 +578,10 @@
                                  MachineRepresentation::kWord64);
       }
 #endif
-      rep = is_word64 ? MachineRepresentation::kWord64
-                      : MachineRepresentation::kTagged;
+      return is_word64 ? MachineRepresentation::kWord64
+                       : MachineRepresentation::kTagged;
     }
-    return NodeOutputInfo(rep, type);
+    return MachineRepresentation::kTagged;
   }
 
   // Helper for handling selects.
@@ -713,20 +589,20 @@
                    SimplifiedLowering* lowering) {
     ProcessInput(node, 0, UseInfo::Bool());
 
-    NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+    MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
     SetOutput(node, output);
 
     if (lower()) {
       // Update the select operator.
       SelectParameters p = SelectParametersOf(node->op());
-      if (output.representation() != p.representation()) {
-        NodeProperties::ChangeOp(node, lowering->common()->Select(
-                                           output.representation(), p.hint()));
+      if (output != p.representation()) {
+        NodeProperties::ChangeOp(node,
+                                 lowering->common()->Select(output, p.hint()));
       }
     }
     // Convert inputs to the output representation of this phi, pass the
     // truncation truncation along.
-    UseInfo input_use(output.representation(), truncation);
+    UseInfo input_use(output, truncation);
     ProcessInput(node, 1, input_use);
     ProcessInput(node, 2, input_use);
   }
@@ -734,21 +610,20 @@
   // Helper for handling phis.
   void VisitPhi(Node* node, Truncation truncation,
                 SimplifiedLowering* lowering) {
-    NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+    MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
     SetOutput(node, output);
 
     int values = node->op()->ValueInputCount();
     if (lower()) {
       // Update the phi operator.
-      if (output.representation() != PhiRepresentationOf(node->op())) {
-        NodeProperties::ChangeOp(
-            node, lowering->common()->Phi(output.representation(), values));
+      if (output != PhiRepresentationOf(node->op())) {
+        NodeProperties::ChangeOp(node, lowering->common()->Phi(output, values));
       }
     }
 
     // Convert inputs to the output representation of this phi, pass the
     // truncation truncation along.
-    UseInfo input_use(output.representation(), truncation);
+    UseInfo input_use(output, truncation);
     for (int i = 0; i < node->InputCount(); i++) {
       ProcessInput(node, i, i < values ? input_use : UseInfo::None());
     }
@@ -772,9 +647,10 @@
     }
 
     if (sig->return_count() > 0) {
-      SetOutputFromMachineType(node, desc->GetMachineSignature()->GetReturn());
+      SetOutput(node,
+                desc->GetMachineSignature()->GetReturn().representation());
     } else {
-      SetOutput(node, NodeOutputInfo::AnyTagged());
+      SetOutput(node, MachineRepresentation::kTagged);
     }
   }
 
@@ -801,10 +677,10 @@
           new (zone->New(sizeof(ZoneVector<MachineType>)))
               ZoneVector<MachineType>(node->InputCount(), zone);
       for (int i = 0; i < node->InputCount(); i++) {
-        NodeInfo* input_info = GetInfo(node->InputAt(i));
-        MachineType machine_type(
-            input_info->representation(),
-            DeoptValueSemanticOf(input_info->output_type()));
+        Node* input = node->InputAt(i);
+        NodeInfo* input_info = GetInfo(input);
+        MachineType machine_type(input_info->representation(),
+                                 DeoptValueSemanticOf(GetUpperBound(input)));
         DCHECK(machine_type.representation() !=
                    MachineRepresentation::kWord32 ||
                machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -814,7 +690,7 @@
       NodeProperties::ChangeOp(node,
                                jsgraph_->common()->TypedStateValues(types));
     }
-    SetOutput(node, NodeOutputInfo::AnyTagged());
+    SetOutput(node, MachineRepresentation::kTagged);
   }
 
   const Operator* Int32Op(Node* node) {
@@ -839,29 +715,34 @@
       //------------------------------------------------------------------
       case IrOpcode::kStart:
       case IrOpcode::kDead:
-        return VisitLeaf(node, NodeOutputInfo::None());
+        return VisitLeaf(node, MachineRepresentation::kNone);
       case IrOpcode::kParameter: {
         // TODO(titzer): use representation from linkage.
-        Type* type = NodeProperties::GetType(node);
         ProcessInput(node, 0, UseInfo::None());
-        SetOutput(node, NodeOutputInfo(MachineRepresentation::kTagged, type));
+        SetOutput(node, MachineRepresentation::kTagged);
         return;
       }
       case IrOpcode::kInt32Constant:
-        return VisitLeaf(node, NodeOutputInfo::Int32());
+        return VisitLeaf(node, MachineRepresentation::kWord32);
       case IrOpcode::kInt64Constant:
-        return VisitLeaf(node, NodeOutputInfo::Int64());
+        return VisitLeaf(node, MachineRepresentation::kWord64);
       case IrOpcode::kFloat32Constant:
-        return VisitLeaf(node, NodeOutputInfo::Float32());
+        return VisitLeaf(node, MachineRepresentation::kFloat32);
       case IrOpcode::kFloat64Constant:
-        return VisitLeaf(node, NodeOutputInfo::Float64());
+        return VisitLeaf(node, MachineRepresentation::kFloat64);
       case IrOpcode::kExternalConstant:
-        return VisitLeaf(node, NodeOutputInfo::Pointer());
+        return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kNumberConstant:
-        return VisitLeaf(node, NodeOutputInfo::NumberTagged());
+        return VisitLeaf(node, MachineRepresentation::kTagged);
       case IrOpcode::kHeapConstant:
-        return VisitLeaf(node, NodeOutputInfo::AnyTagged());
+        return VisitLeaf(node, MachineRepresentation::kTagged);
 
+      case IrOpcode::kDeoptimizeIf:
+      case IrOpcode::kDeoptimizeUnless:
+        ProcessInput(node, 0, UseInfo::Bool());
+        ProcessInput(node, 1, UseInfo::AnyTagged());
+        ProcessRemainingInputs(node, 2);
+        break;
       case IrOpcode::kBranch:
         ProcessInput(node, 0, UseInfo::Bool());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -889,7 +770,7 @@
         JS_OP_LIST(DEFINE_JS_CASE)
 #undef DEFINE_JS_CASE
         VisitInputs(node);
-        return SetOutput(node, NodeOutputInfo::AnyTagged());
+        return SetOutput(node, MachineRepresentation::kTagged);
 
       //------------------------------------------------------------------
       // Simplified operators.
@@ -909,7 +790,7 @@
         } else {
           // No input representation requirement; adapt during lowering.
           ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
-          SetOutput(node, NodeOutputInfo::Bool());
+          SetOutput(node, MachineRepresentation::kBit);
         }
         break;
       }
@@ -927,7 +808,7 @@
         } else {
           // No input representation requirement; adapt during lowering.
           ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
-          SetOutput(node, NodeOutputInfo::Int32());
+          SetOutput(node, MachineRepresentation::kWord32);
         }
         break;
       }
@@ -1055,46 +936,80 @@
         break;
       }
       case IrOpcode::kNumberShiftLeft: {
-        Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
-                   UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
         }
         break;
       }
       case IrOpcode::kNumberShiftRight: {
-        Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
-                   UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
         }
         break;
       }
       case IrOpcode::kNumberShiftRightLogical: {
-        Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
-                   UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
         }
         break;
       }
+      case IrOpcode::kNumberImul: {
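+        // NumberImul (Math.imul semantics): both inputs are truncated to
+        // word32 and the result is a word32.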
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+        if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+        break;
+      }
+      case IrOpcode::kNumberClz32: {
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kWord32);
+        if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+        break;
+      }
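+      // The number rounding operators take a float64 input and are lowered
+      // via the Float64{Ceil,Floor,Round,Trunc} helpers below, which use a
+      // fast machine instruction when one is available.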
+      case IrOpcode::kNumberCeil: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
+        break;
+      }
+      case IrOpcode::kNumberFloor: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
+        break;
+      }
+      case IrOpcode::kNumberRound: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Round(node));
+        break;
+      }
+      case IrOpcode::kNumberTrunc: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
+        break;
+      }
       case IrOpcode::kNumberToInt32: {
         // Just change representation if necessary.
-        VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kWord32);
         if (lower()) DeferReplacement(node, node->InputAt(0));
         break;
       }
       case IrOpcode::kNumberToUint32: {
         // Just change representation if necessary.
-        VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kWord32);
         if (lower()) DeferReplacement(node, node->InputAt(0));
         break;
       }
       case IrOpcode::kNumberIsHoleNaN: {
-        VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kBit);
         if (lower()) {
           // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
           //                                   #HoleNaNLower32)
@@ -1109,7 +1024,7 @@
         break;
       }
       case IrOpcode::kPlainPrimitiveToNumber: {
-        VisitUnop(node, UseInfo::AnyTagged(), NodeOutputInfo::NumberTagged());
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
           Operator::Properties properties = node->op()->properties();
@@ -1126,38 +1041,93 @@
         break;
       }
       case IrOpcode::kReferenceEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         if (lower()) {
           NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
         }
         break;
       }
       case IrOpcode::kStringEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
-        if (lower()) lowering->DoStringEqual(node);
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
         break;
       }
       case IrOpcode::kStringLessThan: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
-        if (lower()) lowering->DoStringLessThan(node);
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
         break;
       }
       case IrOpcode::kStringLessThanOrEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
-        if (lower()) lowering->DoStringLessThanOrEqual(node);
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringLessThanOrEqual(x, y)
+          //   => Call(StringLessThanOrEqualStub, x, y, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable =
+              CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
+        break;
+      }
+      case IrOpcode::kStringToNumber: {
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringToNumber(x) => Call(StringToNumberStub, x, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
         break;
       }
       case IrOpcode::kAllocate: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         ProcessRemainingInputs(node, 1);
-        SetOutput(node, NodeOutputInfo::AnyTagged());
+        SetOutput(node, MachineRepresentation::kTagged);
         break;
       }
       case IrOpcode::kLoadField: {
         FieldAccess access = FieldAccessOf(node->op());
         ProcessInput(node, 0, UseInfoForBasePointer(access));
         ProcessRemainingInputs(node, 1);
-        SetOutputFromMachineType(node, access.machine_type);
+        SetOutput(node, access.machine_type.representation());
         break;
       }
       case IrOpcode::kStoreField: {
@@ -1166,7 +1136,7 @@
         ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
                                   access.machine_type.representation()));
         ProcessRemainingInputs(node, 2);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         break;
       }
       case IrOpcode::kLoadBuffer: {
@@ -1176,29 +1146,26 @@
         ProcessInput(node, 2, UseInfo::TruncatingWord32());  // length
         ProcessRemainingInputs(node, 3);
 
-        NodeOutputInfo output_info;
+        MachineRepresentation output;
         if (truncation.TruncatesUndefinedToZeroOrNaN()) {
           if (truncation.TruncatesNaNToZero()) {
             // If undefined is truncated to a non-NaN number, we can use
             // the load's representation.
-            output_info = NodeOutputInfo(access.machine_type().representation(),
-                                         NodeProperties::GetType(node));
+            output = access.machine_type().representation();
           } else {
             // If undefined is truncated to a number, but the use can
             // observe NaN, we need to output at least the float32
             // representation.
             if (access.machine_type().representation() ==
                 MachineRepresentation::kFloat32) {
-              output_info =
-                  NodeOutputInfo(access.machine_type().representation(),
-                                 NodeProperties::GetType(node));
+              output = access.machine_type().representation();
             } else {
               if (access.machine_type().representation() !=
                   MachineRepresentation::kFloat64) {
                 // TODO(bmeurer): See comment on abort_compilation_.
                 if (lower()) lowering->abort_compilation_ = true;
               }
-              output_info = NodeOutputInfo::Float64();
+              output = MachineRepresentation::kFloat64;
             }
           }
         } else {
@@ -1207,11 +1174,10 @@
 
           // If undefined is not truncated away, we need to have the tagged
           // representation.
-          output_info = NodeOutputInfo::AnyTagged();
+          output = MachineRepresentation::kTagged;
         }
-        SetOutput(node, output_info);
-        if (lower())
-          lowering->DoLoadBuffer(node, output_info.representation(), changer_);
+        SetOutput(node, output);
+        if (lower()) lowering->DoLoadBuffer(node, output, changer_);
         break;
       }
       case IrOpcode::kStoreBuffer: {
@@ -1223,7 +1189,7 @@
                      TruncatingUseInfoFromRepresentation(
                          access.machine_type().representation()));  // value
         ProcessRemainingInputs(node, 4);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         if (lower()) lowering->DoStoreBuffer(node);
         break;
       }
@@ -1232,7 +1198,7 @@
         ProcessInput(node, 0, UseInfoForBasePointer(access));  // base
         ProcessInput(node, 1, UseInfo::TruncatingWord32());    // index
         ProcessRemainingInputs(node, 2);
-        SetOutputFromMachineType(node, access.machine_type);
+        SetOutput(node, access.machine_type.representation());
         break;
       }
       case IrOpcode::kStoreElement: {
@@ -1243,22 +1209,15 @@
                      TruncatingUseInfoFromRepresentation(
                          access.machine_type.representation()));  // value
         ProcessRemainingInputs(node, 3);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         break;
       }
-      case IrOpcode::kObjectIsNumber: {
+      case IrOpcode::kObjectIsNumber:
+      case IrOpcode::kObjectIsReceiver:
+      case IrOpcode::kObjectIsSmi:
+      case IrOpcode::kObjectIsUndetectable: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
-        SetOutput(node, NodeOutputInfo::Bool());
-        break;
-      }
-      case IrOpcode::kObjectIsReceiver: {
-        ProcessInput(node, 0, UseInfo::AnyTagged());
-        SetOutput(node, NodeOutputInfo::Bool());
-        break;
-      }
-      case IrOpcode::kObjectIsSmi: {
-        ProcessInput(node, 0, UseInfo::AnyTagged());
-        SetOutput(node, NodeOutputInfo::Bool());
+        SetOutput(node, MachineRepresentation::kBit);
         break;
       }
 
@@ -1272,7 +1231,7 @@
         ProcessInput(node, 0, UseInfo::AnyTagged());   // tagged pointer
         ProcessInput(node, 1, UseInfo::PointerInt());  // index
         ProcessRemainingInputs(node, 2);
-        SetOutputFromMachineType(node, rep);
+        SetOutput(node, rep.representation());
         break;
       }
       case IrOpcode::kStore: {
@@ -1284,13 +1243,13 @@
         ProcessInput(node, 2,
                      TruncatingUseInfoFromRepresentation(rep.representation()));
         ProcessRemainingInputs(node, 3);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         break;
       }
       case IrOpcode::kWord32Shr:
         // We output unsigned int32 for shift right because JavaScript.
         return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Uint32());
+                          MachineRepresentation::kWord32);
       case IrOpcode::kWord32And:
       case IrOpcode::kWord32Or:
       case IrOpcode::kWord32Xor:
@@ -1300,14 +1259,14 @@
         // though the machine bits are the same for either signed or unsigned,
         // because JavaScript considers the result from these operations signed.
         return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Int32());
+                          MachineRepresentation::kWord32);
       case IrOpcode::kWord32Equal:
         return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Bool());
+                          MachineRepresentation::kBit);
 
       case IrOpcode::kWord32Clz:
         return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         NodeOutputInfo::Uint32());
+                         MachineRepresentation::kWord32);
 
       case IrOpcode::kInt32Add:
       case IrOpcode::kInt32Sub:
@@ -1352,45 +1311,33 @@
       case IrOpcode::kWord64Shr:
       case IrOpcode::kWord64Sar:
         return VisitBinop(node, UseInfo::TruncatingWord64(),
-                          NodeOutputInfo::Int64());
+                          MachineRepresentation::kWord64);
       case IrOpcode::kWord64Equal:
         return VisitBinop(node, UseInfo::TruncatingWord64(),
-                          NodeOutputInfo::Bool());
+                          MachineRepresentation::kBit);
 
       case IrOpcode::kChangeInt32ToInt64:
-        return VisitUnop(
-            node, UseInfo::TruncatingWord32(),
-            NodeOutputInfo(MachineRepresentation::kWord64, Type::Signed32()));
+        return VisitUnop(node, UseInfo::TruncatingWord32(),
+                         MachineRepresentation::kWord64);
       case IrOpcode::kChangeUint32ToUint64:
-        return VisitUnop(
-            node, UseInfo::TruncatingWord32(),
-            NodeOutputInfo(MachineRepresentation::kWord64, Type::Unsigned32()));
+        return VisitUnop(node, UseInfo::TruncatingWord32(),
+                         MachineRepresentation::kWord64);
       case IrOpcode::kTruncateFloat64ToFloat32:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float32());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kFloat32);
       case IrOpcode::kTruncateFloat64ToInt32:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
-      case IrOpcode::kTruncateInt64ToInt32:
-        // TODO(titzer): Is kTypeInt32 correct here?
-        return VisitUnop(node, UseInfo::Word64TruncatingToWord32(),
-                         NodeOutputInfo::Int32());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kWord32);
 
       case IrOpcode::kChangeFloat32ToFloat64:
-        return VisitUnop(node, UseInfo::Float32(), NodeOutputInfo::Float64());
+        return VisitUnop(node, UseInfo::Float32(),
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kChangeInt32ToFloat64:
-        return VisitUnop(
-            node, UseInfo::TruncatingWord32(),
-            NodeOutputInfo(MachineRepresentation::kFloat64, Type::Signed32()));
+        return VisitUnop(node, UseInfo::TruncatingWord32(),
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kChangeUint32ToFloat64:
         return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         NodeOutputInfo(MachineRepresentation::kFloat64,
-                                        Type::Unsigned32()));
-      case IrOpcode::kChangeFloat64ToInt32:
-        return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
-                         NodeOutputInfo::Int32());
-      case IrOpcode::kChangeFloat64ToUint32:
-        return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
-                         NodeOutputInfo::Uint32());
-
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kFloat64Add:
       case IrOpcode::kFloat64Sub:
       case IrOpcode::kFloat64Mul:
@@ -1404,29 +1351,40 @@
       case IrOpcode::kFloat64RoundTruncate:
       case IrOpcode::kFloat64RoundTiesAway:
       case IrOpcode::kFloat64RoundUp:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
       case IrOpcode::kFloat64LessThanOrEqual:
         return VisitFloat64Cmp(node);
       case IrOpcode::kFloat64ExtractLowWord32:
       case IrOpcode::kFloat64ExtractHighWord32:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kWord32);
       case IrOpcode::kFloat64InsertLowWord32:
       case IrOpcode::kFloat64InsertHighWord32:
         return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Float64());
+                          MachineRepresentation::kFloat64);
       case IrOpcode::kLoadStackPointer:
       case IrOpcode::kLoadFramePointer:
       case IrOpcode::kLoadParentFramePointer:
-        return VisitLeaf(node, NodeOutputInfo::Pointer());
+        return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kStateValues:
         VisitStateValues(node);
         break;
+
+      // The following opcodes are not produced before representation
+      // inference runs, so we do not have any real test coverage.
+      // Simply fail here.
+      case IrOpcode::kChangeFloat64ToInt32:
+      case IrOpcode::kChangeFloat64ToUint32:
+      case IrOpcode::kTruncateInt64ToInt32:
+        FATAL("Representation inference: unsupported opcodes.");
+
       default:
         VisitInputs(node);
         // Assume the output is tagged.
-        SetOutput(node, NodeOutputInfo::AnyTagged());
+        SetOutput(node, MachineRepresentation::kTagged);
         break;
     }
   }
@@ -1437,7 +1395,7 @@
           replacement->op()->mnemonic());
 
     if (replacement->id() < count_ &&
-        GetInfo(node)->output_type()->Is(GetInfo(replacement)->output_type())) {
+        GetUpperBound(node)->Is(GetUpperBound(replacement))) {
       // Replace with a previously existing node eagerly only if the type is the
       // same.
       node->ReplaceUses(replacement);
@@ -1455,9 +1413,7 @@
   void PrintOutputInfo(NodeInfo* info) {
     if (FLAG_trace_representation) {
       OFStream os(stdout);
-      os << info->representation() << " (";
-      info->output_type()->PrintTo(os, Type::SEMANTIC_DIM);
-      os << ")";
+      os << info->representation();
     }
   }
 
@@ -1595,21 +1551,392 @@
   NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
 }
 
+Node* SimplifiedLowering::Float64Ceil(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
 
-Node* SimplifiedLowering::StringComparison(Node* node) {
-  Operator::Properties properties = node->op()->properties();
-  Callable callable = CodeFactory::StringCompare(isolate());
-  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, flags, properties);
-  return graph()->NewNode(
-      common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-      NodeProperties::GetValueInput(node, 0),
-      NodeProperties::GetValueInput(node, 1), jsgraph()->NoContextConstant(),
-      NodeProperties::GetEffectInput(node),
-      NodeProperties::GetControlInput(node));
+  // Use fast hardware instruction if available.
+  if (machine()->Float64RoundUp().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundUp().op(), input);
+  }
+
+  // General case for ceil.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if temp1 < input then
+  //         temp1 + 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+  //         -0 - temp3
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), temp1, input),
+          graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        Node* temp3 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                          vtrue0, vfalse0, merge0);
 }
 
+Node* SimplifiedLowering::Float64Floor(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_one = jsgraph()->Float64Constant(-1.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
+
+  // Use fast hardware instruction if available.
+  if (machine()->Float64RoundDown().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundDown().op(), input);
+  }
+
+  // General case for floor.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         if temp2 < temp1 then
+  //           -1 - temp2
+  //         else
+  //           -0 - temp2
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        vfalse2 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
+            graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                          vtrue0, vfalse0, merge0);
+}
+
+Node* SimplifiedLowering::Float64Round(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const one_half = jsgraph()->Float64Constant(0.5);
+  Node* const input = node->InputAt(0);
+
+  // Round up towards Infinity, and adjust if the difference exceeds 0.5.
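+  // If ceil overshot the input by more than 0.5, the fractional part was
+  // below one half, so step the result back down by one.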
+  Node* result = Float64Ceil(node);
+  return graph()->NewNode(
+      common()->Select(MachineRepresentation::kFloat64),
+      graph()->NewNode(
+          machine()->Float64LessThanOrEqual(),
+          graph()->NewNode(machine()->Float64Sub(), result, one_half), input),
+      result, graph()->NewNode(machine()->Float64Sub(), result, one));
+}
+
+Node* SimplifiedLowering::Float64Trunc(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
+
+  // Use fast hardware instruction if available.
+  if (machine()->Float64RoundTruncate().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundTruncate().op(), input);
+  }
+
+  // General case for trunc.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+  //         -0 - temp3
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        Node* temp3 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                          vtrue0, vfalse0, merge0);
+}
 
 Node* SimplifiedLowering::Int32Div(Node* const node) {
   Int32BinopMatcher m(node);
@@ -1873,53 +2200,6 @@
   NodeProperties::ChangeOp(node, op);
 }
 
-
-namespace {
-
-void ReplaceEffectUses(Node* node, Node* replacement) {
-  // Requires distinguishing between value and effect edges.
-  DCHECK(replacement->op()->EffectOutputCount() > 0);
-  for (Edge edge : node->use_edges()) {
-    if (NodeProperties::IsEffectEdge(edge)) {
-      edge.UpdateTo(replacement);
-    } else {
-      DCHECK(NodeProperties::IsValueEdge(edge));
-    }
-  }
-}
-
-}  // namespace
-
-
-void SimplifiedLowering::DoStringEqual(Node* node) {
-  Node* comparison = StringComparison(node);
-  ReplaceEffectUses(node, comparison);
-  node->ReplaceInput(0, comparison);
-  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
-  node->TrimInputCount(2);
-  NodeProperties::ChangeOp(node, machine()->WordEqual());
-}
-
-
-void SimplifiedLowering::DoStringLessThan(Node* node) {
-  Node* comparison = StringComparison(node);
-  ReplaceEffectUses(node, comparison);
-  node->ReplaceInput(0, comparison);
-  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
-  node->TrimInputCount(2);
-  NodeProperties::ChangeOp(node, machine()->IntLessThan());
-}
-
-
-void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
-  Node* comparison = StringComparison(node);
-  ReplaceEffectUses(node, comparison);
-  node->ReplaceInput(0, comparison);
-  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
-  node->TrimInputCount(2);
-  NodeProperties::ChangeOp(node, machine()->IntLessThanOrEqual());
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 358bd97..8b711a9 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -37,9 +37,6 @@
                     RepresentationChanger* changer);
   void DoStoreBuffer(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
-  void DoStringEqual(Node* node);
-  void DoStringLessThan(Node* node);
-  void DoStringLessThanOrEqual(Node* node);
 
   // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
   // typing hack to support the gigantic "asm.js should be fast without proper
@@ -58,7 +55,10 @@
   // position information via the SourcePositionWrapper like all other reducers.
   SourcePositionTable* source_positions_;
 
-  Node* StringComparison(Node* node);
+  Node* Float64Ceil(Node* const node);
+  Node* Float64Floor(Node* const node);
+  Node* Float64Round(Node* const node);
+  Node* Float64Trunc(Node* const node);
   Node* Int32Div(Node* const node);
   Node* Int32Mod(Node* const node);
   Node* Uint32Div(Node* const node);
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 120d792..012004a 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -9,14 +9,14 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
 #include "src/conversions-inl.h"
+#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
 SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
-    : jsgraph_(jsgraph) {}
-
+    : jsgraph_(jsgraph), type_cache_(TypeCache::Get()) {}
 
 SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
 
@@ -89,6 +89,17 @@
       if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
       break;
     }
+    case IrOpcode::kNumberCeil:
+    case IrOpcode::kNumberFloor:
+    case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberTrunc: {
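+      // Rounding is the identity on inputs that are already integers, -0 or
+      // NaN, so the operation can be dropped.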
+      Node* const input = NodeProperties::GetValueInput(node, 0);
+      Type* const input_type = NodeProperties::GetType(input);
+      if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+        return Replace(input);
+      }
+      break;
+    }
     case IrOpcode::kReferenceEqual:
       return ReduceReferenceEqual(node);
     default:
@@ -97,7 +108,6 @@
   return NoChange();
 }
 
-
 Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
   DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
   Node* const left = NodeProperties::GetValueInput(node, 0);
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 979a3d0..13301c2 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -9,6 +9,10 @@
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
 namespace compiler {
 
 // Forward declarations.
@@ -42,6 +46,7 @@
   SimplifiedOperatorBuilder* simplified() const;
 
   JSGraph* const jsgraph_;
+  TypeCache const& type_cache_;
 
   DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
 };
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index c7abe9c..daa9501 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -156,7 +156,6 @@
   return OpParameter<ElementAccess>(op);
 }
 
-
 #define PURE_OP_LIST(V)                                  \
   V(BooleanNot, Operator::kNoProperties, 1)              \
   V(BooleanToNumber, Operator::kNoProperties, 1)         \
@@ -174,10 +173,17 @@
   V(NumberShiftLeft, Operator::kNoProperties, 2)         \
   V(NumberShiftRight, Operator::kNoProperties, 2)        \
   V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
+  V(NumberImul, Operator::kNoProperties, 2)              \
+  V(NumberClz32, Operator::kNoProperties, 1)             \
+  V(NumberCeil, Operator::kNoProperties, 1)              \
+  V(NumberFloor, Operator::kNoProperties, 1)             \
+  V(NumberRound, Operator::kNoProperties, 1)             \
+  V(NumberTrunc, Operator::kNoProperties, 1)             \
   V(NumberToInt32, Operator::kNoProperties, 1)           \
   V(NumberToUint32, Operator::kNoProperties, 1)          \
   V(NumberIsHoleNaN, Operator::kNoProperties, 1)         \
   V(PlainPrimitiveToNumber, Operator::kNoProperties, 1)  \
+  V(StringToNumber, Operator::kNoProperties, 1)          \
   V(ChangeTaggedToInt32, Operator::kNoProperties, 1)     \
   V(ChangeTaggedToUint32, Operator::kNoProperties, 1)    \
   V(ChangeTaggedToFloat64, Operator::kNoProperties, 1)   \
@@ -188,7 +194,8 @@
   V(ChangeBitToBool, Operator::kNoProperties, 1)         \
   V(ObjectIsNumber, Operator::kNoProperties, 1)          \
   V(ObjectIsReceiver, Operator::kNoProperties, 1)        \
-  V(ObjectIsSmi, Operator::kNoProperties, 1)
+  V(ObjectIsSmi, Operator::kNoProperties, 1)             \
+  V(ObjectIsUndetectable, Operator::kNoProperties, 1)
 
 #define NO_THROW_OP_LIST(V)                 \
   V(StringEqual, Operator::kCommutative, 2) \
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 2ed4b5f..a39d864 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -143,6 +143,12 @@
   const Operator* NumberShiftLeft();
   const Operator* NumberShiftRight();
   const Operator* NumberShiftRightLogical();
+  const Operator* NumberImul();
+  const Operator* NumberClz32();
+  const Operator* NumberCeil();
+  const Operator* NumberFloor();
+  const Operator* NumberRound();
+  const Operator* NumberTrunc();
   const Operator* NumberToInt32();
   const Operator* NumberToUint32();
   const Operator* NumberIsHoleNaN();
@@ -154,6 +160,7 @@
   const Operator* StringEqual();
   const Operator* StringLessThan();
   const Operator* StringLessThanOrEqual();
+  const Operator* StringToNumber();
 
   const Operator* ChangeTaggedToInt32();
   const Operator* ChangeTaggedToUint32();
@@ -167,6 +174,7 @@
   const Operator* ObjectIsNumber();
   const Operator* ObjectIsReceiver();
   const Operator* ObjectIsSmi();
+  const Operator* ObjectIsUndetectable();
 
   const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
 
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
index 42c4627..da4f268 100644
--- a/src/compiler/type-hint-analyzer.cc
+++ b/src/compiler/type-hint-analyzer.cc
@@ -48,10 +48,10 @@
   if (i == infos_.end()) return false;
   Handle<Code> code = i->second;
   DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
-  ToBooleanStub stub(code->GetIsolate(), code->extra_ic_state());
-// TODO(bmeurer): Replace ToBooleanStub::Types with ToBooleanHints.
-#define ASSERT_COMPATIBLE(NAME, Name)       \
-  STATIC_ASSERT(1 << ToBooleanStub::NAME == \
+  ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
+// TODO(bmeurer): Replace ToBooleanICStub::Types with ToBooleanHints.
+#define ASSERT_COMPATIBLE(NAME, Name)         \
+  STATIC_ASSERT(1 << ToBooleanICStub::NAME == \
                 static_cast<int>(ToBooleanHint::k##Name))
   ASSERT_COMPATIBLE(UNDEFINED, Undefined);
   ASSERT_COMPATIBLE(BOOLEAN, Boolean);
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 9679513..81c3d3d 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -57,10 +57,8 @@
   unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
   falsish_ = Type::Union(
       Type::Undetectable(),
-      Type::Union(
-          Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
-                      Type::NullOrUndefined(), zone),
-          singleton_the_hole_, zone),
+      Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+                  singleton_the_hole_, zone),
       zone);
   truish_ = Type::Union(
       singleton_true_,
@@ -116,6 +114,8 @@
       DECLARE_CASE(IfDefault)
       DECLARE_CASE(Merge)
       DECLARE_CASE(Deoptimize)
+      DECLARE_CASE(DeoptimizeIf)
+      DECLARE_CASE(DeoptimizeUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -160,6 +160,8 @@
       DECLARE_CASE(IfDefault)
       DECLARE_CASE(Merge)
       DECLARE_CASE(Deoptimize)
+      DECLARE_CASE(DeoptimizeIf)
+      DECLARE_CASE(DeoptimizeUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -238,12 +240,17 @@
   static Type* ToNumber(Type*, Typer*);
   static Type* ToObject(Type*, Typer*);
   static Type* ToString(Type*, Typer*);
+  static Type* NumberCeil(Type*, Typer*);
+  static Type* NumberFloor(Type*, Typer*);
+  static Type* NumberRound(Type*, Typer*);
+  static Type* NumberTrunc(Type*, Typer*);
   static Type* NumberToInt32(Type*, Typer*);
   static Type* NumberToUint32(Type*, Typer*);
 
   static Type* ObjectIsNumber(Type*, Typer*);
   static Type* ObjectIsReceiver(Type*, Typer*);
   static Type* ObjectIsSmi(Type*, Typer*);
+  static Type* ObjectIsUndetectable(Type*, Typer*);
 
   static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
   static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
@@ -414,6 +421,11 @@
   // ES6 section 7.1.4 ToInteger ( argument )
   type = ToNumber(type, t);
   if (type->Is(t->cache_.kIntegerOrMinusZero)) return type;
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) {
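+    // ToInteger maps NaN to +0, so strip the NaN part of the type and add
+    // the zero singleton instead.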
+    return Type::Union(
+        Type::Intersect(type, t->cache_.kIntegerOrMinusZero, t->zone()),
+        t->cache_.kSingletonZero, t->zone());
+  }
   return t->cache_.kIntegerOrMinusZero;
 }
 
@@ -469,7 +481,9 @@
   // ES6 section 7.1.13 ToObject ( argument )
   if (type->Is(Type::Receiver())) return type;
   if (type->Is(Type::Primitive())) return Type::OtherObject();
-  if (!type->Maybe(Type::Undetectable())) return Type::DetectableReceiver();
+  if (!type->Maybe(Type::OtherUndetectable())) {
+    return Type::DetectableReceiver();
+  }
   return Type::Receiver();
 }
 
@@ -482,6 +496,37 @@
   return Type::String();
 }
 
+// static
+Type* Typer::Visitor::NumberCeil(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberFloor(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberRound(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberTrunc(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
 
 Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
   // TODO(neis): DCHECK(type->Is(Type::Number()));
@@ -533,6 +578,13 @@
 }
 
 
+Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
+  if (type->Is(Type::Undetectable())) return t->singleton_true_;
+  if (!type->Maybe(Type::Undetectable())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+
 // -----------------------------------------------------------------------------
 
 
@@ -1171,7 +1223,7 @@
     return Type::Constant(f->string_string(), t->zone());
   } else if (type->Is(Type::Symbol())) {
     return Type::Constant(f->symbol_string(), t->zone());
-  } else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable(),
+  } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
                                   t->zone()))) {
     return Type::Constant(f->undefined_string(), t->zone());
   } else if (type->Is(Type::Null())) {
@@ -1198,26 +1250,29 @@
   return TypeUnaryOp(node, ToBoolean);
 }
 
-
-Type* Typer::Visitor::TypeJSToNumber(Node* node) {
-  return TypeUnaryOp(node, ToNumber);
+Type* Typer::Visitor::TypeJSToInteger(Node* node) {
+  return TypeUnaryOp(node, ToInteger);
 }
 
-
-Type* Typer::Visitor::TypeJSToString(Node* node) {
-  return TypeUnaryOp(node, ToString);
+Type* Typer::Visitor::TypeJSToLength(Node* node) {
+  return TypeUnaryOp(node, ToLength);
 }
 
-
 Type* Typer::Visitor::TypeJSToName(Node* node) {
   return TypeUnaryOp(node, ToName);
 }
 
+Type* Typer::Visitor::TypeJSToNumber(Node* node) {
+  return TypeUnaryOp(node, ToNumber);
+}
 
 Type* Typer::Visitor::TypeJSToObject(Node* node) {
   return TypeUnaryOp(node, ToObject);
 }
 
+Type* Typer::Visitor::TypeJSToString(Node* node) {
+  return TypeUnaryOp(node, ToString);
+}
 
 // JS object operators.
 
@@ -1502,8 +1557,9 @@
         case kMathRandom:
           return Type::OrderedNumber();
         case kMathFloor:
-        case kMathRound:
         case kMathCeil:
+        case kMathRound:
+        case kMathTrunc:
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         // Unary math functions.
         case kMathAbs:
@@ -1573,15 +1629,8 @@
     case Runtime::kInlineDoubleHi:
       return Type::Signed32();
     case Runtime::kInlineConstructDouble:
-    case Runtime::kInlineMathFloor:
-    case Runtime::kInlineMathSqrt:
-    case Runtime::kInlineMathAcos:
-    case Runtime::kInlineMathAsin:
-    case Runtime::kInlineMathAtan:
     case Runtime::kInlineMathAtan2:
       return Type::Number();
-    case Runtime::kInlineMathClz32:
-      return Type::Range(0, 32, zone());
     case Runtime::kInlineCreateIterResultObject:
     case Runtime::kInlineRegExpConstructResult:
       return Type::OtherObject();
@@ -1708,6 +1757,27 @@
   return Type::Unsigned32();
 }
 
+Type* Typer::Visitor::TypeNumberImul(Node* node) { return Type::Signed32(); }
+
+Type* Typer::Visitor::TypeNumberClz32(Node* node) {
+  return typer_->cache_.kZeroToThirtyTwo;
+}
+
+Type* Typer::Visitor::TypeNumberCeil(Node* node) {
+  return TypeUnaryOp(node, NumberCeil);
+}
+
+Type* Typer::Visitor::TypeNumberFloor(Node* node) {
+  return TypeUnaryOp(node, NumberFloor);
+}
+
+Type* Typer::Visitor::TypeNumberRound(Node* node) {
+  return TypeUnaryOp(node, NumberRound);
+}
+
+Type* Typer::Visitor::TypeNumberTrunc(Node* node) {
+  return TypeUnaryOp(node, NumberTrunc);
+}
 
 Type* Typer::Visitor::TypeNumberToInt32(Node* node) {
   return TypeUnaryOp(node, NumberToInt32);
@@ -1750,6 +1820,9 @@
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeStringToNumber(Node* node) {
+  return TypeUnaryOp(node, ToNumber);
+}
 
 namespace {
 
@@ -1925,6 +1998,11 @@
 }
 
 
+Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
+  return TypeUnaryOp(node, ObjectIsUndetectable);
+}
+
+
 // Machine operators.
 
 Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
@@ -2132,6 +2210,10 @@
                          zone());
 }
 
+Type* Typer::Visitor::TypeTruncateFloat64ToUint32(Node* node) {
+  return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
+                         zone());
+}
 
 Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
   return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
@@ -2427,6 +2509,17 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeInt32PairMul(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairShl(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairShr(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
 
 // Heap constants.
 
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 99480ca..a69ace9 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -29,13 +29,13 @@
 
 
 static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
-  auto const uses = def->uses();
+  const Node::Uses uses = def->uses();
   return std::find(uses.begin(), uses.end(), use) != uses.end();
 }
 
 
 static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
-  auto const inputs = use->inputs();
+  const Node::Inputs inputs = use->inputs();
   return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
 }
 
@@ -135,6 +135,11 @@
     CheckOutput(value, node, value->op()->ValueOutputCount(), "value");
     CHECK(IsDefUseChainLinkPresent(value, node));
     CHECK(IsUseDefChainLinkPresent(value, node));
+    // Verify that only parameters and projections can have input nodes with
+    // multiple outputs.
+    CHECK(node->opcode() == IrOpcode::kParameter ||
+          node->opcode() == IrOpcode::kProjection ||
+          value->op()->ValueOutputCount() <= 1);
   }
 
   // Verify all context inputs are value nodes.
@@ -161,16 +166,6 @@
     CHECK(IsUseDefChainLinkPresent(control, node));
   }
 
-  // Verify all successors are projections if multiple value outputs exist.
-  if (node->op()->ValueOutputCount() > 1) {
-    for (Edge edge : node->use_edges()) {
-      Node* use = edge.from();
-      CHECK(!NodeProperties::IsValueEdge(edge) ||
-            use->opcode() == IrOpcode::kProjection ||
-            use->opcode() == IrOpcode::kParameter);
-    }
-  }
-
   switch (node->opcode()) {
     case IrOpcode::kStart:
       // Start has no inputs.
@@ -194,7 +189,7 @@
     case IrOpcode::kBranch: {
       // Branch uses are IfTrue and IfFalse.
       int count_true = 0, count_false = 0;
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         CHECK(use->opcode() == IrOpcode::kIfTrue ||
               use->opcode() == IrOpcode::kIfFalse);
         if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
@@ -232,10 +227,10 @@
     case IrOpcode::kSwitch: {
       // Switch uses are Case and Default.
       int count_case = 0, count_default = 0;
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         switch (use->opcode()) {
           case IrOpcode::kIfValue: {
-            for (auto user : node->uses()) {
+            for (const Node* user : node->uses()) {
               if (user != use && user->opcode() == IrOpcode::kIfValue) {
                 CHECK_NE(OpParameter<int32_t>(use->op()),
                          OpParameter<int32_t>(user->op()));
@@ -274,11 +269,16 @@
       // Type is empty.
       CheckNotTyped(node);
       break;
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kDeoptimize:
     case IrOpcode::kReturn:
     case IrOpcode::kThrow:
       // Deoptimize, Return and Throw uses are End.
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         CHECK_EQ(IrOpcode::kEnd, use->opcode());
       }
       // Type is empty.
@@ -292,7 +292,7 @@
       CHECK_EQ(IrOpcode::kLoop,
                NodeProperties::GetControlInput(node)->opcode());
       // Terminate uses are End.
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         CHECK_EQ(IrOpcode::kEnd, use->opcode());
       }
       // Type is empty.
@@ -492,6 +492,18 @@
       // Type is Boolean.
       CheckUpperIs(node, Type::Boolean());
       break;
+    case IrOpcode::kJSToInteger:
+      // Type is OrderedNumber.
+      CheckUpperIs(node, Type::OrderedNumber());
+      break;
+    case IrOpcode::kJSToLength:
+      // Type is OrderedNumber.
+      CheckUpperIs(node, Type::OrderedNumber());
+      break;
+    case IrOpcode::kJSToName:
+      // Type is Name.
+      CheckUpperIs(node, Type::Name());
+      break;
     case IrOpcode::kJSToNumber:
       // Type is Number.
       CheckUpperIs(node, Type::Number());
@@ -500,10 +512,6 @@
       // Type is String.
       CheckUpperIs(node, Type::String());
       break;
-    case IrOpcode::kJSToName:
-      // Type is Name.
-      CheckUpperIs(node, Type::Name());
-      break;
     case IrOpcode::kJSToObject:
       // Type is Receiver.
       CheckUpperIs(node, Type::Receiver());
@@ -677,6 +685,25 @@
       CheckValueInputIs(node, 1, Type::Unsigned32());
       CheckUpperIs(node, Type::Unsigned32());
       break;
+    case IrOpcode::kNumberImul:
+      // (Unsigned32, Unsigned32) -> Signed32
+      CheckValueInputIs(node, 0, Type::Unsigned32());
+      CheckValueInputIs(node, 1, Type::Unsigned32());
+      CheckUpperIs(node, Type::Signed32());
+      break;
+    case IrOpcode::kNumberClz32:
+      // Unsigned32 -> Unsigned32
+      CheckValueInputIs(node, 0, Type::Unsigned32());
+      CheckUpperIs(node, Type::Unsigned32());
+      break;
+    case IrOpcode::kNumberCeil:
+    case IrOpcode::kNumberFloor:
+    case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberTrunc:
+      // Number -> Number
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckUpperIs(node, Type::Number());
+      break;
     case IrOpcode::kNumberToInt32:
       // Number -> Signed32
       CheckValueInputIs(node, 0, Type::Number());
@@ -705,6 +732,11 @@
       CheckValueInputIs(node, 1, Type::String());
       CheckUpperIs(node, Type::Boolean());
       break;
+    case IrOpcode::kStringToNumber:
+      // String -> Number
+      CheckValueInputIs(node, 0, Type::String());
+      CheckUpperIs(node, Type::Number());
+      break;
     case IrOpcode::kReferenceEqual: {
       // (Unique, Any) -> Boolean  and
       // (Any, Unique) -> Boolean
@@ -714,6 +746,7 @@
     case IrOpcode::kObjectIsNumber:
     case IrOpcode::kObjectIsReceiver:
     case IrOpcode::kObjectIsSmi:
+    case IrOpcode::kObjectIsUndetectable:
       CheckValueInputIs(node, 0, Type::Any());
       CheckUpperIs(node, Type::Boolean());
       break;
@@ -935,6 +968,7 @@
     case IrOpcode::kChangeFloat32ToFloat64:
     case IrOpcode::kChangeFloat64ToInt32:
     case IrOpcode::kChangeFloat64ToUint32:
+    case IrOpcode::kTruncateFloat64ToUint32:
     case IrOpcode::kTruncateFloat32ToInt32:
     case IrOpcode::kTruncateFloat32ToUint32:
     case IrOpcode::kTryTruncateFloat32ToInt64:
@@ -945,6 +979,12 @@
     case IrOpcode::kFloat64ExtractHighWord32:
     case IrOpcode::kFloat64InsertLowWord32:
     case IrOpcode::kFloat64InsertHighWord32:
+    case IrOpcode::kInt32PairAdd:
+    case IrOpcode::kInt32PairSub:
+    case IrOpcode::kInt32PairMul:
+    case IrOpcode::kWord32PairShl:
+    case IrOpcode::kWord32PairShr:
+    case IrOpcode::kWord32PairSar:
     case IrOpcode::kLoadStackPointer:
     case IrOpcode::kLoadFramePointer:
     case IrOpcode::kLoadParentFramePointer:
@@ -959,7 +999,7 @@
 void Verifier::Run(Graph* graph, Typing typing) {
   CHECK_NOT_NULL(graph->start());
   CHECK_NOT_NULL(graph->end());
-  Zone zone;
+  Zone zone(graph->zone()->allocator());
   Visitor visitor(&zone, typing);
   AllNodes all(&zone, graph);
   for (Node* node : all.live) visitor.Check(node);
@@ -1049,7 +1089,7 @@
 
 void ScheduleVerifier::Run(Schedule* schedule) {
   const size_t count = schedule->BasicBlockCount();
-  Zone tmp_zone;
+  Zone tmp_zone(schedule->zone()->allocator());
   Zone* zone = &tmp_zone;
   BasicBlock* start = schedule->start();
   BasicBlockVector* rpo_order = schedule->rpo_order();
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 9c3858d..93d5a08 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -6,6 +6,7 @@
 
 #include "src/isolate-inl.h"
 
+#include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/platform.h"
 
 #include "src/compiler/access-builder.h"
@@ -71,28 +72,8 @@
   }
 }
 
-
-enum TrapReason {
-  kTrapUnreachable,
-  kTrapMemOutOfBounds,
-  kTrapDivByZero,
-  kTrapDivUnrepresentable,
-  kTrapRemByZero,
-  kTrapFloatUnrepresentable,
-  kTrapFuncInvalid,
-  kTrapFuncSigMismatch,
-  kTrapCount
-};
-
-
-static const char* kTrapMessages[] = {
-    "unreachable",       "memory access out of bounds",
-    "divide by zero",    "divide result unrepresentable",
-    "remainder by zero", "integer result unrepresentable",
-    "invalid function",  "function signature mismatch"};
 }  // namespace
 
-
 // A helper that handles building graph fragments for trapping.
 // To avoid generating a ton of redundant code that just calls the runtime
 // to trap, we generate a per-trap-reason block of code that all trap sites
@@ -103,17 +84,17 @@
       : builder_(builder),
         jsgraph_(builder->jsgraph()),
         graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
-    for (int i = 0; i < kTrapCount; i++) traps_[i] = nullptr;
+    for (int i = 0; i < wasm::kTrapCount; i++) traps_[i] = nullptr;
   }
 
   // Make the current control path trap to unreachable.
-  void Unreachable() { ConnectTrap(kTrapUnreachable); }
+  void Unreachable() { ConnectTrap(wasm::kTrapUnreachable); }
 
   // Always trap with the given reason.
-  void TrapAlways(TrapReason reason) { ConnectTrap(reason); }
+  void TrapAlways(wasm::TrapReason reason) { ConnectTrap(reason); }
 
   // Add a check that traps if {node} is equal to {val}.
-  Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
+  Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val) {
     Int32Matcher m(node);
     if (m.HasValue() && !m.Is(val)) return graph()->start();
     if (val == 0) {
@@ -127,12 +108,12 @@
   }
 
   // Add a check that traps if {node} is zero.
-  Node* ZeroCheck32(TrapReason reason, Node* node) {
+  Node* ZeroCheck32(wasm::TrapReason reason, Node* node) {
     return TrapIfEq32(reason, node, 0);
   }
 
   // Add a check that traps if {node} is equal to {val}.
-  Node* TrapIfEq64(TrapReason reason, Node* node, int64_t val) {
+  Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val) {
     Int64Matcher m(node);
     if (m.HasValue() && !m.Is(val)) return graph()->start();
     AddTrapIfTrue(reason,
@@ -142,22 +123,22 @@
   }
 
   // Add a check that traps if {node} is zero.
-  Node* ZeroCheck64(TrapReason reason, Node* node) {
+  Node* ZeroCheck64(wasm::TrapReason reason, Node* node) {
     return TrapIfEq64(reason, node, 0);
   }
 
   // Add a trap if {cond} is true.
-  void AddTrapIfTrue(TrapReason reason, Node* cond) {
+  void AddTrapIfTrue(wasm::TrapReason reason, Node* cond) {
     AddTrapIf(reason, cond, true);
   }
 
   // Add a trap if {cond} is false.
-  void AddTrapIfFalse(TrapReason reason, Node* cond) {
+  void AddTrapIfFalse(wasm::TrapReason reason, Node* cond) {
     AddTrapIf(reason, cond, false);
   }
 
   // Add a trap if {cond} is true or false according to {iftrue}.
-  void AddTrapIf(TrapReason reason, Node* cond, bool iftrue) {
+  void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue) {
     Node** effect_ptr = builder_->effect_;
     Node** control_ptr = builder_->control_;
     Node* before = *effect_ptr;
@@ -198,14 +179,14 @@
   WasmGraphBuilder* builder_;
   JSGraph* jsgraph_;
   Graph* graph_;
-  Node* traps_[kTrapCount];
-  Node* effects_[kTrapCount];
+  Node* traps_[wasm::kTrapCount];
+  Node* effects_[wasm::kTrapCount];
 
   JSGraph* jsgraph() { return jsgraph_; }
   Graph* graph() { return jsgraph_->graph(); }
   CommonOperatorBuilder* common() { return jsgraph()->common(); }
 
-  void ConnectTrap(TrapReason reason) {
+  void ConnectTrap(wasm::TrapReason reason) {
     if (traps_[reason] == nullptr) {
       // Create trap code for the first time this trap is used.
       return BuildTrapCode(reason);
@@ -215,8 +196,9 @@
     builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
   }
 
-  void BuildTrapCode(TrapReason reason) {
-    Node* exception = builder_->String(kTrapMessages[reason]);
+  void BuildTrapCode(wasm::TrapReason reason) {
+    Node* exception =
+        builder_->String(wasm::WasmOpcodes::TrapReasonName(reason));
     Node* end;
     Node** control_ptr = builder_->control_;
     Node** effect_ptr = builder_->effect_;
@@ -265,7 +247,6 @@
   }
 };
 
-
 WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
                                    wasm::FunctionSig* function_signature)
     : zone_(zone),
@@ -351,8 +332,7 @@
 Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
                             Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
-  Node** buf = Realloc(vals, count);
-  buf = Realloc(buf, count + 1);
+  Node** buf = Realloc(vals, count, count + 1);
   buf[count] = control;
   return graph()->NewNode(jsgraph()->common()->Phi(type, count), count + 1,
                           buf);
@@ -362,8 +342,7 @@
 Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
                                   Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
-  Node** buf = Realloc(effects, count);
-  buf = Realloc(buf, count + 1);
+  Node** buf = Realloc(effects, count, count + 1);
   buf[count] = control;
   return graph()->NewNode(jsgraph()->common()->EffectPhi(count), count + 1,
                           buf);
@@ -394,43 +373,14 @@
     case wasm::kExprI32Mul:
       op = m->Int32Mul();
       break;
-    case wasm::kExprI32DivS: {
-      trap_->ZeroCheck32(kTrapDivByZero, right);
-      Node* before = *control_;
-      Node* denom_is_m1;
-      Node* denom_is_not_m1;
-      Branch(graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
-                              jsgraph()->Int32Constant(-1)),
-             &denom_is_m1, &denom_is_not_m1);
-      *control_ = denom_is_m1;
-      trap_->TrapIfEq32(kTrapDivUnrepresentable, left, kMinInt);
-      if (*control_ != denom_is_m1) {
-        *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
-                                     denom_is_not_m1, *control_);
-      } else {
-        *control_ = before;
-      }
-      return graph()->NewNode(m->Int32Div(), left, right, *control_);
-    }
+    case wasm::kExprI32DivS:
+      return BuildI32DivS(left, right);
     case wasm::kExprI32DivU:
-      op = m->Uint32Div();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck32(kTrapDivByZero, right));
-    case wasm::kExprI32RemS: {
-      trap_->ZeroCheck32(kTrapRemByZero, right);
-      Diamond d(graph(), jsgraph()->common(),
-                graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
-                                 jsgraph()->Int32Constant(-1)));
-
-      Node* rem = graph()->NewNode(m->Int32Mod(), left, right, d.if_false);
-
-      return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-                   rem);
-    }
+      return BuildI32DivU(left, right);
+    case wasm::kExprI32RemS:
+      return BuildI32RemS(left, right);
     case wasm::kExprI32RemU:
-      op = m->Uint32Mod();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck32(kTrapRemByZero, right));
+      return BuildI32RemU(left, right);
     case wasm::kExprI32And:
       op = m->Word32And();
       break;
@@ -442,13 +392,23 @@
       break;
     case wasm::kExprI32Shl:
       op = m->Word32Shl();
+      right = MaskShiftCount32(right);
       break;
     case wasm::kExprI32ShrU:
       op = m->Word32Shr();
+      right = MaskShiftCount32(right);
       break;
     case wasm::kExprI32ShrS:
       op = m->Word32Sar();
+      right = MaskShiftCount32(right);
       break;
+    case wasm::kExprI32Ror:
+      op = m->Word32Ror();
+      right = MaskShiftCount32(right);
+      break;
+    case wasm::kExprI32Rol:
+      right = MaskShiftCount32(right);
+      return BuildI32Rol(left, right);
     case wasm::kExprI32Eq:
       op = m->Word32Equal();
       break;
@@ -485,76 +445,62 @@
     case wasm::kExprI64And:
       op = m->Word64And();
       break;
-#if WASM_64
-    // Opcodes only supported on 64-bit platforms.
-    // TODO(titzer): query the machine operator builder here instead of #ifdef.
+    // TODO(ahaas): I added a list of missing instructions here to make merging
+    // easier when I do them one by one.
+    // kExprI64Add:
     case wasm::kExprI64Add:
       op = m->Int64Add();
       break;
+    // kExprI64Sub:
     case wasm::kExprI64Sub:
       op = m->Int64Sub();
       break;
+    // kExprI64Mul:
     case wasm::kExprI64Mul:
       op = m->Int64Mul();
       break;
-    case wasm::kExprI64DivS: {
-      trap_->ZeroCheck64(kTrapDivByZero, right);
-      Node* before = *control_;
-      Node* denom_is_m1;
-      Node* denom_is_not_m1;
-      Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
-                              jsgraph()->Int64Constant(-1)),
-             &denom_is_m1, &denom_is_not_m1);
-      *control_ = denom_is_m1;
-      trap_->TrapIfEq64(kTrapDivUnrepresentable, left,
-                        std::numeric_limits<int64_t>::min());
-      if (*control_ != denom_is_m1) {
-        *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
-                                     denom_is_not_m1, *control_);
-      } else {
-        *control_ = before;
-      }
-      return graph()->NewNode(m->Int64Div(), left, right, *control_);
-    }
+    // kExprI64DivS:
+    case wasm::kExprI64DivS:
+      return BuildI64DivS(left, right);
+    // kExprI64DivU:
     case wasm::kExprI64DivU:
-      op = m->Uint64Div();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck64(kTrapDivByZero, right));
-    case wasm::kExprI64RemS: {
-      trap_->ZeroCheck64(kTrapRemByZero, right);
-      Diamond d(jsgraph()->graph(), jsgraph()->common(),
-                graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
-                                 jsgraph()->Int64Constant(-1)));
-
-      Node* rem = graph()->NewNode(m->Int64Mod(), left, right, d.if_false);
-
-      return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
-                   rem);
-    }
+      return BuildI64DivU(left, right);
+    // kExprI64RemS:
+    case wasm::kExprI64RemS:
+      return BuildI64RemS(left, right);
+    // kExprI64RemU:
     case wasm::kExprI64RemU:
-      op = m->Uint64Mod();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck64(kTrapRemByZero, right));
+      return BuildI64RemU(left, right);
     case wasm::kExprI64Ior:
       op = m->Word64Or();
       break;
+    // kExprI64Xor:
     case wasm::kExprI64Xor:
       op = m->Word64Xor();
       break;
+    // kExprI64Shl:
     case wasm::kExprI64Shl:
       op = m->Word64Shl();
+      right = MaskShiftCount64(right);
       break;
+    // kExprI64ShrU:
     case wasm::kExprI64ShrU:
       op = m->Word64Shr();
+      right = MaskShiftCount64(right);
       break;
+    // kExprI64ShrS:
     case wasm::kExprI64ShrS:
       op = m->Word64Sar();
+      right = MaskShiftCount64(right);
       break;
+    // kExprI64Eq:
     case wasm::kExprI64Eq:
       op = m->Word64Equal();
       break;
+    // kExprI64Ne:
     case wasm::kExprI64Ne:
       return Invert(Binop(wasm::kExprI64Eq, left, right));
+    // kExprI64LtS:
     case wasm::kExprI64LtS:
       op = m->Int64LessThan();
       break;
@@ -583,8 +529,12 @@
       op = m->Uint64LessThanOrEqual();
       std::swap(left, right);
       break;
-#endif
-
+    case wasm::kExprI64Ror:
+      op = m->Word64Ror();
+      right = MaskShiftCount64(right);
+      break;
+    case wasm::kExprI64Rol:
+      return BuildI64Rol(left, right);
     case wasm::kExprF32CopySign:
       return BuildF32CopySign(left, right);
     case wasm::kExprF64CopySign:
@@ -659,6 +609,15 @@
       return BuildF32Max(left, right);
     case wasm::kExprF64Max:
       return BuildF64Max(left, right);
+    case wasm::kExprF64Pow: {
+      return BuildF64Pow(left, right);
+    }
+    case wasm::kExprF64Atan2: {
+      return BuildF64Atan2(left, right);
+    }
+    case wasm::kExprF64Mod: {
+      return BuildF64Mod(left, right);
+    }
     default:
       op = UnsupportedOpcode(opcode);
   }
@@ -670,7 +629,7 @@
   const Operator* op;
   MachineOperatorBuilder* m = jsgraph()->machine();
   switch (opcode) {
-    case wasm::kExprBoolNot:
+    case wasm::kExprI32Eqz:
       op = m->Word32Equal();
       return graph()->NewNode(op, input, jsgraph()->Int32Constant(0));
     case wasm::kExprF32Abs:
@@ -786,79 +745,62 @@
       op = m->Float64RoundTiesEven().op();
       break;
     }
+    case wasm::kExprF64Acos: {
+      return BuildF64Acos(input);
+    }
+    case wasm::kExprF64Asin: {
+      return BuildF64Asin(input);
+    }
+    case wasm::kExprF64Atan: {
+      return BuildF64Atan(input);
+    }
+    case wasm::kExprF64Cos: {
+      return BuildF64Cos(input);
+    }
+    case wasm::kExprF64Sin: {
+      return BuildF64Sin(input);
+    }
+    case wasm::kExprF64Tan: {
+      return BuildF64Tan(input);
+    }
+    case wasm::kExprF64Exp: {
+      return BuildF64Exp(input);
+    }
+    case wasm::kExprF64Log: {
+      return BuildF64Log(input);
+    }
+    // kExprI32ConvertI64:
     case wasm::kExprI32ConvertI64:
       op = m->TruncateInt64ToInt32();
       break;
-#if WASM_64
-    // Opcodes only supported on 64-bit platforms.
-    // TODO(titzer): query the machine operator builder here instead of #ifdef.
+    // kExprI64SConvertI32:
     case wasm::kExprI64SConvertI32:
       op = m->ChangeInt32ToInt64();
       break;
+    // kExprI64UConvertI32:
     case wasm::kExprI64UConvertI32:
       op = m->ChangeUint32ToUint64();
       break;
-    case wasm::kExprF32SConvertI64:
-      op = m->RoundInt64ToFloat32();
-      break;
-    case wasm::kExprF32UConvertI64:
-      op = m->RoundUint64ToFloat32();
-      break;
-    case wasm::kExprF64SConvertI64:
-      op = m->RoundInt64ToFloat64();
-      break;
-    case wasm::kExprF64UConvertI64:
-      op = m->RoundUint64ToFloat64();
-      break;
-    case wasm::kExprI64SConvertF32: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToInt64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
-    case wasm::kExprI64SConvertF64: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToInt64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
-    case wasm::kExprI64UConvertF32: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToUint64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
-    case wasm::kExprI64UConvertF64: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToUint64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
+    // kExprF64ReinterpretI64:
     case wasm::kExprF64ReinterpretI64:
       op = m->BitcastInt64ToFloat64();
       break;
+    // kExprI64ReinterpretF64:
     case wasm::kExprI64ReinterpretF64:
       op = m->BitcastFloat64ToInt64();
       break;
+    // kExprI64Clz:
     case wasm::kExprI64Clz:
       op = m->Word64Clz();
       break;
+    // kExprI64Ctz:
     case wasm::kExprI64Ctz: {
       if (m->Word64Ctz().IsSupported()) {
         op = m->Word64Ctz().op();
         break;
+      } else if (m->Is32() && m->Word32Ctz().IsSupported()) {
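+        // On 32-bit targets emit a placeholder that is lowered to 32-bit
+        // operations in a later pass.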
+        op = m->Word64CtzPlaceholder();
+        break;
       } else if (m->Word64ReverseBits().IsSupported()) {
         Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
         Node* result = graph()->NewNode(m->Word64Clz(), reversed);
@@ -867,15 +809,64 @@
         return BuildI64Ctz(input);
       }
     }
+    // kExprI64Popcnt:
     case wasm::kExprI64Popcnt: {
       if (m->Word64Popcnt().IsSupported()) {
         op = m->Word64Popcnt().op();
-        break;
+      } else if (m->Is32() && m->Word32Popcnt().IsSupported()) {
+        op = m->Word64PopcntPlaceholder();
       } else {
         return BuildI64Popcnt(input);
       }
+      break;
     }
-#endif
+    // kExprI64Eqz:
+    case wasm::kExprI64Eqz:
+      op = m->Word64Equal();
+      return graph()->NewNode(op, input, jsgraph()->Int64Constant(0));
+    // kExprF32SConvertI64:
+    case wasm::kExprF32SConvertI64:
+      if (m->Is32()) {
+        return BuildF32SConvertI64(input);
+      }
+      op = m->RoundInt64ToFloat32();
+      break;
+    // kExprF32UConvertI64:
+    case wasm::kExprF32UConvertI64:
+      if (m->Is32()) {
+        return BuildF32UConvertI64(input);
+      }
+      op = m->RoundUint64ToFloat32();
+      break;
+    // kExprF64SConvertI64:
+    case wasm::kExprF64SConvertI64:
+      if (m->Is32()) {
+        return BuildF64SConvertI64(input);
+      }
+      op = m->RoundInt64ToFloat64();
+      break;
+    // kExprF64UConvertI64:
+    case wasm::kExprF64UConvertI64:
+      if (m->Is32()) {
+        return BuildF64UConvertI64(input);
+      }
+      op = m->RoundUint64ToFloat64();
+      break;
+    // kExprI64SConvertF32:
+    case wasm::kExprI64SConvertF32: {
+      return BuildI64SConvertF32(input);
+    }
+    // kExprI64SConvertF64:
+    case wasm::kExprI64SConvertF64: {
+      return BuildI64SConvertF64(input);
+    }
+    // kExprI64UConvertF32:
+    case wasm::kExprI64UConvertF32: {
+      return BuildI64UConvertF32(input);
+    }
+    // kExprI64UConvertF64:
+    case wasm::kExprI64UConvertF64: {
+      return BuildI64UConvertF64(input);
+    }
     default:
       op = UnsupportedOpcode(opcode);
   }
@@ -937,8 +928,7 @@
     count = 1;
   }
 
-  Node** buf = Realloc(vals, count);
-  buf = Realloc(buf, count + 2);
+  Node** buf = Realloc(vals, count, count + 2);
   buf[count] = *effect_;
   buf[count + 1] = *control_;
   Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, vals);
@@ -956,6 +946,37 @@
   return nullptr;
 }
 
+Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
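+  // Wasm shift counts are interpreted modulo the operand width (32 here), so
+  // e.g. a shift amount of 33 behaves like a shift by 1. If the target's
+  // shift instructions do not already mask the count, do so explicitly.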
+  static const int32_t kMask32 = 0x1f;
+  if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
+    // Shifts by constants are so common we pattern-match them here.
+    Int32Matcher match(node);
+    if (match.HasValue()) {
+      int32_t masked = (match.Value() & kMask32);
+      if (match.Value() != masked) node = jsgraph()->Int32Constant(masked);
+    } else {
+      node = graph()->NewNode(jsgraph()->machine()->Word32And(), node,
+                              jsgraph()->Int32Constant(kMask32));
+    }
+  }
+  return node;
+}
+
+Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
+  static const int64_t kMask64 = 0x3f;
+  if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
+    // Shifts by constants are so common we pattern-match them here.
+    Int64Matcher match(node);
+    if (match.HasValue()) {
+      int64_t masked = (match.Value() & kMask64);
+      if (match.Value() != masked) node = jsgraph()->Int64Constant(masked);
+    } else {
+      node = graph()->NewNode(jsgraph()->machine()->Word64And(), node,
+                              jsgraph()->Int64Constant(kMask64));
+    }
+  }
+  return node;
+}
 
 Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
   Node* result =
@@ -1115,6 +1136,13 @@
 
 Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
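+    // (ToInt32 semantics: NaN and infinities become 0 and out-of-range values
+    // wrap instead of trapping.)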
+    input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
+
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
   Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
@@ -1123,7 +1151,7 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF32SConvertI32, result);
   Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1131,7 +1159,8 @@
 
 Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
     return graph()->NewNode(
         m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
   }
@@ -1143,7 +1172,7 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF64SConvertI32, result);
   Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1151,6 +1180,13 @@
 
 Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
+    input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
+
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
   Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
@@ -1159,7 +1195,7 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF32UConvertI32, result);
   Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1167,19 +1203,20 @@
 
 Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
     return graph()->NewNode(
         m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
   }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
-  Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
+  Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
 
   // Convert the result back to f64. If we end up at a different value than the
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF64UConvertI32, result);
   Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1363,89 +1400,557 @@
 Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_trunc_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_trunc(jsgraph()->isolate());
+
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF32Floor(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_floor_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_floor(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF32Ceil(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_ceil_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_ceil(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF32NearestInt(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_nearest_int_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_nearest_int(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64Trunc(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_trunc_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_trunc(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64Floor(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_floor_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_floor(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64Ceil(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_ceil_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_ceil(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64NearestInt(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_nearest_int_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_nearest_int(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
-Node* WasmGraphBuilder::BuildRoundingInstruction(Node* input,
-                                                 ExternalReference ref,
-                                                 MachineType type) {
-  // We do truncation by calling a C function which calculates the truncation
-  // for us. The input is passed to the C function as a double* to avoid double
-  // parameters. For this we reserve a slot on the stack, store the parameter in
-  // that slot, pass a pointer to the slot to the C function, and after calling
-  // the C function we collect the return value from the stack slot.
+Node* WasmGraphBuilder::BuildF64Acos(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_acos_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
 
-  Node* stack_slot_param =
+Node* WasmGraphBuilder::BuildF64Asin(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_asin_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Atan(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_atan_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Cos(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_cos_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Sin(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_sin_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Tan(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_tan_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Exp(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_exp_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Log(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_log_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Atan2(Node* left, Node* right) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_atan2_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, left, right);
+}
+
+Node* WasmGraphBuilder::BuildF64Pow(Node* left, Node* right) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_pow_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, left, right);
+}
+
+Node* WasmGraphBuilder::BuildF64Mod(Node* left, Node* right) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_mod_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, left, right);
+}
+
+Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
+                                              MachineType type, Node* input0,
+                                              Node* input1) {
+  // The operation is implemented by calling a C function, which computes the
+  // result. The inputs are passed to the C function as double pointers to
+  // avoid double parameters: we reserve slots on the stack, store the
+  // parameters in those slots, pass pointers to the slots to the C function,
+  // and after the call we read the result back from the first stack slot.
+
+  Node* stack_slot_param0 =
       graph()->NewNode(jsgraph()->machine()->StackSlot(type.representation()));
 
-  const Operator* store_op = jsgraph()->machine()->Store(
+  const Operator* store_op0 = jsgraph()->machine()->Store(
       StoreRepresentation(type.representation(), kNoWriteBarrier));
-  *effect_ =
-      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
-                       input, *effect_, *control_);
+  *effect_ = graph()->NewNode(store_op0, stack_slot_param0,
+                              jsgraph()->Int32Constant(0), input0, *effect_,
+                              *control_);
 
-  Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0, 1);
-  sig_builder.AddParam(MachineType::Pointer());
   Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node** args = Buffer(5);
+  args[0] = function;
+  args[1] = stack_slot_param0;
+  int input_count = 1;
 
-  Node* args[] = {function, stack_slot_param};
+  if (input1 != nullptr) {
+    Node* stack_slot_param1 = graph()->NewNode(
+        jsgraph()->machine()->StackSlot(type.representation()));
+    const Operator* store_op1 = jsgraph()->machine()->Store(
+        StoreRepresentation(type.representation(), kNoWriteBarrier));
+    *effect_ = graph()->NewNode(store_op1, stack_slot_param1,
+                                jsgraph()->Int32Constant(0), input1, *effect_,
+                                *control_);
+    args[2] = stack_slot_param1;
+    ++input_count;
+  }
 
+  Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0,
+                                              input_count);
+  sig_builder.AddParam(MachineType::Pointer());
+  if (input1 != nullptr) {
+    sig_builder.AddParam(MachineType::Pointer());
+  }
   BuildCCall(sig_builder.Build(), args);
 
   const Operator* load_op = jsgraph()->machine()->Load(type);
 
   Node* load =
-      graph()->NewNode(load_op, stack_slot_param, jsgraph()->Int32Constant(0),
+      graph()->NewNode(load_op, stack_slot_param0, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
+  // TODO(titzer/bradnelson): Check handling of the asm.js case.
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_int64_to_float32(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float32());
+}
+Node* WasmGraphBuilder::BuildF32UConvertI64(Node* input) {
+  // TODO(titzer/bradnelson): Check handling of the asm.js case.
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_uint64_to_float32(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float32());
+}
+Node* WasmGraphBuilder::BuildF64SConvertI64(Node* input) {
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_int64_to_float64(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float64());
+}
+Node* WasmGraphBuilder::BuildF64UConvertI64(Node* input) {
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_uint64_to_float64(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float64());
+}
+
+Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
+    Node* input, ExternalReference ref,
+    MachineRepresentation parameter_representation,
+    const MachineType result_type) {
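+  // Perform the int64-to-float conversion through a C helper (used on 32-bit
+  // targets); the operand and the result are exchanged via stack slots.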
+  Node* stack_slot_param = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(parameter_representation));
+  Node* stack_slot_result = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(result_type.representation()));
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(parameter_representation, kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       input, *effect_, *control_);
+  MachineSignature::Builder sig_builder(jsgraph()->zone(), 0, 2);
+  sig_builder.AddParam(MachineType::Pointer());
+  sig_builder.AddParam(MachineType::Pointer());
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node* args[] = {function, stack_slot_param, stack_slot_result};
+  BuildCCall(sig_builder.Build(), args);
+  const Operator* load_op = jsgraph()->machine()->Load(result_type);
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat32, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat32, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat64, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat64, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
+    Node* input, ExternalReference ref,
+    MachineRepresentation parameter_representation,
+    const MachineType result_type) {
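+  // The conversion is done by a C helper: the input and the result are passed
+  // through stack slots, and the helper's return value is checked below; a
+  // zero return means the input was not representable, so we trap.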
+  Node* stack_slot_param = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(parameter_representation));
+  Node* stack_slot_result = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(result_type.representation()));
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(parameter_representation, kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       input, *effect_, *control_);
+  MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
+  sig_builder.AddReturn(MachineType::Int32());
+  sig_builder.AddParam(MachineType::Pointer());
+  sig_builder.AddParam(MachineType::Pointer());
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node* args[] = {function, stack_slot_param, stack_slot_result};
+  trap_->ZeroCheck32(wasm::kTrapFloatUnrepresentable,
+                     BuildCCall(sig_builder.Build(), args));
+  const Operator* load_op = jsgraph()->machine()->Load(result_type);
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    if (m->Int32DivIsSafe()) {
+      // The hardware instruction does the right thing (e.g. arm).
+      return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
+    }
+
+    // Check denominator for zero.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    // Check the denominator for -1 (avoids the kMinInt / -1 overflow case).
+    Diamond n(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+        BranchHint::kFalse);
+
+    Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
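+    // For right == -1 the result is 0 - left, which wraps to the value asm.js
+    // semantics require even for left == kMinInt.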
+    Node* neg =
+        graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+
+    return n.Phi(MachineRepresentation::kWord32, neg,
+                 z.Phi(MachineRepresentation::kWord32,
+                       jsgraph()->Int32Constant(0), div));
+  }
+
+  trap_->ZeroCheck32(wasm::kTrapDivByZero, right);
+  Node* before = *control_;
+  Node* denom_is_m1;
+  Node* denom_is_not_m1;
+  Branch(
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+      &denom_is_m1, &denom_is_not_m1);
+  *control_ = denom_is_m1;
+  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt);
+  if (*control_ != denom_is_m1) {
+    *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
+                                 *control_);
+  } else {
+    *control_ = before;
+  }
+  return graph()->NewNode(m->Int32Div(), left, right, *control_);
+}
+
+Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    // Explicit check for x % 0.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    // Explicit check for x % -1.
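+    // x % -1 is always 0, and this also sidesteps kMinInt % -1, which the
+    // hardware modulo instruction may not handle.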
+    Diamond d(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+        BranchHint::kFalse);
+    d.Chain(z.if_false);
+
+    return z.Phi(
+        MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+        d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+              graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+  }
+
+  trap_->ZeroCheck32(wasm::kTrapRemByZero, right);
+
+  Diamond d(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+      BranchHint::kFalse);
+  d.Chain(*control_);
+
+  return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+               graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
+}
+
+Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    if (m->Uint32DivIsSafe()) {
+      // The hardware instruction does the right thing (e.g. arm).
+      return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
+    }
+
+    // Explicit check for x / 0.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+                 graph()->NewNode(jsgraph()->machine()->Uint32Div(), left,
+                                  right, z.if_false));
+  }
+  return graph()->NewNode(m->Uint32Div(), left, right,
+                          trap_->ZeroCheck32(wasm::kTrapDivByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    // Explicit check for x % 0.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
+                                 z.if_false);
+    return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+                 rem);
+  }
+
+  return graph()->NewNode(m->Uint32Mod(), left, right,
+                          trap_->ZeroCheck32(wasm::kTrapRemByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_int64_div(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapDivByZero);
+  }
+  trap_->ZeroCheck64(wasm::kTrapDivByZero, right);
+  Node* before = *control_;
+  Node* denom_is_m1;
+  Node* denom_is_not_m1;
+  Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+                          jsgraph()->Int64Constant(-1)),
+         &denom_is_m1, &denom_is_not_m1);
+  *control_ = denom_is_m1;
+  trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
+                    std::numeric_limits<int64_t>::min());
+  if (*control_ != denom_is_m1) {
+    *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
+                                 *control_);
+  } else {
+    *control_ = before;
+  }
+  return graph()->NewNode(jsgraph()->machine()->Int64Div(), left, right,
+                          *control_);
+}
+
+Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_int64_mod(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapRemByZero);
+  }
+  trap_->ZeroCheck64(wasm::kTrapRemByZero, right);
+  Diamond d(jsgraph()->graph(), jsgraph()->common(),
+            graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+                             jsgraph()->Int64Constant(-1)));
+
+  Node* rem = graph()->NewNode(jsgraph()->machine()->Int64Mod(), left, right,
+                               d.if_false);
+
+  return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
+               rem);
+}
+
+Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_uint64_div(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapDivByZero);
+  }
+  return graph()->NewNode(jsgraph()->machine()->Uint64Div(), left, right,
+                          trap_->ZeroCheck64(wasm::kTrapDivByZero, right));
+}
+Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_uint64_mod(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapRemByZero);
+  }
+  return graph()->NewNode(jsgraph()->machine()->Uint64Mod(), left, right,
+                          trap_->ZeroCheck64(wasm::kTrapRemByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
+                                       ExternalReference ref,
+                                       MachineType result_type, int trap_zero) {
+  Node* stack_slot_dst = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
+  Node* stack_slot_src = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
+
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(MachineRepresentation::kWord64, kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_dst, jsgraph()->Int32Constant(0),
+                       left, *effect_, *control_);
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_src, jsgraph()->Int32Constant(0),
+                       right, *effect_, *control_);
+
+  MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
+  sig_builder.AddReturn(MachineType::Int32());
+  sig_builder.AddParam(MachineType::Pointer());
+  sig_builder.AddParam(MachineType::Pointer());
+
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node* args[] = {function, stack_slot_dst, stack_slot_src};
+
+  Node* call = BuildCCall(sig_builder.Build(), args);
+
+  // TODO(wasm): This can get simpler if we have a specialized runtime call to
+  // throw WASM exceptions by trap code instead of by string.
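+  // The call's return value encodes failure: trap if it is 0 (the divisor was
+  // zero) or -1 (the result was not representable, i.e. kMinInt64 / -1).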
+  trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call);
+  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1);
+  const Operator* load_op = jsgraph()->machine()->Load(result_type);
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_dst, jsgraph()->Int32Constant(0),
                        *effect_, *control_);
   *effect_ = load;
   return load;
@@ -1457,7 +1962,7 @@
   const size_t count = 1 + params + extra;
 
   // Reallocate the buffer to make space for extra inputs.
-  args = Realloc(args, count);
+  args = Realloc(args, 1 + params, count);
 
   // Add effect and control inputs.
   args[params + 1] = *effect_;
@@ -1478,7 +1983,7 @@
   const size_t count = 1 + params + extra;
 
   // Reallocate the buffer to make space for extra inputs.
-  args = Realloc(args, count);
+  args = Realloc(args, 1 + params, count);
 
   // Add effect and control inputs.
   args[params + 1] = *effect_;
@@ -1493,7 +1998,6 @@
   return call;
 }
 
-
 Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
   DCHECK_NULL(args[0]);
 
@@ -1529,10 +2033,10 @@
     // Bounds check against the table size.
     Node* size = Int32Constant(static_cast<int>(table_size));
     Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
-    trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds);
   } else {
     // No function table. Generate a trap and return a constant.
-    trap_->AddTrapIfFalse(kTrapFuncInvalid, Int32Constant(0));
+    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0));
     return trap_->GetTrapValue(module_->GetSignature(index));
   }
   Node* table = FunctionTable();
@@ -1552,7 +2056,7 @@
         *effect_, *control_);
     Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
                                        jsgraph()->SmiConstant(index));
-    trap_->AddTrapIfFalse(kTrapFuncSigMismatch, sig_match);
+    trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match);
   }
 
   // Load code object from the table.
@@ -1640,9 +2144,34 @@
   return num;
 }
 
+Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
+  // Implement Rol via Ror, since TurboFan does not have a Rol opcode.
+  // TODO(weiliang): support a Word32Rol opcode in TurboFan.
+  Int32Matcher m(right);
+  if (m.HasValue()) {
+    return Binop(wasm::kExprI32Ror, left,
+                 jsgraph()->Int32Constant(32 - m.Value()));
+  } else {
+    return Binop(wasm::kExprI32Ror, left,
+                 Binop(wasm::kExprI32Sub, jsgraph()->Int32Constant(32), right));
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
+  // Implement Rol via Ror, since TurboFan does not have a Rol opcode.
+  // TODO(weiliang): support a Word64Rol opcode in TurboFan.
+  Int64Matcher m(right);
+  if (m.HasValue()) {
+    return Binop(wasm::kExprI64Ror, left,
+                 jsgraph()->Int64Constant(64 - m.Value()));
+  } else {
+    return Binop(wasm::kExprI64Ror, left,
+                 Binop(wasm::kExprI64Sub, jsgraph()->Int64Constant(64), right));
+  }
+}
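Both Rol helpers above rely on the identity rol(x, n) == ror(x, width - n), with the rotate count taken modulo the word width (the constant case passes width - n directly, the dynamic case emits a subtraction). A small standalone check of that identity for 32 bits (plain C++, not V8 code):

```cpp
#include <cassert>
#include <cstdint>

uint32_t Ror32(uint32_t x, uint32_t n) {
  n &= 31;
  return (x >> n) | (x << ((32 - n) & 31));
}

uint32_t Rol32(uint32_t x, uint32_t n) {
  n &= 31;
  return (x << n) | (x >> ((32 - n) & 31));
}

int main() {
  for (uint32_t n = 0; n < 32; ++n) {
    // rol(x, n) == ror(x, 32 - n), which is what BuildI32Rol emits; the
    // 64-bit case in BuildI64Rol is analogous with width 64.
    assert(Rol32(0xDEADBEEF, n) == Ror32(0xDEADBEEF, (32 - n) & 31));
  }
  return 0;
}
```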
 
 Node* WasmGraphBuilder::Invert(Node* node) {
-  return Unop(wasm::kExprBoolNot, node);
+  return Unop(wasm::kExprI32Eqz, node);
 }
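Invert now lowers to i32.eqz: the dedicated boolean-not opcode is gone, and eqz yields 1 when its operand is 0 and 0 otherwise, which is exactly logical negation for WASM's i32-valued conditions (any non-zero value counts as true). A tiny standalone illustration, not V8 code:

```cpp
#include <cassert>
#include <cstdint>

// Semantics of i32.eqz, used here as logical negation.
uint32_t I32Eqz(uint32_t x) { return x == 0 ? 1 : 0; }

int main() {
  assert(I32Eqz(0) == 1);
  assert(I32Eqz(1) == 0);
  assert(I32Eqz(42) == 0);  // any non-zero condition negates to 0
  return 0;
}
```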
 
 
@@ -1653,19 +2182,22 @@
   Node** args = Buffer(count);
 
   // Build the start and the JS parameter nodes.
-  Node* start = Start(params + 3);
+  Node* start = Start(params + 5);
   *control_ = start;
   *effect_ = start;
-  // JS context is the last parameter.
+  // Create the context parameter.
   Node* context = graph()->NewNode(
-      jsgraph()->common()->Parameter(params + 1, "context"), start);
+      jsgraph()->common()->Parameter(
+          Linkage::GetJSCallContextParamIndex(params + 1), "%context"),
+      graph()->start());
 
   int pos = 0;
   args[pos++] = Constant(wasm_code);
 
   // Convert JS parameters to WASM numbers.
   for (int i = 0; i < params; i++) {
-    Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+    Node* param =
+        graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
     args[pos++] = FromJS(param, context, sig->GetParam(i));
   }
 
@@ -1800,7 +2332,7 @@
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
-                                  module_->module->globals->at(index).offset));
+                                  module_->module->globals[index].offset));
   const Operator* op = jsgraph()->machine()->Load(mem_type);
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
                                 *control_);
@@ -1814,7 +2346,7 @@
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
-                                  module_->module->globals->at(index).offset));
+                                  module_->module->globals[index].offset));
   const Operator* op = jsgraph()->machine()->Store(
       StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -1843,7 +2375,7 @@
         jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
   }
 
-  trap_->AddTrapIfFalse(kTrapMemOutOfBounds, cond);
+  trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond);
 }
 
 
@@ -1851,7 +2383,7 @@
                                 Node* index, uint32_t offset) {
   Node* load;
 
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
     // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
     DCHECK_EQ(0, offset);
     const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
@@ -1886,7 +2418,7 @@
 Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
                                  uint32_t offset, Node* val) {
   Node* store;
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
     // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
     DCHECK_EQ(0, offset);
     const Operator* op =
@@ -1920,7 +2452,7 @@
 Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
 
 void WasmGraphBuilder::Int64LoweringForTesting() {
-  if (kPointerSize == 4) {
+  if (jsgraph()->machine()->Is32()) {
     Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
                     jsgraph()->common(), jsgraph()->zone(),
                     function_signature_);
@@ -1931,12 +2463,13 @@
 static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
                                       CompilationInfo* info,
                                       const char* message, uint32_t index,
-                                      const char* func_name) {
+                                      wasm::WasmName func_name) {
   Isolate* isolate = info->isolate();
   if (isolate->logger()->is_logging_code_events() ||
       isolate->cpu_profiler()->is_profiling()) {
     ScopedVector<char> buffer(128);
-    SNPrintF(buffer, "%s#%d:%s", message, index, func_name);
+    SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length,
+             func_name.name);
     Handle<String> name_str =
         isolate->factory()->NewStringFromAsciiChecked(buffer.start());
     Handle<String> script_str =
@@ -1944,15 +2477,15 @@
     Handle<Code> code = info->code();
     Handle<SharedFunctionInfo> shared =
         isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
-    PROFILE(isolate,
-            CodeCreateEvent(tag, *code, *shared, info, *script_str, 0, 0));
+    PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
+                                     info, *script_str, 0, 0));
   }
 }
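RecordFunctionCompilation now takes a wasm::WasmName, which carries a pointer plus an explicit length rather than a NUL-terminated string, so the format string switches to the "%.*s" precision form. A quick standalone illustration of that printf idiom (the name bytes here are made up for the example):

```cpp
#include <cstdio>

int main() {
  // Names in the WASM byte stream are length-delimited, not NUL-terminated,
  // so the length must be passed alongside the pointer.
  const char* bytes = "factorialXXXX";  // only the first 9 bytes are the name
  int length = 9;
  char buffer[64];
  std::snprintf(buffer, sizeof(buffer), "WASM_function_#%d:%.*s", 0, length,
                bytes);
  std::puts(buffer);  // prints: WASM_function_#0:factorial
  return 0;
}
```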
 
 Handle<JSFunction> CompileJSToWasmWrapper(
     Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
     Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
-  wasm::WasmFunction* func = &module->module->functions->at(index);
+  wasm::WasmFunction* func = &module->module->functions[index];
 
   //----------------------------------------------------------------------------
   // Create the JSFunction object.
@@ -1961,7 +2494,7 @@
       isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
   int params = static_cast<int>(func->sig->parameter_count());
   shared->set_length(params);
-  shared->set_internal_formal_parameter_count(1 + params);
+  shared->set_internal_formal_parameter_count(params);
   Handle<JSFunction> function = isolate->factory()->NewFunction(
       isolate->wasm_function_map(), name, MaybeHandle<Code>());
   function->SetInternalField(0, *module_object);
@@ -1970,7 +2503,7 @@
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
-  Zone zone;
+  Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   JSOperatorBuilder javascript(&zone);
@@ -2015,8 +2548,7 @@
         module->GetFunctionSignature(index)->parameter_count());
     CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
         &zone, false, params + 1, CallDescriptor::kNoFlags);
-    // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
     bool debugging =
 #if DEBUG
         true;
@@ -2036,12 +2568,19 @@
     CompilationInfo info(func_name, isolate, &zone, flags);
     Handle<Code> code =
         Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code && !code.is_null()) {
+      OFStream os(stdout);
+      code->Disassemble(buffer.start(), os);
+    }
+#endif
     if (debugging) {
       buffer.Dispose();
     }
 
-    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
-                              module->module->GetName(func->name_offset));
+    RecordFunctionCompilation(
+        Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+        module->module->GetName(func->name_offset, func->name_length));
     // Set the JSFunction's machine code.
     function->set_code(*code);
   }
@@ -2050,11 +2589,13 @@
 
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    wasm::FunctionSig* sig, const char* name) {
+                                    wasm::FunctionSig* sig,
+                                    wasm::WasmName module_name,
+                                    wasm::WasmName function_name) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
-  Zone zone;
+  Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   JSOperatorBuilder javascript(&zone);
@@ -2095,8 +2636,7 @@
     // Schedule and compile to machine code.
     CallDescriptor* incoming =
         wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
-    // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
     bool debugging =
 #if DEBUG
         true;
@@ -2114,12 +2654,18 @@
 
     CompilationInfo info(func_name, isolate, &zone, flags);
     code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code && !code.is_null()) {
+      OFStream os(stdout);
+      code->Disassemble(buffer.start(), os);
+    }
+#endif
     if (debugging) {
       buffer.Dispose();
     }
 
     RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
-                              name);
+                              module_name);
   }
   return code;
 }
@@ -2129,24 +2675,21 @@
 Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
                                  wasm::ModuleEnv* module_env,
                                  const wasm::WasmFunction& function) {
-  if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
+  if (FLAG_trace_wasm_compiler) {
     OFStream os(stdout);
     os << "Compiling WASM function "
        << wasm::WasmFunctionName(&function, module_env) << std::endl;
     os << std::endl;
   }
-  // Initialize the function environment for decoding.
-  wasm::FunctionEnv env;
-  env.module = module_env;
-  env.sig = function.sig;
-  env.local_i32_count = function.local_i32_count;
-  env.local_i64_count = function.local_i64_count;
-  env.local_f32_count = function.local_f32_count;
-  env.local_f64_count = function.local_f64_count;
-  env.SumLocals();
+
+  double decode_ms = 0;
+  base::ElapsedTimer decode_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    decode_timer.Start();
+  }
 
   // Create a TF graph during decoding.
-  Zone zone;
+  Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   MachineOperatorBuilder machine(
@@ -2154,11 +2697,12 @@
       InstructionSelector::SupportedMachineOperatorFlags());
   JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
   WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
-  wasm::TreeResult result = wasm::BuildTFGraph(
-      &builder, &env,                                                 // --
-      module_env->module->module_start,                               // --
-      module_env->module->module_start + function.code_start_offset,  // --
-      module_env->module->module_start + function.code_end_offset);   // --
+  wasm::FunctionBody body = {
+      module_env, function.sig, module_env->module->module_start,
+      module_env->module->module_start + function.code_start_offset,
+      module_env->module->module_start + function.code_end_offset};
+  wasm::TreeResult result =
+      wasm::BuildTFGraph(isolate->allocator(), &builder, body);
 
   if (result.failed()) {
     if (FLAG_trace_wasm_compiler) {
@@ -2167,17 +2711,31 @@
     }
     // Add the function as another context for the exception
     ScopedVector<char> buffer(128);
-    SNPrintF(buffer, "Compiling WASM function #%d:%s failed:",
-             function.func_index,
-             module_env->module->GetName(function.name_offset));
+    wasm::WasmName name =
+        module_env->module->GetName(function.name_offset, function.name_length);
+    SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
+             function.func_index, name.length, name.name);
     thrower.Failed(buffer.start(), result);
     return Handle<Code>::null();
   }
 
+  int index = static_cast<int>(function.func_index);
+  if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
+    PrintAst(isolate->allocator(), body);
+  }
+
+  if (FLAG_trace_wasm_decode_time) {
+    decode_ms = decode_timer.Elapsed().InMillisecondsF();
+  }
+
+  base::ElapsedTimer compile_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    compile_timer.Start();
+  }
   // Run the compiler pipeline to generate machine code.
   CallDescriptor* descriptor =
       wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
-  if (kPointerSize == 4) {
+  if (machine.Is32()) {
     descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
   }
   Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
@@ -2192,8 +2750,10 @@
   Vector<char> buffer;
   if (debugging) {
     buffer = Vector<char>::New(128);
-    SNPrintF(buffer, "WASM_function_#%d:%s", function.func_index,
-             module_env->module->GetName(function.name_offset));
+    wasm::WasmName name =
+        module_env->module->GetName(function.name_offset, function.name_length);
+    SNPrintF(buffer, "WASM_function_#%d:%.*s", function.func_index, name.length,
+             name.name);
     func_name = buffer.start();
   }
   CompilationInfo info(func_name, isolate, &zone, flags);
@@ -2204,11 +2764,20 @@
     buffer.Dispose();
   }
   if (!code.is_null()) {
-    RecordFunctionCompilation(
-        Logger::FUNCTION_TAG, &info, "WASM_function", function.func_index,
-        module_env->module->GetName(function.name_offset));
+    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "WASM_function",
+                              function.func_index,
+                              module_env->module->GetName(
+                                  function.name_offset, function.name_length));
   }
 
+  if (FLAG_trace_wasm_decode_time) {
+    double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+    PrintF(
+        "wasm-compile ok: %d bytes, %0.3f ms decode, %d nodes, %0.3f ms "
+        "compile\n",
+        static_cast<int>(function.code_end_offset - function.code_start_offset),
+        decode_ms, static_cast<int>(graph.NodeCount()), compile_ms);
+  }
   return code;
 }
 
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 2e86b56..bbcafa7 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -40,7 +40,9 @@
 // Wraps a JS function, producing a code object that can be called from WASM.
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    wasm::FunctionSig* sig, const char* name);
+                                    wasm::FunctionSig* sig,
+                                    wasm::WasmName module_name,
+                                    wasm::WasmName function_name);
 
 // Wraps a given wasm code object, producing a JSFunction that can be called
 // from JavaScript.
@@ -162,8 +164,12 @@
   Node* MemBuffer(uint32_t offset);
   void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
 
+  Node* MaskShiftCount32(Node* node);
+  Node* MaskShiftCount64(Node* node);
+
   Node* BuildCCall(MachineSignature* sig, Node** args);
   Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+
   Node* BuildF32Neg(Node* input);
   Node* BuildF64Neg(Node* input);
   Node* BuildF32CopySign(Node* left, Node* right);
@@ -180,8 +186,8 @@
   Node* BuildI32Popcnt(Node* input);
   Node* BuildI64Ctz(Node* input);
   Node* BuildI64Popcnt(Node* input);
-  Node* BuildRoundingInstruction(Node* input, ExternalReference ref,
-                                 MachineType type);
+  Node* BuildCFuncInstruction(ExternalReference ref, MachineType type,
+                              Node* input0, Node* input1 = nullptr);
   Node* BuildF32Trunc(Node* input);
   Node* BuildF32Floor(Node* input);
   Node* BuildF32Ceil(Node* input);
@@ -190,10 +196,54 @@
   Node* BuildF64Floor(Node* input);
   Node* BuildF64Ceil(Node* input);
   Node* BuildF64NearestInt(Node* input);
+  Node* BuildI32Rol(Node* left, Node* right);
+  Node* BuildI64Rol(Node* left, Node* right);
 
-  Node** Realloc(Node** buffer, size_t count) {
-    Node** buf = Buffer(count);
-    if (buf != buffer) memcpy(buf, buffer, count * sizeof(Node*));
+  Node* BuildF64Acos(Node* input);
+  Node* BuildF64Asin(Node* input);
+  Node* BuildF64Atan(Node* input);
+  Node* BuildF64Cos(Node* input);
+  Node* BuildF64Sin(Node* input);
+  Node* BuildF64Tan(Node* input);
+  Node* BuildF64Exp(Node* input);
+  Node* BuildF64Log(Node* input);
+  Node* BuildF64Pow(Node* left, Node* right);
+  Node* BuildF64Atan2(Node* left, Node* right);
+  Node* BuildF64Mod(Node* left, Node* right);
+
+  Node* BuildIntToFloatConversionInstruction(
+      Node* input, ExternalReference ref,
+      MachineRepresentation parameter_representation,
+      const MachineType result_type);
+  Node* BuildF32SConvertI64(Node* input);
+  Node* BuildF32UConvertI64(Node* input);
+  Node* BuildF64SConvertI64(Node* input);
+  Node* BuildF64UConvertI64(Node* input);
+
+  Node* BuildFloatToIntConversionInstruction(
+      Node* input, ExternalReference ref,
+      MachineRepresentation parameter_representation,
+      const MachineType result_type);
+  Node* BuildI64SConvertF32(Node* input);
+  Node* BuildI64UConvertF32(Node* input);
+  Node* BuildI64SConvertF64(Node* input);
+  Node* BuildI64UConvertF64(Node* input);
+
+  Node* BuildI32DivS(Node* left, Node* right);
+  Node* BuildI32RemS(Node* left, Node* right);
+  Node* BuildI32DivU(Node* left, Node* right);
+  Node* BuildI32RemU(Node* left, Node* right);
+
+  Node* BuildI64DivS(Node* left, Node* right);
+  Node* BuildI64RemS(Node* left, Node* right);
+  Node* BuildI64DivU(Node* left, Node* right);
+  Node* BuildI64RemU(Node* left, Node* right);
+  Node* BuildDiv64Call(Node* left, Node* right, ExternalReference ref,
+                       MachineType result_type, int trap_zero);
+
+  Node** Realloc(Node** buffer, size_t old_count, size_t new_count) {
+    Node** buf = Buffer(new_count);
+    if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
     return buf;
   }
 };
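Realloc now takes both the old and the new element count so that only the entries that existed in the old buffer are copied; copying new_count entries, as the previous signature implied, would read past the end of the old allocation. A hedged, heap-based sketch of the same contract (the real helper allocates from the compiler zone and never frees the old buffer):

```cpp
#include <cstddef>
#include <cstring>

struct Node;  // opaque stand-in for the compiler IR node type

// Sketch only: grow a Node* buffer while preserving the old_count valid
// entries.
Node** Realloc(Node** old_buf, std::size_t old_count, std::size_t new_count) {
  Node** new_buf = new Node*[new_count]();
  // Copy old_count entries; copying new_count here would read past the end
  // of old_buf.
  std::memcpy(new_buf, old_buf, old_count * sizeof(Node*));
  delete[] old_buf;  // the zone-backed V8 version leaves the old buffer alone
  return new_buf;
}
```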
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 3176fd3..f0e14ce 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@
 // ===========================================================================
 // == ia32 ===================================================================
 // ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx
 #define GP_RETURN_REGISTERS eax, edx
 #define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
 #define FP_RETURN_REGISTERS xmm1, xmm2
@@ -125,6 +125,24 @@
 #define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
 #define FP_RETURN_REGISTERS d1, d2
 
+#elif V8_TARGET_ARCH_S390X
+// ===========================================================================
+// == s390x ==================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_RETURN_REGISTERS r2
+#define FP_PARAM_REGISTERS d0, d2, d4, d6
+#define FP_RETURN_REGISTERS d0, d2, d4, d6
+
+#elif V8_TARGET_ARCH_S390
+// ===========================================================================
+// == s390 ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_RETURN_REGISTERS r2, r3
+#define FP_PARAM_REGISTERS d0, d2
+#define FP_RETURN_REGISTERS d0, d2
+
 #else
 // ===========================================================================
 // == unknown ================================================================
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 510c0c6..2e4eccb 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -49,8 +49,11 @@
 
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
+  }
+
+  Operand SlotToOperand(int slot_index, int extra = 0) {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot_index);
     return Operand(offset.from_stack_pointer() ? rsp : rbp,
                    offset.offset() + extra);
   }
@@ -599,6 +602,12 @@
     }                                                            \
   } while (false)
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ movq(rsp, rbp);
+  __ popq(rbp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -615,18 +624,43 @@
     __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ movq(rbp, MemOperand(rbp, 0));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ Cmp(Operand(rbp, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &done, Label::kNear);
+
+  // Load the arguments count from the current arguments adaptor frame (note
+  // that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ SmiToInteger32(
+      caller_args_count_reg,
+      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack);
+  __ bind(&done);
+}
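AssemblePopArgumentsAdaptorFrame only has work to do when the caller was entered through an arguments adaptor frame, i.e. when the actual argument count did not match the callee's formal parameter count; the emitted code detects this at runtime via the frame marker rather than by comparing counts. A hypothetical sketch of just the condition, with invented names (not V8 code):

```cpp
#include <cassert>

// Model of when V8 inserts an arguments adaptor frame: the call site supplied
// a different number of arguments than the function formally declares. Before
// a tail call that extra frame must be dropped along with its arguments,
// which is what AssemblePopArgumentsAdaptorFrame emits code for.
bool NeedsAdaptorFrame(int actual_argument_count, int formal_parameter_count) {
  return actual_argument_count != formal_parameter_count;
}

int main() {
  assert(NeedsAdaptorFrame(3, 2));   // over-application goes through adaptor
  assert(NeedsAdaptorFrame(1, 2));   // under-application too
  assert(!NeedsAdaptorFrame(2, 2));  // exact match: no adaptor frame
  return 0;
}
```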
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   X64OperandConverter i(this, instr);
-
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (HasImmediateInput(instr, 0)) {
@@ -641,9 +675,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ jmp(code, RelocInfo::CODE_TARGET);
@@ -668,6 +708,7 @@
       RecordCallPosition(instr);
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -677,6 +718,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       break;
@@ -735,7 +781,7 @@
       __ movq(i.OutputRegister(), rbp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ movq(i.OutputRegister(), Operand(rbp, 0));
       } else {
         __ movq(i.OutputRegister(), rbp);
@@ -799,12 +845,24 @@
     case kX64And:
       ASSEMBLE_BINOP(andq);
       break;
+    case kX64Cmp8:
+      ASSEMBLE_COMPARE(cmpb);
+      break;
+    case kX64Cmp16:
+      ASSEMBLE_COMPARE(cmpw);
+      break;
     case kX64Cmp32:
       ASSEMBLE_COMPARE(cmpl);
       break;
     case kX64Cmp:
       ASSEMBLE_COMPARE(cmpq);
       break;
+    case kX64Test8:
+      ASSEMBLE_COMPARE(testb);
+      break;
+    case kX64Test16:
+      ASSEMBLE_COMPARE(testw);
+      break;
     case kX64Test32:
       ASSEMBLE_COMPARE(testl);
       break;
@@ -1001,7 +1059,6 @@
       } else {
         __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
       }
-      __ AssertZeroExtended(i.OutputRegister());
       break;
     }
     case kSSEFloat64Cmp:
@@ -1100,7 +1157,9 @@
       } else {
         __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
       }
-      __ AssertZeroExtended(i.OutputRegister());
+      if (MiscField::decode(instr->opcode())) {
+        __ AssertZeroExtended(i.OutputRegister());
+      }
       break;
     }
     case kSSEFloat32ToInt64:
@@ -1877,18 +1936,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ pushq(rbp);
-    __ movq(rbp, rsp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ pushq(rbp);
+      __ movq(rbp, rsp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
-
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1968,17 +2025,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
-    __ popq(rbp);       // Pop caller's frame pointer.
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
-      __ popq(rbp);       // Pop caller's frame pointer.
+      AssembleDeconstructFrame();
     }
   }
   size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
@@ -2043,9 +2098,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ movp(dst, Operand(rbp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ movp(dst, g.SlotToOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 6d5e77c..bd19386 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -18,8 +18,12 @@
   V(X64And32)                      \
   V(X64Cmp)                        \
   V(X64Cmp32)                      \
+  V(X64Cmp16)                      \
+  V(X64Cmp8)                       \
   V(X64Test)                       \
   V(X64Test32)                     \
+  V(X64Test16)                     \
+  V(X64Test8)                      \
   V(X64Or)                         \
   V(X64Or32)                       \
   V(X64Xor)                        \
@@ -139,7 +143,6 @@
   V(X64Poke)                       \
   V(X64StackCheck)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index 1f10b51..3c31965 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -20,8 +20,12 @@
     case kX64And32:
     case kX64Cmp:
     case kX64Cmp32:
+    case kX64Cmp16:
+    case kX64Cmp8:
     case kX64Test:
     case kX64Test32:
+    case kX64Test16:
+    case kX64Test8:
     case kX64Or:
     case kX64Or32:
     case kX64Xor:
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index d3a2a8e..ea1d48b 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -36,6 +36,33 @@
     }
   }
 
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(node, input)) {
+      return false;
+    }
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kX64Cmp:
+      case kX64Test:
+        return rep == MachineRepresentation::kWord64 ||
+               rep == MachineRepresentation::kTagged;
+      case kX64Cmp32:
+      case kX64Test32:
+        return rep == MachineRepresentation::kWord32;
+      case kX64Cmp16:
+      case kX64Test16:
+        return rep == MachineRepresentation::kWord16;
+      case kX64Cmp8:
+      case kX64Test8:
+        return rep == MachineRepresentation::kWord8;
+      default:
+        break;
+    }
+    return false;
+  }
+
   AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                              Node* base, Node* displacement,
                                              InstructionOperand inputs[],
@@ -173,9 +200,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MR1;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -399,8 +424,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -588,6 +618,75 @@
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
+  X64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+      m.right().Is(32)) {
+    // Just load and sign-extend the interesting 4 bytes instead. This happens,
+    // for example, when we're loading and untagging Smis.
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    if (mleft.matches() && (mleft.displacement() == nullptr ||
+                            g.CanBeImmediate(mleft.displacement()))) {
+      size_t input_count = 0;
+      InstructionOperand inputs[3];
+      AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+          m.left().node(), inputs, &input_count);
+      if (mleft.displacement() == nullptr) {
+        // Make sure that the addressing mode indicates the presence of an
+        // immediate displacement. It seems that we never use M1 and M2, but we
+        // handle them here anyway.
+        switch (mode) {
+          case kMode_MR:
+            mode = kMode_MRI;
+            break;
+          case kMode_MR1:
+            mode = kMode_MR1I;
+            break;
+          case kMode_MR2:
+            mode = kMode_MR2I;
+            break;
+          case kMode_MR4:
+            mode = kMode_MR4I;
+            break;
+          case kMode_MR8:
+            mode = kMode_MR8I;
+            break;
+          case kMode_M1:
+            mode = kMode_M1I;
+            break;
+          case kMode_M2:
+            mode = kMode_M2I;
+            break;
+          case kMode_M4:
+            mode = kMode_M4I;
+            break;
+          case kMode_M8:
+            mode = kMode_M8I;
+            break;
+          case kMode_None:
+          case kMode_MRI:
+          case kMode_MR1I:
+          case kMode_MR2I:
+          case kMode_MR4I:
+          case kMode_MR8I:
+          case kMode_M1I:
+          case kMode_M2I:
+          case kMode_M4I:
+          case kMode_M8I:
+            UNREACHABLE();
+        }
+        inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
+      } else {
+        ImmediateOperand* op = ImmediateOperand::cast(&inputs[input_count - 1]);
+        int32_t displacement = sequence()->GetImmediate(op).ToInt32();
+        *op = ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
+      }
+      InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+      InstructionCode code = kX64Movsxlq | AddressingModeField::encode(mode);
+      Emit(code, 1, outputs, input_count, inputs);
+      return;
+    }
+  }
   VisitWord64Shift(this, node, kX64Sar);
 }
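The new VisitWord64Sar fast path replaces an arithmetic shift by 32 of a loaded 64-bit value with a sign-extending 32-bit load at displacement + 4, which is why the existing displacement is bumped by 4 (or an inline displacement of 4 is appended). A standalone check of the underlying equivalence, assuming the little-endian layout x64 uses (plain C++, not V8 code):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int64_t value = INT64_C(-0x123456789ABCDEF0);
  // What Word64Sar(value, 32) computes (arithmetic shift, as x64's sar does).
  int64_t shifted = value >> 32;
  // What the emitted movsxlq at displacement + 4 computes: sign-extend the
  // upper four bytes, which on little-endian x64 sit at byte offset 4.
  int32_t upper;
  std::memcpy(&upper, reinterpret_cast<const char*>(&value) + 4, sizeof(upper));
  assert(static_cast<int64_t>(upper) == shifted);
  return 0;
}
```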
 
@@ -668,8 +767,8 @@
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
-    VisitBinop(this, node, kX64Add, &cont);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Add, &cont);
   }
   FlagsContinuation cont;
   VisitBinop(this, node, kX64Add, &cont);
@@ -708,7 +807,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX64Sub, &cont);
   }
   FlagsContinuation cont;
@@ -865,9 +964,15 @@
 
 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
+       g.Use(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
+       g.Use(node->InputAt(0)));
+}
 
 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
   X64OperandGenerator g(this);
@@ -1336,6 +1441,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 namespace {
 
@@ -1357,6 +1463,9 @@
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1364,23 +1473,6 @@
   }
 }
 
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
-                         Node* node, Node* input) {
-  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
-    return false;
-  }
-  MachineRepresentation rep =
-      LoadRepresentationOf(input->op()).representation();
-  if (rep == MachineRepresentation::kWord64 ||
-      rep == MachineRepresentation::kTagged) {
-    return opcode == kX64Cmp || opcode == kX64Test;
-  } else if (rep == MachineRepresentation::kWord32) {
-    return opcode == kX64Cmp32 || opcode == kX64Test32;
-  }
-  return false;
-}
-
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1390,6 +1482,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1408,6 +1503,37 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+                                    Node* right) {
+  if (opcode != kX64Cmp32 && opcode != kX64Test32) {
+    return opcode;
+  }
+  // Currently, if one of the two operands is not a Load, we don't know what its
+  // machine representation is, so we bail out.
+  // TODO(epertoso): we can probably get some size information out of immediates
+  // and phi nodes.
+  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+    return opcode;
+  }
+  // If the load representations don't match, both operands will be
+  // zero/sign-extended to 32bit.
+  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+  if (left_representation != LoadRepresentationOf(right->op())) {
+    return opcode;
+  }
+  switch (left_representation.representation()) {
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kWord8:
+      return opcode == kX64Cmp32 ? kX64Cmp8 : kX64Test8;
+    case MachineRepresentation::kWord16:
+      return opcode == kX64Cmp32 ? kX64Cmp16 : kX64Test16;
+    default:
+      return opcode;
+  }
+}
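TryNarrowOpcodeSize only narrows a 32-bit compare when both operands are loads with the same representation, because the flags from the narrow compare must match what the 32-bit compare would have produced after both loads were extended; mixing signed and unsigned (or differently sized) loads breaks that. A small standalone check of the property for unsigned 8-bit operands (plain C++, not V8 code):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  for (int a = 0; a < 256; ++a) {
    for (int b = 0; b < 256; ++b) {
      uint8_t na = static_cast<uint8_t>(a), nb = static_cast<uint8_t>(b);
      // Result an 8-bit compare (cmpb) would decide ...
      bool narrow = na < nb;
      // ... agrees with a 32-bit compare (cmpl) of the zero-extended values,
      // which is what the un-narrowed kX64Cmp32 would have tested.
      bool wide = static_cast<uint32_t>(na) < static_cast<uint32_t>(nb);
      assert(narrow == wide);
    }
  }
  return 0;
}
```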
+
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
@@ -1415,15 +1541,20 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // If one of the two inputs is an immediate, make sure it's on the right.
-  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+  opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
+  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+      (g.CanBeMemoryOperand(opcode, node, right) &&
+       !g.CanBeMemoryOperand(opcode, node, left))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left)) {
       return VisitCompareWithMemoryOperand(selector, opcode, left,
                                            g.UseImmediate(right), cont);
     }
@@ -1431,15 +1562,17 @@
                         cont);
   }
 
+  // Match memory operands on left side of comparison.
+  if (g.CanBeMemoryOperand(opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+
   if (g.CanBeBetterLeftOperand(right)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
-  if (CanUseMemoryOperand(selector, opcode, node, left)) {
-    return VisitCompareWithMemoryOperand(selector, opcode, left,
-                                         g.UseRegister(right), cont);
-  }
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1460,6 +1593,9 @@
       if (cont->IsBranch()) {
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
+      } else if (cont->IsDeoptimize()) {
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+                                 cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1500,98 +1636,87 @@
   VisitCompare(selector, opcode, right, left, cont, false);
 }
 
-}  // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
-                                      BasicBlock* fbranch) {
-  X64OperandGenerator g(this);
-  Node* user = branch;
-  Node* value = branch->InputAt(0);
-
-  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
-  // Try to combine with comparisons against 0 by simply inverting the branch.
-  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
-    Int32BinopMatcher m(value);
-    if (m.right().Is(0)) {
-      user = value;
-      value = m.left().node();
-      cont.Negate();
-    } else {
-      break;
-    }
-  }
-
-  // Try to combine the branch with a comparison.
-  if (CanCover(user, value)) {
+// Shared routine for word comparison against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
+      }
       case IrOpcode::kInt32LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kInt32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kUint32LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kUint32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kWord64Equal: {
-        cont.OverwriteAndNegateIfEqual(kEqual);
+        cont->OverwriteAndNegateIfEqual(kEqual);
         Int64BinopMatcher m(value);
         if (m.right().Is(0)) {
           // Try to combine the branch with a comparison.
           Node* const user = m.node();
           Node* const value = m.left().node();
-          if (CanCover(user, value)) {
+          if (selector->CanCover(user, value)) {
             switch (value->opcode()) {
               case IrOpcode::kInt64Sub:
-                return VisitWord64Compare(this, value, &cont);
+                return VisitWord64Compare(selector, value, cont);
               case IrOpcode::kWord64And:
-                return VisitWordCompare(this, value, kX64Test, &cont);
+                return VisitWordCompare(selector, value, kX64Test, cont);
               default:
                 break;
             }
           }
-          return VisitCompareZero(this, value, kX64Cmp, &cont);
+          return VisitCompareZero(selector, value, kX64Cmp, cont);
         }
-        return VisitWord64Compare(this, value, &cont);
+        return VisitWord64Compare(selector, value, cont);
       }
       case IrOpcode::kInt64LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kInt64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kUint64LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kUint64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kFloat32Equal:
-        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat64Equal:
-        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kProjection:
         // Check if this is the overflow output projection of an
         // <Operation>WithOverflow node.
@@ -1603,20 +1728,20 @@
           // *AFTER* this branch).
           Node* const node = value->InputAt(0);
           Node* const result = NodeProperties::FindProjection(node, 0);
-          if (result == nullptr || IsDefined(result)) {
+          if (result == nullptr || selector->IsDefined(result)) {
             switch (node->opcode()) {
               case IrOpcode::kInt32AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Add32, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Add32, cont);
               case IrOpcode::kInt32SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Sub32, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Sub32, cont);
               case IrOpcode::kInt64AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Add, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Add, cont);
               case IrOpcode::kInt64SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Sub, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Sub, cont);
               default:
                 break;
             }
@@ -1624,22 +1749,42 @@
         }
         break;
       case IrOpcode::kInt32Sub:
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kInt64Sub:
-        return VisitWord64Compare(this, value, &cont);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kWord32And:
-        return VisitWordCompare(this, value, kX64Test32, &cont);
+        return VisitWordCompare(selector, value, kX64Test32, cont);
       case IrOpcode::kWord64And:
-        return VisitWordCompare(this, value, kX64Test, &cont);
+        return VisitWordCompare(selector, value, kX64Test, cont);
       default:
         break;
     }
+    break;
   }
 
   // Branch could not be combined with a compare, emit compare against 0.
-  VisitCompareZero(this, value, kX64Cmp32, &cont);
+  VisitCompareZero(selector, value, kX64Cmp32, cont);
 }
 
+}  // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   X64OperandGenerator g(this);
@@ -1674,7 +1819,7 @@
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
   Node* user = node;
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(user);
   if (m.right().Is(0)) {
     Node* value = m.left().node();
@@ -1709,31 +1854,33 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitWord64Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(node);
   if (m.right().Is(0)) {
     // Try to combine the equality check with a comparison.
@@ -1756,8 +1903,8 @@
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
-    VisitBinop(this, node, kX64Add32, &cont);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Add32, &cont);
   }
   FlagsContinuation cont;
   VisitBinop(this, node, kX64Add32, &cont);
@@ -1766,7 +1913,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX64Sub32, &cont);
   }
   FlagsContinuation cont;
@@ -1775,61 +1922,67 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
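
The x64 hunks above switch every FlagsContinuation from a bare constructor to a
named factory (ForSet, ForDeoptimize), which makes the continuation mode explicit
at each call site. A minimal standalone sketch of that named-factory pattern, with
invented class and enum names rather than V8's:

// Minimal sketch of the named-factory pattern used by FlagsContinuation.
// All names here are illustrative, not V8 internals.
#include <cassert>

enum class Mode { kNone, kSet, kDeoptimize };

class Continuation {
 public:
  // Named factories make the intended mode explicit at the call site.
  static Continuation ForSet(int condition) {
    return Continuation(Mode::kSet, condition);
  }
  static Continuation ForDeoptimize(int condition) {
    return Continuation(Mode::kDeoptimize, condition);
  }
  Mode mode() const { return mode_; }
  int condition() const { return condition_; }

 private:
  Continuation(Mode mode, int condition) : mode_(mode), condition_(condition) {}
  Mode mode_;
  int condition_;
};

int main() {
  Continuation cont = Continuation::ForSet(/*kEqual=*/0);
  assert(cont.mode() == Mode::kSet);
  return 0;
}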
 
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index 1575570..da7fdb4 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -43,16 +43,13 @@
       return Operand(ToRegister(op));
     }
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
-    return Operand(offset.from_stack_pointer() ? esp : ebp,
-                   offset.offset() + extra);
+    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
   }
 
-  Operand ToMaterializableOperand(int materializable_offset) {
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        FPOffsetToFrameSlot(materializable_offset));
-    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+  Operand SlotToOperand(int slot, int extra = 0) {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+    return Operand(offset.from_stack_pointer() ? esp : ebp,
+                   offset.offset() + extra);
   }
 
   Operand HighOperand(InstructionOperand* op) {
@@ -340,6 +337,42 @@
     __ bind(&done);                                          \
   } while (false)
 
+#define ASSEMBLE_COMPARE(asm_instr)                                   \
+  do {                                                                \
+    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+      size_t index = 0;                                               \
+      Operand left = i.MemoryOperand(&index);                         \
+      if (HasImmediateInput(instr, index)) {                          \
+        __ asm_instr(left, i.InputImmediate(index));                  \
+      } else {                                                        \
+        __ asm_instr(left, i.InputRegister(index));                   \
+      }                                                               \
+    } else {                                                          \
+      if (HasImmediateInput(instr, 1)) {                              \
+        if (instr->InputAt(0)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
+        } else {                                                      \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
+        }                                                             \
+      } else {                                                        \
+        if (instr->InputAt(1)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
+        } else {                                                      \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
+        }                                                             \
+      }                                                               \
+    }                                                                 \
+  } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(esp, ebp);
+  __ pop(ebp);
+}
+
+// Used to insert fninit/fld1 instructions after the prologue.
+thread_local bool is_block_0 = false;
+
+void CodeGenerator::AssembleSetupStackPointer() { is_block_0 = true; }
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -356,18 +389,64 @@
     __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ mov(ebp, MemOperand(ebp, 0));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register, Register,
+                                                     Register) {
+  // There are not enough temp registers left on ia32 for a call instruction
+  // so we pick some scratch registers and save/restore them manually here.
+  int scratch_count = 3;
+  Register scratch1 = ebx;
+  Register scratch2 = ecx;
+  Register scratch3 = edx;
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &done, Label::kNear);
+
+  __ push(scratch1);
+  __ push(scratch2);
+  __ push(scratch3);
+
+  // Load arguments count from current arguments adaptor frame (note, it
+  // does not include receiver).
+  Register caller_args_count_reg = scratch1;
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, scratch_count);
+  __ pop(scratch3);
+  __ pop(scratch2);
+  __ pop(scratch1);
+
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   X87OperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
 
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  // Workaround for CL #35139 (https://codereview.chromium.org/1775323002)
+  if (is_block_0) {
+    __ fninit();
+    __ fld1();
+    is_block_0 = false;
+  }
+
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(1);
@@ -399,6 +478,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(1);
@@ -406,6 +486,10 @@
       __ fstp(0);
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ jmp(code, RelocInfo::CODE_TARGET);
@@ -447,6 +531,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -460,6 +545,10 @@
       __ fstp(0);
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       break;
@@ -554,7 +643,7 @@
       __ mov(i.OutputRegister(), esp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ mov(i.OutputRegister(), Operand(ebp, 0));
       } else {
         __ mov(i.OutputRegister(), ebp);
@@ -615,38 +704,22 @@
       }
       break;
     case kX87Cmp:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ cmp(operand, i.InputImmediate(index));
-        } else {
-          __ cmp(operand, i.InputRegister(index));
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ cmp(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ cmp(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(cmp);
+      break;
+    case kX87Cmp16:
+      ASSEMBLE_COMPARE(cmpw);
+      break;
+    case kX87Cmp8:
+      ASSEMBLE_COMPARE(cmpb);
       break;
     case kX87Test:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ test(operand, i.InputImmediate(index));
-        } else {
-          __ test(i.InputRegister(index), operand);
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ test(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ test(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(test);
+      break;
+    case kX87Test16:
+      ASSEMBLE_COMPARE(test_w);
+      break;
+    case kX87Test8:
+      ASSEMBLE_COMPARE(test_b);
       break;
     case kX87Imul:
       if (HasImmediateInput(instr, 1)) {
@@ -717,6 +790,92 @@
         __ sar_cl(i.OutputOperand());
       }
       break;
+    case kX87AddPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for adc. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ add(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ add(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kX87SubPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for sbb. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ sub(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ sub(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kX87MulPair: {
+      __ imul(i.OutputRegister(1), i.InputOperand(0));
+      __ mov(i.TempRegister(0), i.InputOperand(1));
+      __ imul(i.TempRegister(0), i.InputOperand(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+      __ mov(i.OutputRegister(0), i.InputOperand(0));
+      // Multiplies the low words and stores them in eax and edx.
+      __ mul(i.InputRegister(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+
+      break;
+    }
+    case kX87ShlPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kX87ShrPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kX87SarPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
     case kX87Ror:
       if (HasImmediateInput(instr, 1)) {
         __ ror(i.OutputOperand(), i.InputInt5(1));
@@ -1176,8 +1335,8 @@
       InstructionOperand* input = instr->InputAt(0);
       if (input->IsDoubleRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
-        __ fstp_d(MemOperand(esp, 0));
-        __ fld_d(MemOperand(esp, 0));
+        __ fstp_s(MemOperand(esp, 0));
+        __ fld_s(MemOperand(esp, 0));
         __ add(esp, Immediate(kDoubleSize));
       } else {
         DCHECK(input->IsDoubleStackSlot());
@@ -1264,11 +1423,12 @@
         __ fld_d(i.InputOperand(0));
       }
       __ fild_s(Operand(esp, 0));
-      __ fadd(1);
-      __ fstp(0);
+      __ fld(1);
+      __ faddp();
       __ TruncateX87TOSToI(i.OutputRegister(0));
       __ add(esp, Immediate(kInt32Size));
       __ add(i.OutputRegister(), Immediate(0x80000000));
+      __ fstp(0);
       if (!instr->InputAt(0)->IsDoubleRegister()) {
         __ fstp(0);
       }
@@ -1610,8 +1770,16 @@
   X87OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
-  Label* tlabel = branch->true_label;
-  Label* flabel = branch->false_label;
+
+  Label done;
+  Label tlabel_tmp;
+  Label flabel_tmp;
+  Label* tlabel = &tlabel_tmp;
+  Label* flabel = &flabel_tmp;
+
+  Label* tlabel_dst = branch->true_label;
+  Label* flabel_dst = branch->false_label;
+
   switch (branch->condition) {
     case kUnorderedEqual:
       __ j(parity_even, flabel, flabel_distance);
@@ -1661,6 +1829,34 @@
   }
   // Add a jump if not falling through to the next block.
   if (!branch->fallthru) __ jmp(flabel);
+
+  __ jmp(&done);
+  __ bind(&tlabel_tmp);
+  FlagsMode mode = FlagsModeField::decode(instr->opcode());
+  if (mode == kFlags_deoptimize) {
+    int double_register_param_count = 0;
+    int x87_layout = 0;
+    for (size_t i = 0; i < instr->InputCount(); i++) {
+      if (instr->InputAt(i)->IsDoubleRegister()) {
+        double_register_param_count++;
+      }
+    }
+    // Currently we use only one X87 register. If double_register_param_count
+    // is bigger than 1, it means a duplicated double register has been added
+    // as an input to this instruction.
+    if (double_register_param_count > 0) {
+      x87_layout = (0 << 3) | 1;
+    }
+    // The layout of the x87 register stack is loaded onto the top of the FPU
+    // register stack for deoptimization.
+    __ push(Immediate(x87_layout));
+    __ fild_s(MemOperand(esp, 0));
+    __ lea(esp, Operand(esp, kPointerSize));
+  }
+  __ jmp(tlabel_dst);
+  __ bind(&flabel_tmp);
+  __ jmp(flabel_dst);
+  __ bind(&done);
 }
 
 
@@ -1914,21 +2110,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    // Assemble a prologue similar the to cdecl calling convention.
-    __ push(ebp);
-    __ mov(ebp, esp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    // TODO(turbofan): this prologue is redundant with OSR, but needed for
-    // code aging.
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ push(ebp);
+      __ mov(ebp, esp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
-
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1941,6 +2132,10 @@
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+
+    // Initialize FPU state.
+    __ fninit();
+    __ fld1();
   }
 
   const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1958,10 +2153,6 @@
     }
     frame()->AllocateSavedCalleeRegisterSlots(pushed);
   }
-
-  // Initailize FPU state.
-  __ fninit();
-  __ fld1();
 }
 
 
@@ -1994,17 +2185,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-    __ pop(ebp);       // Pop caller's frame pointer.
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-      __ pop(ebp);       // Pop caller's frame pointer.
+      AssembleDeconstructFrame();
     }
   }
   if (pop_count == 0) {
@@ -2040,15 +2229,15 @@
     Constant src_constant = g.ToConstant(source);
     if (src_constant.type() == Constant::kHeapObject) {
       Handle<HeapObject> src = src_constant.ToHeapObject();
-      int offset;
-      if (IsMaterializableFromFrame(src, &offset)) {
+      int slot;
+      if (IsMaterializableFromFrame(src, &slot)) {
         if (destination->IsRegister()) {
           Register dst = g.ToRegister(destination);
-          __ mov(dst, g.ToMaterializableOperand(offset));
+          __ mov(dst, g.SlotToOperand(slot));
         } else {
           DCHECK(destination->IsStackSlot());
           Operand dst = g.ToOperand(destination);
-          __ push(g.ToMaterializableOperand(offset));
+          __ push(g.SlotToOperand(slot));
           __ pop(dst);
         }
       } else if (destination->IsRegister()) {
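
The new kX87AddPair/kX87SubPair cases above implement 64-bit arithmetic on 32-bit
registers: the low words are combined with add/sub and the high words with adc/sbb
so the carry or borrow propagates. A rough C++ stand-in for the add case, not
generated machine code:

// Illustrative sketch of the add/adc pairing behind kX87AddPair:
// a 64-bit addition done as a low-word add plus a high-word add-with-carry.
#include <cstdint>
#include <cstdio>

void AddPair(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
             uint32_t* out_lo, uint32_t* out_hi) {
  uint32_t lo = a_lo + b_lo;             // add
  uint32_t carry = lo < a_lo ? 1u : 0u;  // carry flag from the low-word add
  *out_lo = lo;
  *out_hi = a_hi + b_hi + carry;         // adc
}

int main() {
  uint32_t lo, hi;
  AddPair(0xFFFFFFFFu, 0x0u, 0x1u, 0x0u, &lo, &hi);
  std::printf("%08x%08x\n", hi, lo);  // prints 0000000100000000
  return 0;
}
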
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index e5d0912..d70a737 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -17,7 +17,11 @@
   V(X87Add)                        \
   V(X87And)                        \
   V(X87Cmp)                        \
+  V(X87Cmp16)                      \
+  V(X87Cmp8)                       \
   V(X87Test)                       \
+  V(X87Test16)                     \
+  V(X87Test8)                      \
   V(X87Or)                         \
   V(X87Xor)                        \
   V(X87Sub)                        \
@@ -31,6 +35,12 @@
   V(X87Shl)                        \
   V(X87Shr)                        \
   V(X87Sar)                        \
+  V(X87AddPair)                    \
+  V(X87SubPair)                    \
+  V(X87MulPair)                    \
+  V(X87ShlPair)                    \
+  V(X87ShrPair)                    \
+  V(X87SarPair)                    \
   V(X87Ror)                        \
   V(X87Lzcnt)                      \
   V(X87Popcnt)                     \
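
The opcode list above is an X-macro: each new entry such as kX87AddPair or kX87Cmp8
is declared once in the V(...) list and expanded wherever the list is instantiated.
A small self-contained sketch of that pattern, with an invented list:

// Sketch of the X-macro style used for the opcode list: one list macro,
// expanded once into an enum and once into name strings. Names are illustrative.
#include <cstdio>

#define OPCODE_LIST(V) \
  V(Add)               \
  V(AddPair)           \
  V(Cmp8)

enum Opcode {
#define DECLARE(name) k##name,
  OPCODE_LIST(DECLARE)
#undef DECLARE
};

const char* OpcodeName(Opcode op) {
  switch (op) {
#define CASE(name) case k##name: return #name;
    OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", OpcodeName(kAddPair));  // prints AddPair
  return 0;
}
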
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index 079d5d2..e4d085e 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -27,6 +27,30 @@
     return DefineAsRegister(node);
   }
 
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(node, input)) {
+      return false;
+    }
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kX87Cmp:
+      case kX87Test:
+        return rep == MachineRepresentation::kWord32 ||
+               rep == MachineRepresentation::kTagged;
+      case kX87Cmp16:
+      case kX87Test16:
+        return rep == MachineRepresentation::kWord16;
+      case kX87Cmp8:
+      case kX87Test8:
+        return rep == MachineRepresentation::kWord8;
+      default:
+        break;
+    }
+    return false;
+  }
+
   InstructionOperand CreateImmediate(int imm) {
     return sequence()->AddImmediate(Constant(imm));
   }
@@ -193,9 +217,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MR1;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -365,10 +387,11 @@
   }
 }
 
+namespace {
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, FlagsContinuation* cont) {
   X87OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
@@ -417,18 +440,24 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode) {
   FlagsContinuation cont;
   VisitBinop(selector, node, opcode, &cont);
 }
 
+}  // namespace
 
 void InstructionSelector::VisitWord32And(Node* node) {
   VisitBinop(this, node, kX87And);
@@ -491,9 +520,10 @@
 
 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X87OperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempRegister(eax)};
   selector->Emit(opcode, g.DefineAsFixed(node, edx),
                  g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)));
+                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
 }
 
 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
@@ -539,6 +569,93 @@
   VisitShift(this, node, kX87Sar);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  X87OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  X87OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  X87OperandGenerator g(this);
+
+  // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+  // register and one mov instruction.
+  InstructionOperand inputs[] = {
+      g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
+      g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+
+  InstructionOperand temps[] = {g.TempRegister(edx)};
+
+  Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+                          Node* node) {
+  X87OperandGenerator g(selector);
+
+  Node* shift = node->InputAt(2);
+  InstructionOperand shift_operand;
+  if (g.CanBeImmediate(shift)) {
+    shift_operand = g.UseImmediate(shift);
+  } else {
+    shift_operand = g.UseFixed(shift, ecx);
+  }
+  InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
+                                 g.UseFixed(node->InputAt(1), edx),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitWord32PairShift(this, kX87ShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitWord32PairShift(this, kX87ShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitWord32PairShift(this, kX87SarPair, node);
+}
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitShift(this, node, kX87Ror);
@@ -714,6 +831,10 @@
   Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   X87OperandGenerator g(this);
@@ -990,6 +1111,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
 
 namespace {
 
@@ -1011,6 +1133,9 @@
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1018,33 +1143,21 @@
   }
 }
 
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
-                         Node* node, Node* input) {
-  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
-    return false;
-  }
-  MachineRepresentation load_representation =
-      LoadRepresentationOf(input->op()).representation();
-  if (load_representation == MachineRepresentation::kWord32 ||
-      load_representation == MachineRepresentation::kTagged) {
-    return opcode == kX87Cmp || opcode == kX87Test;
-  }
-  return false;
-}
-
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
                   FlagsContinuation* cont) {
   X87OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
   if (cont->IsBranch()) {
-    selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+    selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
-    selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
-                   left, right);
+    selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
   }
 }
 
@@ -1060,6 +1173,36 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+                                    Node* right) {
+  if (opcode != kX87Cmp && opcode != kX87Test) {
+    return opcode;
+  }
+  // Currently, if one of the two operands is not a Load, we don't know what its
+  // machine representation is, so we bail out.
+  // TODO(epertoso): we can probably get some size information out of immediates
+  // and phi nodes.
+  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+    return opcode;
+  }
+  // If the load representations don't match, both operands will be
+  // zero/sign-extended to 32 bits.
+  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+  if (left_representation != LoadRepresentationOf(right->op())) {
+    return opcode;
+  }
+  switch (left_representation.representation()) {
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kWord8:
+      return opcode == kX87Cmp ? kX87Cmp8 : kX87Test8;
+    case MachineRepresentation::kWord16:
+      return opcode == kX87Cmp ? kX87Cmp16 : kX87Test16;
+    default:
+      return opcode;
+  }
+}
 
 // Shared routine for multiple float32 compare operations (inputs commuted).
 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1070,6 +1213,10 @@
   if (cont->IsBranch()) {
     selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+                             g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(cont->Encode(kX87Float32Cmp),
@@ -1087,6 +1234,10 @@
   if (cont->IsBranch()) {
     selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+                             g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(cont->Encode(kX87Float64Cmp),
@@ -1101,15 +1252,22 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // If one of the two inputs is an immediate, make sure it's on the right.
-  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+  InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
+  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+      (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
+       !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left)) {
+      // TODO(epertoso): we should use `narrowed_opcode' here once we match
+      // immediates too.
       return VisitCompareWithMemoryOperand(selector, opcode, left,
                                            g.UseImmediate(right), cont);
     }
@@ -1117,15 +1275,21 @@
                         cont);
   }
 
+  // Match memory operands on left side of comparison.
+  if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+    bool needs_byte_register =
+        narrowed_opcode == kX87Test8 || narrowed_opcode == kX87Cmp8;
+    return VisitCompareWithMemoryOperand(
+        selector, narrowed_opcode, left,
+        needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
+        cont);
+  }
+
   if (g.CanBeBetterLeftOperand(right)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
-  if (CanUseMemoryOperand(selector, opcode, node, left)) {
-    return VisitCompareWithMemoryOperand(selector, opcode, left,
-                                         g.UseRegister(right), cont);
-  }
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1145,6 +1309,9 @@
       if (cont->IsBranch()) {
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
+      } else if (cont->IsDeoptimize()) {
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+                                 cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1254,6 +1421,17 @@
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   X87OperandGenerator g(this);
@@ -1284,7 +1462,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1294,32 +1472,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX87Add, &cont);
   }
   FlagsContinuation cont;
@@ -1329,7 +1509,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX87Sub, &cont);
   }
   FlagsContinuation cont;
@@ -1338,37 +1518,41 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
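
TryNarrowOpcodeSize above only narrows kX87Cmp/kX87Test when both operands are
loads of the same narrower representation; otherwise the operands are extended to
32 bits and the full-width compare has to stay. A simplified standalone mirror of
that decision, with stand-in enums rather than V8's types:

// Rough mirror of the narrowing rule: a 32-bit compare can be narrowed to 8 or
// 16 bits only when both operands are loads of the same narrower representation.
#include <cassert>

enum Rep { kWord8, kWord16, kWord32 };
enum Op { kCmp, kCmp8, kCmp16 };

Op TryNarrow(Op op, bool left_is_load, Rep left, bool right_is_load, Rep right) {
  if (op != kCmp) return op;
  // Without load information on both sides, assume 32-bit semantics.
  if (!left_is_load || !right_is_load) return op;
  // Mismatched widths get zero/sign-extended to 32 bits, so keep kCmp.
  if (left != right) return op;
  if (left == kWord8) return kCmp8;
  if (left == kWord16) return kCmp16;
  return op;
}

int main() {
  assert(TryNarrow(kCmp, true, kWord8, true, kWord8) == kCmp8);
  assert(TryNarrow(kCmp, true, kWord8, true, kWord16) == kCmp);
  return 0;
}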
 
diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc
index 2006a79..13fec35 100644
--- a/src/compiler/zone-pool.cc
+++ b/src/compiler/zone-pool.cc
@@ -13,7 +13,7 @@
       total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
       max_allocated_bytes_(0) {
   zone_pool_->stats_.push_back(this);
-  for (auto zone : zone_pool_->used_) {
+  for (Zone* zone : zone_pool_->used_) {
     size_t size = static_cast<size_t>(zone->allocation_size());
     std::pair<InitialValues::iterator, bool> res =
         initial_values_.insert(std::make_pair(zone, size));
@@ -64,9 +64,8 @@
   }
 }
 
-
-ZonePool::ZonePool() : max_allocated_bytes_(0), total_deleted_bytes_(0) {}
-
+ZonePool::ZonePool(base::AccountingAllocator* allocator)
+    : max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
 
 ZonePool::~ZonePool() {
   DCHECK(used_.empty());
@@ -103,7 +102,7 @@
     zone = unused_.back();
     unused_.pop_back();
   } else {
-    zone = new Zone();
+    zone = new Zone(allocator_);
   }
   used_.push_back(zone);
   DCHECK_EQ(0u, zone->allocation_size());
@@ -116,7 +115,7 @@
   // Update max.
   max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
   // Update stats.
-  for (auto stat_scope : stats_) {
+  for (StatsScope* stat_scope : stats_) {
     stat_scope->ZoneReturned(zone);
   }
   // Remove from used.
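
The ZonePool change threads a base::AccountingAllocator through to every Zone it
creates while keeping the reuse of returned zones. A minimal sketch of that
injected-allocator pool shape, using invented types rather than V8's Zone or
AccountingAllocator:

// Minimal sketch of an object pool that reuses freed items and routes all new
// allocations through an injected allocator. Types here are invented.
#include <cassert>
#include <vector>

struct Allocator {
  int allocated = 0;
};

struct Buffer {
  explicit Buffer(Allocator* a) : allocator(a) { allocator->allocated++; }
  Allocator* allocator;
};

class Pool {
 public:
  explicit Pool(Allocator* allocator) : allocator_(allocator) {}
  ~Pool() { for (Buffer* b : unused_) delete b; }

  Buffer* Acquire() {
    if (!unused_.empty()) {          // reuse before allocating
      Buffer* b = unused_.back();
      unused_.pop_back();
      return b;
    }
    return new Buffer(allocator_);   // fresh allocations go through allocator_
  }
  void Release(Buffer* b) { unused_.push_back(b); }

 private:
  Allocator* allocator_;
  std::vector<Buffer*> unused_;
};

int main() {
  Allocator alloc;
  Pool pool(&alloc);
  Buffer* first = pool.Acquire();
  pool.Release(first);
  Buffer* second = pool.Acquire();  // reused, no second allocation
  assert(second == first);
  assert(alloc.allocated == 1);
  pool.Release(second);
  return 0;
}
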
diff --git a/src/compiler/zone-pool.h b/src/compiler/zone-pool.h
index aaf9daa..44a649f 100644
--- a/src/compiler/zone-pool.h
+++ b/src/compiler/zone-pool.h
@@ -61,7 +61,7 @@
     DISALLOW_COPY_AND_ASSIGN(StatsScope);
   };
 
-  ZonePool();
+  explicit ZonePool(base::AccountingAllocator* allocator);
   ~ZonePool();
 
   size_t GetMaxAllocatedBytes();
@@ -82,6 +82,7 @@
   Stats stats_;
   size_t max_allocated_bytes_;
   size_t total_deleted_bytes_;
+  base::AccountingAllocator* allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(ZonePool);
 };
diff --git a/src/contexts-inl.h b/src/contexts-inl.h
index c26ce5b..344d5db 100644
--- a/src/contexts-inl.h
+++ b/src/contexts-inl.h
@@ -105,6 +105,10 @@
   return map == map->GetHeap()->with_context_map();
 }
 
+bool Context::IsDebugEvaluateContext() {
+  Map* map = this->map();
+  return map == map->GetHeap()->debug_evaluate_context_map();
+}
 
 bool Context::IsBlockContext() {
   Map* map = this->map();
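
IsDebugEvaluateContext above follows the existing convention in this file: a
context's kind is decided by pointer-comparing its map against a canonical map
owned by the heap. A tiny standalone sketch of that map-identity check, with
stand-in structs rather than V8 objects:

// Sketch of type-tagging via map identity: each context kind is identified by
// comparing its map pointer against one canonical map owned by the heap.
#include <cassert>

struct Map {};

struct Heap {
  Map with_context_map;
  Map debug_evaluate_context_map;
};

struct Context {
  const Map* map;
  bool IsDebugEvaluateContext(const Heap& heap) const {
    return map == &heap.debug_evaluate_context_map;  // identity, not contents
  }
};

int main() {
  Heap heap;
  Context ctx{&heap.debug_evaluate_context_map};
  assert(ctx.IsDebugEvaluateContext(heap));
  return 0;
}
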
diff --git a/src/contexts.cc b/src/contexts.cc
index 7549d20..67a9fea 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -164,14 +164,16 @@
   Handle<Object> unscopables;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, unscopables,
-      Object::GetProperty(it->GetReceiver(),
-                          isolate->factory()->unscopables_symbol()),
+      JSReceiver::GetProperty(Handle<JSReceiver>::cast(it->GetReceiver()),
+                              isolate->factory()->unscopables_symbol()),
       Nothing<bool>());
   if (!unscopables->IsJSReceiver()) return Just(true);
   Handle<Object> blacklist;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, blacklist,
-                                   Object::GetProperty(unscopables, it->name()),
-                                   Nothing<bool>());
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate, blacklist,
+      JSReceiver::GetProperty(Handle<JSReceiver>::cast(unscopables),
+                              it->name()),
+      Nothing<bool>());
   return Just(!blacklist->BooleanValue());
 }
 
@@ -231,6 +233,7 @@
   Handle<Context> context(this, isolate);
 
   bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
+  bool failed_whitelist = false;
   *index = kNotFound;
   *attributes = ABSENT;
   *binding_flags = MISSING_BINDING;
@@ -291,7 +294,7 @@
         if (name->Equals(*isolate->factory()->this_string())) {
           maybe = Just(ABSENT);
         } else {
-          LookupIterator it(object, name);
+          LookupIterator it(object, name, object);
           Maybe<bool> found = UnscopableLookup(&it);
           if (found.IsNothing()) {
             maybe = Nothing<PropertyAttributes>();
@@ -376,6 +379,31 @@
         *binding_flags = MUTABLE_IS_INITIALIZED;
         return context;
       }
+    } else if (context->IsDebugEvaluateContext()) {
+      // Check materialized locals.
+      Object* obj = context->get(EXTENSION_INDEX);
+      if (obj->IsJSReceiver()) {
+        Handle<JSReceiver> extension(JSReceiver::cast(obj));
+        LookupIterator it(extension, name, extension);
+        Maybe<bool> found = JSReceiver::HasProperty(&it);
+        if (found.FromMaybe(false)) {
+          *attributes = NONE;
+          return extension;
+        }
+      }
+      // Check the original context, but do not follow its context chain.
+      obj = context->get(WRAPPED_CONTEXT_INDEX);
+      if (obj->IsContext()) {
+        Handle<Object> result = Context::cast(obj)->Lookup(
+            name, DONT_FOLLOW_CHAINS, index, attributes, binding_flags);
+        if (!result.is_null()) return result;
+      }
+      // Check the whitelist. Names that do not pass the whitelist may only
+      // resolve in with, script or native contexts up the context chain.
+      obj = context->get(WHITE_LIST_INDEX);
+      if (obj->IsStringSet()) {
+        failed_whitelist = failed_whitelist || !StringSet::cast(obj)->Has(name);
+      }
     }
 
     // 3. Prepare to continue with the previous (next outermost) context.
@@ -384,7 +412,12 @@
          context->is_declaration_context())) {
       follow_context_chain = false;
     } else {
-      context = Handle<Context>(context->previous(), isolate);
+      do {
+        context = Handle<Context>(context->previous(), isolate);
+        // If we come across a whitelist context, and the name is not
+        // whitelisted, then only consider with, script or native contexts.
+      } while (failed_whitelist && !context->IsScriptContext() &&
+               !context->IsNativeContext() && !context->IsWithContext());
     }
   } while (follow_context_chain);
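
The debug-evaluate lookup above records a failed whitelist check and then lets the
chain walk skip everything except with, script and native contexts. A simplified
sketch of that gating rule over a flat context chain, with invented types and
without the materialized-locals and wrapped-context steps:

// Sketch of the whitelist rule: once a name fails a debug-evaluate whitelist,
// only with/script/native contexts further up the chain may still resolve it.
#include <cassert>
#include <cstddef>
#include <vector>

enum Kind { kFunction, kWith, kScript, kNative, kDebugEvaluate };

struct Ctx {
  Kind kind;
  bool has_name;         // does this context bind the name we look for?
  bool whitelists_name;  // only meaningful for kDebugEvaluate
};

// Returns the index of the resolving context, or -1.
int Lookup(const std::vector<Ctx>& chain) {
  bool failed_whitelist = false;
  for (std::size_t i = 0; i < chain.size(); i++) {
    const Ctx& c = chain[i];
    bool skippable =
        c.kind != kWith && c.kind != kScript && c.kind != kNative;
    if (failed_whitelist && skippable) continue;  // skip ordinary contexts
    if (c.has_name) return static_cast<int>(i);
    if (c.kind == kDebugEvaluate && !c.whitelists_name) failed_whitelist = true;
  }
  return -1;
}

int main() {
  // Name bound in a plain function context above a failing whitelist: hidden.
  std::vector<Ctx> chain = {{kDebugEvaluate, false, false},
                            {kFunction, true, false},
                            {kScript, false, false}};
  assert(Lookup(chain) == -1);
  return 0;
}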
 
diff --git a/src/contexts.h b/src/contexts.h
index 38ebf64..90fb9a4 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -79,7 +79,6 @@
 
 #define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                             \
   V(IS_ARRAYLIKE, JSFunction, is_arraylike)                               \
-  V(CONCAT_ITERABLE_TO_ARRAY_INDEX, JSFunction, concat_iterable_to_array) \
   V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site)     \
   V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error)                 \
   V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error)                   \
@@ -94,7 +93,9 @@
   V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property)   \
   V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments)                 \
   V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)                   \
-  V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance)
+  V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance)       \
+  V(MATH_FLOOR, JSFunction, math_floor)                                   \
+  V(MATH_SQRT, JSFunction, math_sqrt)
 
 #define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                     \
   V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                             \
@@ -116,6 +117,7 @@
   V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                                \
   V(MAP_HAS_METHOD_INDEX, JSFunction, map_has)                                \
   V(MAP_SET_METHOD_INDEX, JSFunction, map_set)                                \
+  V(MATH_POW_METHOD_INDEX, JSFunction, math_pow)                              \
   V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number)   \
   V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number)       \
   V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line)       \
@@ -184,7 +186,7 @@
   V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun)                    \
   V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function)                  \
   V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun)                    \
-  V(TEMPLATE_INSTANTIATIONS_CACHE_INDEX, ObjectHashTable,                      \
+  V(TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary,             \
     template_instantiations_cache)                                             \
   V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function)                    \
   V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction,                             \
@@ -211,21 +213,8 @@
     js_array_fast_double_elements_map_index)                                   \
   V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map,                        \
     js_array_fast_holey_double_elements_map_index)                             \
-  V(JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX, Map,                          \
-    js_array_fast_smi_elements_strong_map_index)                               \
-  V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_STRONG_MAP_INDEX, Map,                    \
-    js_array_fast_holey_smi_elements_strong_map_index)                         \
-  V(JS_ARRAY_FAST_ELEMENTS_STRONG_MAP_INDEX, Map,                              \
-    js_array_fast_elements_strong_map_index)                                   \
-  V(JS_ARRAY_FAST_HOLEY_ELEMENTS_STRONG_MAP_INDEX, Map,                        \
-    js_array_fast_holey_elements_strong_map_index)                             \
-  V(JS_ARRAY_FAST_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map,                       \
-    js_array_fast_double_elements_strong_map_index)                            \
-  V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map,                 \
-    js_array_fast_holey_double_elements_strong_map_index)                      \
   V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun)                                  \
   V(JS_MAP_MAP_INDEX, Map, js_map_map)                                         \
-  V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map)                     \
   V(JS_SET_FUN_INDEX, JSFunction, js_set_fun)                                  \
   V(JS_SET_MAP_INDEX, Map, js_set_map)                                         \
   V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun)                        \
@@ -269,10 +258,6 @@
   V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map)   \
   V(STRING_FUNCTION_INDEX, JSFunction, string_function)                        \
   V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map)   \
-  V(STRONG_CONSTRUCTOR_MAP_INDEX, Map, strong_constructor_map)                 \
-  V(STRONG_FUNCTION_MAP_INDEX, Map, strong_function_map)                       \
-  V(STRONG_GENERATOR_FUNCTION_MAP_INDEX, Map, strong_generator_function_map)   \
-  V(STRONG_MAP_CACHE_INDEX, Object, strong_map_cache)                          \
   V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function)                        \
   V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun)                      \
   V(UINT16X8_FUNCTION_INDEX, JSFunction, uint16x8_function)                    \
@@ -416,12 +401,14 @@
     NATIVE_CONTEXT_SLOTS,
     FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST,
     FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX,
-    FIRST_JS_ARRAY_STRONG_MAP_SLOT =
-        JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX,
 
     MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
     // This slot holds the thrown value in catch contexts.
     THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
+
+    // These slots hold values in debug evaluate contexts.
+    WRAPPED_CONTEXT_INDEX = MIN_CONTEXT_SLOTS,
+    WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
   };
 
   void IncrementErrorsThrown();
@@ -474,6 +461,7 @@
   inline bool IsFunctionContext();
   inline bool IsCatchContext();
   inline bool IsWithContext();
+  inline bool IsDebugEvaluateContext();
   inline bool IsBlockContext();
   inline bool IsModuleContext();
   inline bool IsScriptContext();
@@ -539,34 +527,27 @@
 
   static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
     if (IsGeneratorFunction(kind)) {
-      return is_strong(language_mode) ? STRONG_GENERATOR_FUNCTION_MAP_INDEX :
-             is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
+      return is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
                                       : SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
     }
 
     if (IsClassConstructor(kind)) {
       // Use strict function map (no own "caller" / "arguments")
-      return is_strong(language_mode) ? STRONG_CONSTRUCTOR_MAP_INDEX
-                                      : STRICT_FUNCTION_MAP_INDEX;
+      return STRICT_FUNCTION_MAP_INDEX;
     }
 
     if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
         IsAccessorFunction(kind)) {
-      return is_strong(language_mode)
-                 ? STRONG_FUNCTION_MAP_INDEX
-                 : STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+      return STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
     }
 
-    return is_strong(language_mode) ? STRONG_FUNCTION_MAP_INDEX :
-           is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
+    return is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
                                     : SLOPPY_FUNCTION_MAP_INDEX;
   }
 
-  static int ArrayMapIndex(ElementsKind elements_kind,
-                           Strength strength = Strength::WEAK) {
+  static int ArrayMapIndex(ElementsKind elements_kind) {
     DCHECK(IsFastElementsKind(elements_kind));
-    return elements_kind + (is_strong(strength) ? FIRST_JS_ARRAY_STRONG_MAP_SLOT
-                                                : FIRST_JS_ARRAY_MAP_SLOT);
+    return elements_kind + FIRST_JS_ARRAY_MAP_SLOT;
   }
 
   static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
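
With the strong-mode slots removed, ArrayMapIndex above reduces to a plain offset:
the fast ElementsKind values index one contiguous block of context slots starting
at FIRST_JS_ARRAY_MAP_SLOT. A trivial sketch of that scheme with invented
constants:

// Sketch of slot indexing by offset: each fast elements kind gets one slot in a
// contiguous block, so the index is the kind plus the block's first slot.
#include <cassert>

enum ElementsKind { kFastSmi = 0, kFastHoleySmi = 1, kFastObject = 2 };

const int kFirstJSArrayMapSlot = 10;  // illustrative start of the block

int ArrayMapIndex(ElementsKind kind) {
  return kind + kFirstJSArrayMapSlot;  // one slot per fast elements kind
}

int main() {
  assert(ArrayMapIndex(kFastObject) == 12);
  return 0;
}
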
diff --git a/src/counters.cc b/src/counters.cc
index a10494e..4f5c251 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -273,7 +273,9 @@
 }
 
 void RuntimeCallStats::Enter(RuntimeCallCounter* counter) {
-  Enter(new RuntimeCallTimer(counter, current_timer_));
+  RuntimeCallTimer* timer = new RuntimeCallTimer();
+  timer->Initialize(counter, current_timer_);
+  Enter(timer);
 }
 
 void RuntimeCallStats::Enter(RuntimeCallTimer* timer_) {
@@ -303,31 +305,34 @@
 #undef PRINT_COUNTER
 
   entries.Add(&this->ExternalCallback);
+  entries.Add(&this->GC);
   entries.Add(&this->UnexpectedStubMiss);
 
   entries.Print(os);
 }
 
 void RuntimeCallStats::Reset() {
+  if (!FLAG_runtime_call_stats) return;
 #define RESET_COUNTER(name, nargs, ressize) this->Runtime_##name.Reset();
   FOR_EACH_INTRINSIC(RESET_COUNTER)
 #undef RESET_COUNTER
 #define RESET_COUNTER(name, type) this->Builtin_##name.Reset();
   BUILTIN_LIST_C(RESET_COUNTER)
 #undef RESET_COUNTER
+  this->ExternalCallback.Reset();
+  this->GC.Reset();
+  this->UnexpectedStubMiss.Reset();
 }
 
-RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
-                                             RuntimeCallCounter* counter)
-    : isolate_(isolate),
-      timer_(counter,
-             isolate->counters()->runtime_call_stats()->current_timer()) {
-  if (!FLAG_runtime_call_stats) return;
-  isolate->counters()->runtime_call_stats()->Enter(&timer_);
+void RuntimeCallTimerScope::Enter(Isolate* isolate,
+                                  RuntimeCallCounter* counter) {
+  isolate_ = isolate;
+  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+  timer_.Initialize(counter, stats->current_timer());
+  stats->Enter(&timer_);
 }
 
-RuntimeCallTimerScope::~RuntimeCallTimerScope() {
-  if (!FLAG_runtime_call_stats) return;
+void RuntimeCallTimerScope::Leave() {
   isolate_->counters()->runtime_call_stats()->Leave(&timer_);
 }
 
diff --git a/src/counters.h b/src/counters.h
index a417da3..7183d0e 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -492,8 +492,11 @@
 // timers used for properly measuring the own time of a RuntimeCallCounter.
 class RuntimeCallTimer {
  public:
-  RuntimeCallTimer(RuntimeCallCounter* counter, RuntimeCallTimer* parent)
-      : counter_(counter), parent_(parent) {}
+  inline void Initialize(RuntimeCallCounter* counter,
+                         RuntimeCallTimer* parent) {
+    counter_ = counter;
+    parent_ = parent;
+  }
 
   inline void Start() {
     timer_.Start();
@@ -509,7 +512,9 @@
     return parent_;
   }
 
-  void AdjustForSubTimer(base::TimeDelta delta) { counter_->time -= delta; }
+  inline void AdjustForSubTimer(base::TimeDelta delta) {
+    counter_->time -= delta;
+  }
 
  private:
   RuntimeCallCounter* counter_;
@@ -523,6 +528,7 @@
       RuntimeCallCounter("UnexpectedStubMiss");
   // Counter for runtime callbacks into JavaScript.
   RuntimeCallCounter ExternalCallback = RuntimeCallCounter("ExternalCallback");
+  RuntimeCallCounter GC = RuntimeCallCounter("GC");
 #define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
   RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
   FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
@@ -557,8 +563,16 @@
 // the time of C++ scope.
 class RuntimeCallTimerScope {
  public:
-  explicit RuntimeCallTimerScope(Isolate* isolate, RuntimeCallCounter* counter);
-  ~RuntimeCallTimerScope();
+  inline explicit RuntimeCallTimerScope(Isolate* isolate,
+                                        RuntimeCallCounter* counter) {
+    if (FLAG_runtime_call_stats) Enter(isolate, counter);
+  }
+  inline ~RuntimeCallTimerScope() {
+    if (FLAG_runtime_call_stats) Leave();
+  }
+
+  void Enter(Isolate* isolate, RuntimeCallCounter* counter);
+  void Leave();
 
  private:
   Isolate* isolate_;
@@ -742,17 +756,11 @@
   SC(regexp_entry_native, V8.RegExpEntryNative)                                \
   SC(number_to_string_native, V8.NumberToStringNative)                         \
   SC(number_to_string_runtime, V8.NumberToStringRuntime)                       \
-  SC(math_acos_runtime, V8.MathAcosRuntime)                                    \
-  SC(math_asin_runtime, V8.MathAsinRuntime)                                    \
-  SC(math_atan_runtime, V8.MathAtanRuntime)                                    \
   SC(math_atan2_runtime, V8.MathAtan2Runtime)                                  \
   SC(math_clz32_runtime, V8.MathClz32Runtime)                                  \
   SC(math_exp_runtime, V8.MathExpRuntime)                                      \
-  SC(math_floor_runtime, V8.MathFloorRuntime)                                  \
   SC(math_log_runtime, V8.MathLogRuntime)                                      \
   SC(math_pow_runtime, V8.MathPowRuntime)                                      \
-  SC(math_round_runtime, V8.MathRoundRuntime)                                  \
-  SC(math_sqrt_runtime, V8.MathSqrtRuntime)                                    \
   SC(stack_interrupts, V8.StackInterrupts)                                     \
   SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                          \
   SC(runtime_calls, V8.RuntimeCalls)                          \
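
The counters.cc/counters.h hunks above move the FLAG_runtime_call_stats check into the inlined RuntimeCallTimerScope constructor/destructor, so the common path (stats disabled) costs only a branch and the heavier Enter/Leave bookkeeping stays out of line. A minimal standalone sketch of that guard pattern follows; StatsFlag, ScopedTimer, EnterSlow and LeaveSlow are illustrative names, not the V8 classes.

    #include <chrono>

    static bool StatsFlag = false;  // stands in for FLAG_runtime_call_stats

    class ScopedTimer {
     public:
      explicit ScopedTimer(long* counter_us) {
        if (StatsFlag) EnterSlow(counter_us);  // cheap branch when disabled
      }
      ~ScopedTimer() {
        if (StatsFlag) LeaveSlow();
      }

     private:
      void EnterSlow(long* counter_us) {
        counter_us_ = counter_us;
        start_ = std::chrono::steady_clock::now();
      }
      void LeaveSlow() {
        auto elapsed = std::chrono::steady_clock::now() - start_;
        *counter_us_ += std::chrono::duration_cast<std::chrono::microseconds>(
                            elapsed).count();
      }

      long* counter_us_ = nullptr;
      std::chrono::steady_clock::time_point start_;
    };

When the flag is off, the whole scope reduces to two predictable branches, which is the point of replacing the always-constructed timer with Initialize-on-demand.
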
diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc
index d5590f5..4072982 100644
--- a/src/crankshaft/arm/lithium-arm.cc
+++ b/src/crankshaft/arm/lithium-arm.cc
@@ -248,27 +248,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -567,12 +546,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
@@ -897,22 +871,16 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -925,14 +893,14 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
   if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
     branch = AssignEnvironment(branch);
   }
@@ -1059,16 +1027,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), r1);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1092,6 +1050,9 @@
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -1100,6 +1061,9 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), r1);
   LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1216,22 +1180,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseFixed(instr->function(), r1);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(r3);
-    vector = FixedTemp(r2);
-  }
-
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, r0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
@@ -1843,13 +1791,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
   // The control instruction marking the end of a block that completed
   // abruptly (e.g., threw an exception).  There is nothing specific to do.
@@ -2488,13 +2429,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), r0);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* value = UseFixed(instr->value(), r3);
@@ -2531,11 +2465,9 @@
   HEnvironment* outer = current_block_->last_environment();
   outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2596,11 +2528,5 @@
   return AssignPointerMap(result);
 }
 
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h
index 91435cf..60fe79d 100644
--- a/src/crankshaft/arm/lithium-arm.h
+++ b/src/crankshaft/arm/lithium-arm.h
@@ -29,9 +29,7 @@
   V(BitI)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallJSFunction)                          \
   V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CheckArrayBufferNotNeutered)             \
@@ -133,7 +131,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyed)                              \
   V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
@@ -146,7 +143,6 @@
   V(RSubI)                                   \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
@@ -227,6 +223,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -261,6 +264,8 @@
   virtual LOperand* TempAt(int i) = 0;
 
   class IsCallBits: public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -535,6 +540,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 
   LOperand* function() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
@@ -1721,23 +1727,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
  public:
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1800,29 +1789,6 @@
 };
 
 
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2426,19 +2392,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
@@ -2555,18 +2508,6 @@
 };
 
 
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index 7b2ebad..c64aac3 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -113,7 +113,7 @@
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
     if (info()->IsStub()) {
-      __ StubPrologue();
+      __ StubPrologue(StackFrame::STUB);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue());
     }
@@ -263,18 +263,15 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ PushFixedFrame();
-        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ push(scratch0());
-        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        __ Move(scratch0(), Smi::FromInt(StackFrame::STUB));
+        __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
       code->Generate();
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
         DCHECK(frame_is_built_);
-        __ pop(ip);
-        __ PopFixedFrame();
+        __ PopCommonFrame(scratch0());
         frame_is_built_ = false;
       }
       __ jmp(code->exit());
@@ -327,7 +324,7 @@
       if (table_entry->needs_frame) {
         DCHECK(!info()->saves_caller_doubles());
         Comment(";;; call deopt with frame");
-        __ PushFixedFrame();
+        __ PushCommonFrame();
         __ bl(&needs_frame);
       } else {
         __ bl(&call_deopt_entry);
@@ -342,10 +339,9 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      DCHECK(info()->IsStub());
       __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
       __ push(ip);
-      __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+      DCHECK(info()->IsStub());
     }
 
     Comment(";;; call deopt");
@@ -2070,29 +2066,30 @@
       __ cmp(ip, Operand::Zero());
       EmitBranch(instr, ne);
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
         __ b(eq, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // Boolean -> its value.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
         __ b(eq, instr->TrueLabel(chunk_));
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
         __ b(eq, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
         __ b(eq, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ cmp(reg, Operand::Zero());
         __ b(eq, instr->FalseLabel(chunk_));
@@ -2115,13 +2112,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
         __ b(ge, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2133,19 +2130,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
         __ b(eq, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
         __ b(eq, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         DwVfpRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
@@ -2365,11 +2362,10 @@
   DCHECK(ToRegister(instr->left()).is(r1));
   DCHECK(ToRegister(instr->right()).is(r0));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-  __ cmp(r0, Operand::Zero());
-
-  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+  __ CompareRoot(r0, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, eq);
 }
 
 
@@ -2443,11 +2439,12 @@
 
   __ JumpIfSmi(input, is_false);
 
-  __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
+  __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ b(eq, is_true);
+    __ b(hs, is_true);
   } else {
-    __ b(eq, is_false);
+    __ b(hs, is_false);
   }
 
   // Check if the constructor in the map is a function.
@@ -3056,17 +3053,20 @@
 
   if (instr->hydrogen()->from_inlined()) {
     __ sub(result, sp, Operand(2 * kPointerSize));
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     // Check if the calling frame is an arguments adaptor frame.
     Label done, adapted;
     __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ ldr(result, MemOperand(scratch,
+                              CommonFrameConstants::kContextOrFrameTypeOffset));
     __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
     __ mov(result, fp, LeaveCC, ne);
     __ mov(result, scratch, LeaveCC, eq);
+  } else {
+    __ mov(result, fp);
   }
 }
 
@@ -3188,15 +3188,26 @@
   __ b(ne, &loop);
 
   __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(r0);
+    // It is safe to use r3, r4 and r5 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) r3 (new.target) will be initialized below.
+    PrepareForTailCall(actual, r3, r4, r5);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is r0, as expected
   // by InvokeFunction.
   ParameterCount actual(receiver);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -3243,10 +3254,9 @@
   CallRuntime(Runtime::kDeclareGlobals, instr);
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -3264,17 +3274,35 @@
     __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
     __ mov(r0, Operand(arity));
 
-    // Invoke function.
-    __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
-    __ Call(ip);
+    bool is_self_call = function.is_identical_to(info()->closure());
 
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Invoke function.
+    if (is_self_call) {
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
+    } else {
+      __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+      if (is_tail_call) {
+        __ Jump(ip);
+      } else {
+        __ Call(ip);
+      }
+    }
+
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
   } else {
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
   }
 }
 
@@ -3561,22 +3589,77 @@
   __ clz(result, input);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch3,
+         MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(r1));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use r3, r4 and r5 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) r3 (new.target) will be initialized below.
+    PrepareForTailCall(actual, r3, r4, r5);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(r1, no_reg, count, CALL_FUNCTION, generator);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(r1, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
 }
 
@@ -3639,56 +3722,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(r1));
-  DCHECK(ToRegister(instr->result()).is(r0));
-
-  // Change context.
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-  __ mov(r0, Operand(instr->arity()));
-
-  // Load the code entry address
-  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  __ Call(ip);
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->function()).is(r1));
-  DCHECK(ToRegister(instr->result()).is(r0));
-
-  int arity = instr->arity();
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(r3));
-    DCHECK(vector_register.is(r2));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ Move(vector_register, vector);
-    __ mov(slot_register, Operand(Smi::FromInt(index)));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ mov(r0, Operand(arity));
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
-  }
-}
-
-
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->constructor()).is(r1));
@@ -5151,13 +5184,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(r0));
-  __ push(r0);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTypeof(LTypeof* instr) {
   DCHECK(ToRegister(instr->value()).is(r3));
   DCHECK(ToRegister(instr->result()).is(r0));
@@ -5516,13 +5542,6 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
 #undef __
 
 }  // namespace internal
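
In DoInvokeFunction and CallKnownFunction above, a syntactic tail call switches the invoke flag from CALL_FUNCTION to JUMP_FUNCTION, prepares the caller's frame for replacement, and skips the lazy-deopt safepoint because control never returns to this frame. A condensed sketch of that decision flow is below; the enum mirrors the names in the diff, but the helper functions are placeholders, not the V8 macro-assembler API.

    enum InvokeFlag { CALL_FUNCTION, JUMP_FUNCTION };

    // Placeholder helpers; in the real code these drive the macro assembler.
    static void PrepareFrameForTailCall() {}
    static void EmitJump() {}
    static void EmitCall() {}
    static void RecordLazyDeoptSafepoint() {}

    static void InvokeKnownFunction(bool is_tail_call) {
      InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
      if (is_tail_call) {
        // Drop the current frame and lay out the callee's arguments first.
        PrepareFrameForTailCall();
      }
      if (flag == JUMP_FUNCTION) {
        EmitJump();                  // control never returns to this frame
      } else {
        EmitCall();
        RecordLazyDeoptSafepoint();  // only a returning call needs lazy deopt
      }
    }
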
diff --git a/src/crankshaft/arm/lithium-codegen-arm.h b/src/crankshaft/arm/lithium-codegen-arm.h
index 67925cc..8bbacc3 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/src/crankshaft/arm/lithium-codegen-arm.h
@@ -218,11 +218,14 @@
                                LInstruction* instr,
                                LOperand* context);
 
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in r1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc
index c5d4208..6cfc846 100644
--- a/src/crankshaft/arm64/lithium-arm64.cc
+++ b/src/crankshaft/arm64/lithium-arm64.cc
@@ -66,13 +66,6 @@
 }
 
 
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -135,20 +128,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
 void LInvokeFunction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
   function()->PrintTo(stream);
@@ -735,33 +714,22 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -817,13 +785,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
   info()->MarkAsRequiresFrame();
   LOperand* args = NULL;
@@ -993,7 +954,7 @@
       return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
     }
 
-    ToBooleanStub::Types expected = instr->expected_input_types();
+    ToBooleanICStub::Types expected = instr->expected_input_types();
     bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
     LOperand* temp1 = needs_temps ? TempRegister() : NULL;
     LOperand* temp2 = needs_temps ? TempRegister() : NULL;
@@ -1011,16 +972,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), x1);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1045,23 +996,10 @@
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
                                                                 ops,
                                                                 zone());
-  return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseFixed(instr->function(), x1);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(x3);
-    vector = FixedTemp(x2);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
   }
-
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, x0), instr);
+  return MarkAsCall(DefineFixed(result, x0), instr);
 }
 
 
@@ -1454,11 +1392,9 @@
   HEnvironment* outer = current_block_->last_environment();
   outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if ((instr->arguments_var() != NULL) &&
       instr->arguments_object()->IsLinked()) {
@@ -1553,6 +1489,9 @@
   // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
   LOperand* function = UseFixed(instr->function(), x1);
   LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -2457,13 +2396,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), x0);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2688,12 +2620,5 @@
   return AssignEnvironment(DefineAsRegister(result));
 }
 
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h
index 14abeb0..237487f 100644
--- a/src/crankshaft/arm64/lithium-arm64.h
+++ b/src/crankshaft/arm64/lithium-arm64.h
@@ -32,8 +32,6 @@
   V(BitS)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallFunction)                            \
-  V(CallJSFunction)                          \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CallWithDescriptor)                      \
@@ -142,7 +140,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyedExternal)                      \
   V(StoreKeyedFixed)                         \
   V(StoreKeyedFixedDouble)                   \
@@ -157,7 +154,6 @@
   V(SubS)                                    \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(TruncateDoubleToIntOrSmi)                \
@@ -238,6 +234,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -265,6 +268,8 @@
 
  private:
   class IsCallBits: public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -635,6 +640,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 
   LOperand* function() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
@@ -801,46 +807,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2734,19 +2700,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
@@ -2888,18 +2841,6 @@
 };
 
 
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
  public:
   LWrapReceiver(LOperand* receiver, LOperand* function) {
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 6399a8b..9bbc8b8 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -359,38 +359,6 @@
 }
 
 
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->function()).Is(x1));
-  DCHECK(ToRegister(instr->result()).Is(x0));
-
-  int arity = instr->arity();
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(x3));
-    DCHECK(vector_register.is(x2));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ Mov(vector_register, vector);
-    __ Mov(slot_register, Operand(Smi::FromInt(index)));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ Mov(x0, arity);
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
-  }
-  RecordPushedArgumentsDelta(hinstr->argument_delta());
-}
-
-
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   DCHECK(instr->IsMarkedAsCall());
   DCHECK(ToRegister(instr->context()).is(cp));
@@ -606,19 +574,20 @@
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
     if (info()->IsStub()) {
-      __ StubPrologue();
+      __ StubPrologue(
+          StackFrame::STUB,
+          GetStackSlotCount() + TypedFrameConstants::kFixedSlotCount);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue());
+      // Reserve space for the stack slots needed by the code.
+      int slots = GetStackSlotCount();
+      if (slots > 0) {
+        __ Claim(slots, kPointerSize);
+      }
     }
     frame_is_built_ = true;
   }
 
-  // Reserve space for the stack slots needed by the code.
-  int slots = GetStackSlotCount();
-  if (slots > 0) {
-    __ Claim(slots, kPointerSize);
-  }
-
   if (info()->saves_caller_doubles()) {
     SaveCallerDoubles();
   }
@@ -740,11 +709,11 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ Push(lr, fp, cp);
+        __ Push(lr, fp);
         __ Mov(fp, Smi::FromInt(StackFrame::STUB));
         __ Push(fp);
         __ Add(fp, __ StackPointer(),
-               StandardFrameConstants::kFixedFrameSizeFromFp);
+               TypedFrameConstants::kFixedFrameSizeFromFp);
         Comment(";;; Deferred code");
       }
 
@@ -753,7 +722,7 @@
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
         DCHECK(frame_is_built_);
-        __ Pop(xzr, cp, fp, lr);
+        __ Pop(xzr, fp, lr);
         frame_is_built_ = false;
       }
 
@@ -1560,14 +1529,26 @@
   __ B(ne, &loop);
 
   __ Bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(x0);
+    // It is safe to use x3, x4 and x5 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) x3 (new.target) will be initialized below.
+    PrepareForTailCall(actual, x3, x4, x5);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in argc (receiver) which is x0, as
   // expected by InvokeFunction.
   ParameterCount actual(argc);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -1585,16 +1566,18 @@
     // get a pointer which will work well with LAccessArgumentsAt.
     DCHECK(masm()->StackPointer().Is(jssp));
     __ Sub(result, jssp, 2 * kPointerSize);
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     DCHECK(instr->temp() != NULL);
     Register previous_fp = ToRegister(instr->temp());
 
     __ Ldr(previous_fp,
            MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ Ldr(result,
-           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+    __ Ldr(result, MemOperand(previous_fp,
+                              CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
     __ Csel(result, fp, previous_fp, ne);
+  } else {
+    __ Mov(result, fp);
   }
 }
 
@@ -1763,17 +1746,18 @@
       __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
       EmitCompareAndBranch(instr, ne, temp, 0);
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ JumpIfRoot(
             value, Heap::kUndefinedValueRootIndex, false_label);
       }
 
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // Boolean -> its value.
         __ JumpIfRoot(
             value, Heap::kTrueValueRootIndex, true_label);
@@ -1781,13 +1765,13 @@
             value, Heap::kFalseValueRootIndex, false_label);
       }
 
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ JumpIfRoot(
             value, Heap::kNullValueRootIndex, false_label);
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         DCHECK(Smi::FromInt(0) == 0);
         __ Cbz(value, false_label);
@@ -1815,13 +1799,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
         __ B(ge, true_label);
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
@@ -1832,19 +1816,19 @@
         __ Bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
         __ B(eq, true_label);
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
         __ B(eq, true_label);
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         Label not_heap_number;
         __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
 
@@ -1867,10 +1851,9 @@
   }
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -1898,21 +1881,38 @@
     __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
     __ Mov(arity_reg, arity);
 
-    // Invoke function.
-    __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
-    __ Call(x10);
+    bool is_self_call = function.is_identical_to(info()->closure());
 
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Invoke function.
+    if (is_self_call) {
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
+    } else {
+      __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+      if (is_tail_call) {
+        __ Jump(x10);
+      } else {
+        __ Call(x10);
+      }
+    }
+
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
   } else {
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
   }
 }
 
-
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(instr->IsMarkedAsCall());
   DCHECK(ToRegister(instr->result()).Is(x0));
@@ -1959,26 +1959,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(instr->IsMarkedAsCall());
-  DCHECK(ToRegister(instr->function()).is(x1));
-
-  // Change context.
-  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
-  __ Mov(x0, instr->arity());
-
-  // Load the code entry address
-  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
-  __ Call(x10);
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-}
-
-
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   CallRuntime(instr->function(), instr->arity(), instr);
   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -2225,11 +2205,12 @@
   __ JumpIfSmi(input, false_label);
 
   Register map = scratch2;
-  __ CompareObjectType(input, map, scratch1, JS_FUNCTION_TYPE);
+  __ CompareObjectType(input, map, scratch1, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ B(eq, true_label);
+    __ B(hs, true_label);
   } else {
-    __ B(eq, false_label);
+    __ B(hs, false_label);
   }
 
   // Check if the constructor in the map is a function.
@@ -2837,23 +2818,79 @@
   __ Scvtf(result, value);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(scratch3,
+         MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ B(ne, &no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ Ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ B(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ Mov(caller_args_count_reg,
+         Immediate(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   // The function is required to be in x1.
   DCHECK(ToRegister(instr->function()).is(x1));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use x3, x4 and x5 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) x3 (new.target) will be initialized below.
+    PrepareForTailCall(actual, x3, x4, x5);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(x1, no_reg, count, CALL_FUNCTION, generator);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(x1, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 }
@@ -5244,10 +5281,10 @@
   DCHECK(ToRegister(instr->left()).is(x1));
   DCHECK(ToRegister(instr->right()).is(x0));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-
-  EmitCompareAndBranch(instr, TokenToCondition(instr->op(), false), x0, 0);
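+  // The string compare code leaves a boolean in x0; branch on whether it is
+  // the true value.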
+  __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, eq);
 }
 
 
@@ -5374,14 +5411,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).Is(x0));
-  DCHECK(ToRegister(instr->result()).Is(x0));
-  __ Push(x0);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object = ToRegister(instr->object());
 
@@ -5702,12 +5731,5 @@
   __ Bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.h b/src/crankshaft/arm64/lithium-codegen-arm64.h
index cf7de10..f67ad5a 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -322,11 +322,14 @@
                                LInstruction* instr,
                                LOperand* context);
 
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in x1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   // Support for recording safepoint and position information.
   void RecordAndWritePosition(int position) override;
diff --git a/src/crankshaft/compilation-phase.cc b/src/crankshaft/compilation-phase.cc
new file mode 100644
index 0000000..9b40cca
--- /dev/null
+++ b/src/crankshaft/compilation-phase.cc
@@ -0,0 +1,44 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/compilation-phase.h"
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
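+// With --hydrogen-stats enabled, record the info zone's allocation size and
+// start a timer here; the destructor reports the elapsed time and zone growth
+// to HStatistics.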
+CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
+    : name_(name), info_(info), zone_(info->isolate()->allocator()) {
+  if (FLAG_hydrogen_stats) {
+    info_zone_start_allocation_size_ = info->zone()->allocation_size();
+    timer_.Start();
+  }
+}
+
+CompilationPhase::~CompilationPhase() {
+  if (FLAG_hydrogen_stats) {
+    size_t size = zone()->allocation_size();
+    size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
+    isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
+  }
+}
+
+bool CompilationPhase::ShouldProduceTraceOutput() const {
+  // Trace if the appropriate trace flag is set and the phase name's first
+  // character is in the FLAG_trace_phase command line parameter.
+  AllowHandleDereference allow_deref;
+  bool tracing_on =
+      info()->IsStub()
+          ? FLAG_trace_hydrogen_stubs
+          : (FLAG_trace_hydrogen &&
+             info()->shared_info()->PassesFilter(FLAG_trace_hydrogen_filter));
+  return (tracing_on &&
+          base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) !=
+              NULL);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/compilation-phase.h b/src/crankshaft/compilation-phase.h
new file mode 100644
index 0000000..99e24c7
--- /dev/null
+++ b/src/crankshaft/compilation-phase.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_COMPILATION_PHASE_H_
+#define V8_CRANKSHAFT_COMPILATION_PHASE_H_
+
+#include "src/allocation.h"
+#include "src/compiler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
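+// Scoped helper that, when --hydrogen-stats is enabled, times a compilation
+// phase and attributes its zone memory usage to the phase name.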
+class CompilationPhase BASE_EMBEDDED {
+ public:
+  CompilationPhase(const char* name, CompilationInfo* info);
+  ~CompilationPhase();
+
+ protected:
+  bool ShouldProduceTraceOutput() const;
+
+  const char* name() const { return name_; }
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const { return info()->isolate(); }
+  Zone* zone() { return &zone_; }
+
+ private:
+  const char* name_;
+  CompilationInfo* info_;
+  Zone zone_;
+  size_t info_zone_start_allocation_size_;
+  base::ElapsedTimer timer_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_COMPILATION_PHASE_H_
diff --git a/src/crankshaft/hydrogen-bch.cc b/src/crankshaft/hydrogen-bch.cc
deleted file mode 100644
index 060e0bc..0000000
--- a/src/crankshaft/hydrogen-bch.cc
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-bch.h"
-
-namespace v8 {
-namespace internal {
-
-/*
- * This class is a table with one element for each basic block.
- *
- * It is used to check if, inside one loop, all execution paths contain
- * a bounds check for a particular [index, length] combination.
- * The reason is that if there is a path that stays in the loop without
- * executing a check then the check cannot be hoisted out of the loop (it
- * would likely fail and cause a deopt for no good reason).
- * We also check if there are paths that exit the loop early, and if so we
- * perform the hoisting only if graph()->use_optimistic_licm() is true.
- * The reason is that such paths are relatively common and harmless (like in
- * a "search" method that scans an array until an element is found), but in
- * some cases they could cause a deopt if we hoist the check, so this is a
- * situation we need to detect.
- */
-class InductionVariableBlocksTable BASE_EMBEDDED {
- public:
-  class Element {
-   public:
-    static const int kNoBlock = -1;
-
-    HBasicBlock* block() { return block_; }
-    void set_block(HBasicBlock* block) { block_ = block; }
-    bool is_start() { return is_start_; }
-    bool is_proper_exit() { return is_proper_exit_; }
-    bool is_in_loop() { return is_in_loop_; }
-    bool has_check() { return has_check_; }
-    void set_has_check() { has_check_ = true; }
-    InductionVariableLimitUpdate* additional_limit() {
-      return &additional_limit_;
-    }
-
-    /*
-     * Initializes the table element for a given loop (identified by its
-     * induction variable).
-     */
-    void InitializeLoop(InductionVariableData* data) {
-      DCHECK(data->limit() != NULL);
-      HLoopInformation* loop = data->phi()->block()->current_loop();
-      is_start_ = (block() == loop->loop_header());
-      is_proper_exit_ = (block() == data->induction_exit_target());
-      is_in_loop_ = loop->IsNestedInThisLoop(block()->current_loop());
-      has_check_ = false;
-    }
-
-    // Utility methods to iterate over dominated blocks.
-    void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; }
-    HBasicBlock* CurrentDominatedBlock() {
-      DCHECK(current_dominated_block_ != kNoBlock);
-      return current_dominated_block_ < block()->dominated_blocks()->length() ?
-          block()->dominated_blocks()->at(current_dominated_block_) : NULL;
-    }
-    HBasicBlock* NextDominatedBlock() {
-      current_dominated_block_++;
-      return CurrentDominatedBlock();
-    }
-
-    Element()
-        : block_(NULL), is_start_(false), is_proper_exit_(false),
-          has_check_(false), additional_limit_(),
-          current_dominated_block_(kNoBlock) {}
-
-   private:
-    HBasicBlock* block_;
-    bool is_start_;
-    bool is_proper_exit_;
-    bool is_in_loop_;
-    bool has_check_;
-    InductionVariableLimitUpdate additional_limit_;
-    int current_dominated_block_;
-  };
-
-  HGraph* graph() const { return graph_; }
-  Counters* counters() const { return graph()->isolate()->counters(); }
-  HBasicBlock* loop_header() const { return loop_header_; }
-  Element* at(int index) const { return &(elements_.at(index)); }
-  Element* at(HBasicBlock* block) const { return at(block->block_id()); }
-
-  void AddCheckAt(HBasicBlock* block) {
-    at(block->block_id())->set_has_check();
-  }
-
-  /*
-   * Initializes the table for a given loop (identified by its induction
-   * variable).
-   */
-  void InitializeLoop(InductionVariableData* data) {
-    for (int i = 0; i < graph()->blocks()->length(); i++) {
-      at(i)->InitializeLoop(data);
-    }
-    loop_header_ = data->phi()->block()->current_loop()->loop_header();
-  }
-
-
-  enum Hoistability {
-    HOISTABLE,
-    OPTIMISTICALLY_HOISTABLE,
-    NOT_HOISTABLE
-  };
-
-  /*
-   * This method checks if it is appropriate to hoist the bounds checks on an
-   * induction variable out of the loop.
-   * The problem is that in the loop code graph there could be execution paths
-   * where the check is not performed, but hoisting the check has the same
-   * semantics as performing it at every loop iteration, which could cause
-   * unnecessary check failures (which would mean unnecessary deoptimizations).
- * The method returns NOT_HOISTABLE if some path performs an iteration (loops
- * back to the header) without meeting a check, OPTIMISTICALLY_HOISTABLE if
- * paths that exit the loop early are found, and HOISTABLE otherwise.
-   */
-  Hoistability CheckHoistability() {
-    for (int i = 0; i < elements_.length(); i++) {
-      at(i)->ResetCurrentDominatedBlock();
-    }
-    bool unsafe = false;
-
-    HBasicBlock* current = loop_header();
-    while (current != NULL) {
-      HBasicBlock* next;
-
-      if (at(current)->has_check() || !at(current)->is_in_loop()) {
-        // We found a check or we reached a dominated block out of the loop,
-        // therefore this block is safe and we can backtrack.
-        next = NULL;
-      } else {
-        for (int i = 0; i < current->end()->SuccessorCount(); i ++) {
-          Element* successor = at(current->end()->SuccessorAt(i));
-
-          if (!successor->is_in_loop()) {
-            if (!successor->is_proper_exit()) {
-              // We found a path that exits the loop early, and is not the exit
-              // related to the induction limit, therefore hoisting checks is
-              // an optimistic assumption.
-              unsafe = true;
-            }
-          }
-
-          if (successor->is_start()) {
-            // We found a path that does one loop iteration without meeting any
-            // check, therefore hoisting checks would be likely to cause
-            // unnecessary deopts.
-            return NOT_HOISTABLE;
-          }
-        }
-
-        next = at(current)->NextDominatedBlock();
-      }
-
-      // If we have no next block we need to backtrack the tree traversal.
-      while (next == NULL) {
-        current = current->dominator();
-        if (current != NULL) {
-          next = at(current)->NextDominatedBlock();
-        } else {
-          // We reached the root: next stays NULL.
-          next = NULL;
-          break;
-        }
-      }
-
-      current = next;
-    }
-
-    return unsafe ? OPTIMISTICALLY_HOISTABLE : HOISTABLE;
-  }
-
-  explicit InductionVariableBlocksTable(HGraph* graph)
-    : graph_(graph), loop_header_(NULL),
-      elements_(graph->blocks()->length(), graph->zone()) {
-    for (int i = 0; i < graph->blocks()->length(); i++) {
-      Element element;
-      element.set_block(graph->blocks()->at(i));
-      elements_.Add(element, graph->zone());
-      DCHECK(at(i)->block()->block_id() == i);
-    }
-  }
-
-  // Tries to hoist a check out of its induction loop.
-  void ProcessRelatedChecks(
-      InductionVariableData::InductionVariableCheck* check,
-      InductionVariableData* data) {
-    HValue* length = check->check()->length();
-    check->set_processed();
-    HBasicBlock* header =
-        data->phi()->block()->current_loop()->loop_header();
-    HBasicBlock* pre_header = header->predecessors()->at(0);
-    // Check that the limit is defined in the loop preheader.
-    if (!data->limit()->IsInteger32Constant()) {
-      HBasicBlock* limit_block = data->limit()->block();
-      if (limit_block != pre_header &&
-          !limit_block->Dominates(pre_header)) {
-        return;
-      }
-    }
-    // Check that the length and limit have compatible representations.
-    if (!(data->limit()->representation().Equals(
-            length->representation()) ||
-        data->limit()->IsInteger32Constant())) {
-      return;
-    }
-    // Check that the length is defined in the loop preheader.
-    if (check->check()->length()->block() != pre_header &&
-        !check->check()->length()->block()->Dominates(pre_header)) {
-      return;
-    }
-
-    // Add checks to the table.
-    for (InductionVariableData::InductionVariableCheck* current_check = check;
-         current_check != NULL;
-         current_check = current_check->next()) {
-      if (current_check->check()->length() != length) continue;
-
-      AddCheckAt(current_check->check()->block());
-      current_check->set_processed();
-    }
-
-    // Check that we will not cause unwanted deoptimizations.
-    Hoistability hoistability = CheckHoistability();
-    if (hoistability == NOT_HOISTABLE ||
-        (hoistability == OPTIMISTICALLY_HOISTABLE &&
-         !graph()->use_optimistic_licm())) {
-      return;
-    }
-
-    // We will do the hoisting, but we must see if the limit is "limit" or if
-    // all checks are done on constants: if all checks are done against the
-    // same constant limit, we will use that instead of the induction limit.
-    bool has_upper_constant_limit = true;
-    int32_t upper_constant_limit =
-        check->HasUpperLimit() ? check->upper_limit() : 0;
-    for (InductionVariableData::InductionVariableCheck* current_check = check;
-         current_check != NULL;
-         current_check = current_check->next()) {
-      has_upper_constant_limit =
-          has_upper_constant_limit && current_check->HasUpperLimit() &&
-          current_check->upper_limit() == upper_constant_limit;
-      counters()->bounds_checks_eliminated()->Increment();
-      current_check->check()->set_skip_check();
-    }
-
-    // Choose the appropriate limit.
-    Zone* zone = graph()->zone();
-    HValue* context = graph()->GetInvalidContext();
-    HValue* limit = data->limit();
-    if (has_upper_constant_limit) {
-      HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
-                                            upper_constant_limit);
-      new_limit->InsertBefore(pre_header->end());
-      limit = new_limit;
-    }
-
-    // If necessary, redefine the limit in the preheader.
-    if (limit->IsInteger32Constant() &&
-        limit->block() != pre_header &&
-        !limit->block()->Dominates(pre_header)) {
-      HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
-                                            limit->GetInteger32Constant());
-      new_limit->InsertBefore(pre_header->end());
-      limit = new_limit;
-    }
-
-    // Do the hoisting.
-    HBoundsCheck* hoisted_check = HBoundsCheck::New(
-        graph()->isolate(), zone, context, limit, check->check()->length());
-    hoisted_check->InsertBefore(pre_header->end());
-    hoisted_check->set_allow_equality(true);
-    counters()->bounds_checks_hoisted()->Increment();
-  }
-
-  void CollectInductionVariableData(HBasicBlock* bb) {
-    bool additional_limit = false;
-
-    for (int i = 0; i < bb->phis()->length(); i++) {
-      HPhi* phi = bb->phis()->at(i);
-      phi->DetectInductionVariable();
-    }
-
-    additional_limit = InductionVariableData::ComputeInductionVariableLimit(
-        bb, at(bb)->additional_limit());
-
-    if (additional_limit) {
-      at(bb)->additional_limit()->updated_variable->
-          UpdateAdditionalLimit(at(bb)->additional_limit());
-    }
-
-    for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
-      if (!i->IsBoundsCheck()) continue;
-      HBoundsCheck* check = HBoundsCheck::cast(i);
-      InductionVariableData::BitwiseDecompositionResult decomposition;
-      InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
-      if (!decomposition.base->IsPhi()) continue;
-      HPhi* phi = HPhi::cast(decomposition.base);
-
-      if (!phi->IsInductionVariable()) continue;
-      InductionVariableData* data = phi->induction_variable_data();
-
-      // For now ignore loops decrementing the index.
-      if (data->increment() <= 0) continue;
-      if (!data->LowerLimitIsNonNegativeConstant()) continue;
-
-      // TODO(mmassi): skip OSR values for check->length().
-      if (check->length() == data->limit() ||
-          check->length() == data->additional_upper_limit()) {
-        counters()->bounds_checks_eliminated()->Increment();
-        check->set_skip_check();
-        continue;
-      }
-
-      if (!phi->IsLimitedInductionVariable()) continue;
-
-      int32_t limit = data->ComputeUpperLimit(decomposition.and_mask,
-                                              decomposition.or_mask);
-      phi->induction_variable_data()->AddCheck(check, limit);
-    }
-
-    for (int i = 0; i < bb->dominated_blocks()->length(); i++) {
-      CollectInductionVariableData(bb->dominated_blocks()->at(i));
-    }
-
-    if (additional_limit) {
-      at(bb->block_id())->additional_limit()->updated_variable->
-          UpdateAdditionalLimit(at(bb->block_id())->additional_limit());
-    }
-  }
-
-  void EliminateRedundantBoundsChecks(HBasicBlock* bb) {
-    for (int i = 0; i < bb->phis()->length(); i++) {
-      HPhi* phi = bb->phis()->at(i);
-      if (!phi->IsLimitedInductionVariable()) continue;
-
-      InductionVariableData* induction_data = phi->induction_variable_data();
-      InductionVariableData::ChecksRelatedToLength* current_length_group =
-          induction_data->checks();
-      while (current_length_group != NULL) {
-        current_length_group->CloseCurrentBlock();
-        InductionVariableData::InductionVariableCheck* current_base_check =
-            current_length_group->checks();
-        InitializeLoop(induction_data);
-
-        while (current_base_check != NULL) {
-          ProcessRelatedChecks(current_base_check, induction_data);
-          while (current_base_check != NULL &&
-                 current_base_check->processed()) {
-            current_base_check = current_base_check->next();
-          }
-        }
-
-        current_length_group = current_length_group->next();
-      }
-    }
-  }
-
- private:
-  HGraph* graph_;
-  HBasicBlock* loop_header_;
-  ZoneList<Element> elements_;
-};
-
-
-void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
-  InductionVariableBlocksTable table(graph());
-  table.CollectInductionVariableData(graph()->entry_block());
-  for (int i = 0; i < graph()->blocks()->length(); i++) {
-    table.EliminateRedundantBoundsChecks(graph()->blocks()->at(i));
-  }
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/crankshaft/hydrogen-bch.h b/src/crankshaft/hydrogen-bch.h
deleted file mode 100644
index cdcd407..0000000
--- a/src/crankshaft/hydrogen-bch.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_BCH_H_
-#define V8_CRANKSHAFT_HYDROGEN_BCH_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HBoundsCheckHoistingPhase : public HPhase {
- public:
-  explicit HBoundsCheckHoistingPhase(HGraph* graph)
-      : HPhase("H_Bounds checks hoisting", graph) { }
-
-  void Run() {
-    HoistRedundantBoundsChecks();
-  }
-
- private:
-  void HoistRedundantBoundsChecks();
-
-  DISALLOW_COPY_AND_ASSIGN(HBoundsCheckHoistingPhase);
-};
-
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_CRANKSHAFT_HYDROGEN_BCE_H_
diff --git a/src/crankshaft/hydrogen-environment-liveness.cc b/src/crankshaft/hydrogen-environment-liveness.cc
index ae0bd08..7965a94 100644
--- a/src/crankshaft/hydrogen-environment-liveness.cc
+++ b/src/crankshaft/hydrogen-environment-liveness.cc
@@ -37,9 +37,9 @@
     int index, HSimulate* simulate) {
   int operand_index = simulate->ToOperandIndex(index);
   if (operand_index == -1) {
-    simulate->AddAssignedValue(index, graph()->GetConstantUndefined());
+    simulate->AddAssignedValue(index, graph()->GetConstantOptimizedOut());
   } else {
-    simulate->SetOperandAt(operand_index, graph()->GetConstantUndefined());
+    simulate->SetOperandAt(operand_index, graph()->GetConstantOptimizedOut());
   }
 }
 
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
index a9c6228..7de8b5d 100644
--- a/src/crankshaft/hydrogen-instructions.cc
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -25,6 +25,8 @@
 #include "src/crankshaft/mips/lithium-mips.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/crankshaft/mips64/lithium-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/crankshaft/x87/lithium-x87.h"  // NOLINT
 #else
@@ -773,8 +775,6 @@
     case HValue::kArgumentsLength:
     case HValue::kArgumentsObject:
     case HValue::kBlockEntry:
-    case HValue::kBoundsCheckBaseIndexInformation:
-    case HValue::kCallFunction:
     case HValue::kCallNewArray:
     case HValue::kCapturedObject:
     case HValue::kClassOfTestAndBranch:
@@ -815,7 +815,6 @@
     case HValue::kReturn:
     case HValue::kSeqStringGetChar:
     case HValue::kStoreCodeEntry:
-    case HValue::kStoreFrameContext:
     case HValue::kStoreKeyed:
     case HValue::kStoreNamedField:
     case HValue::kStoreNamedGeneric:
@@ -832,7 +831,6 @@
     case HValue::kBitwise:
     case HValue::kBoundsCheck:
     case HValue::kBranch:
-    case HValue::kCallJSFunction:
     case HValue::kCallRuntime:
     case HValue::kCallWithDescriptor:
     case HValue::kChange:
@@ -873,7 +871,6 @@
     case HValue::kStringAdd:
     case HValue::kStringCompareAndBranch:
     case HValue::kSub:
-    case HValue::kToFastProperties:
     case HValue::kTransitionElementsKind:
     case HValue::kTrapAllocationMemento:
     case HValue::kTypeof:
@@ -907,97 +904,24 @@
 }
 
 
-std::ostream& HCallJSFunction::PrintDataTo(std::ostream& os) const {  // NOLINT
-  return os << NameOf(function()) << " #" << argument_count();
-}
-
-
-HCallJSFunction* HCallJSFunction::New(Isolate* isolate, Zone* zone,
-                                      HValue* context, HValue* function,
-                                      int argument_count) {
-  bool has_stack_check = false;
-  if (function->IsConstant()) {
-    HConstant* fun_const = HConstant::cast(function);
-    Handle<JSFunction> jsfun =
-        Handle<JSFunction>::cast(fun_const->handle(isolate));
-    has_stack_check = !jsfun.is_null() &&
-        (jsfun->code()->kind() == Code::FUNCTION ||
-         jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
-  }
-
-  return new (zone) HCallJSFunction(function, argument_count, has_stack_check);
-}
-
-
 std::ostream& HBinaryCall::PrintDataTo(std::ostream& os) const {  // NOLINT
   return os << NameOf(first()) << " " << NameOf(second()) << " #"
             << argument_count();
 }
 
+std::ostream& HInvokeFunction::PrintTo(std::ostream& os) const {  // NOLINT
+  if (tail_call_mode() == TailCallMode::kAllow) os << "Tail";
+  return HBinaryCall::PrintTo(os);
+}
 
-std::ostream& HCallFunction::PrintDataTo(std::ostream& os) const {  // NOLINT
-  os << NameOf(context()) << " " << NameOf(function());
-  if (HasVectorAndSlot()) {
-    os << " (type-feedback-vector icslot " << slot().ToInt() << ")";
+std::ostream& HInvokeFunction::PrintDataTo(std::ostream& os) const {  // NOLINT
+  HBinaryCall::PrintDataTo(os);
+  if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    os << ", JSTailCall";
   }
-  os << " (convert mode" << convert_mode() << ")";
   return os;
 }
 
-
-void HBoundsCheck::ApplyIndexChange() {
-  if (skip_check()) return;
-
-  DecompositionResult decomposition;
-  bool index_is_decomposable = index()->TryDecompose(&decomposition);
-  if (index_is_decomposable) {
-    DCHECK(decomposition.base() == base());
-    if (decomposition.offset() == offset() &&
-        decomposition.scale() == scale()) return;
-  } else {
-    return;
-  }
-
-  ReplaceAllUsesWith(index());
-
-  HValue* current_index = decomposition.base();
-  int actual_offset = decomposition.offset() + offset();
-  int actual_scale = decomposition.scale() + scale();
-
-  HGraph* graph = block()->graph();
-  Isolate* isolate = graph->isolate();
-  Zone* zone = graph->zone();
-  HValue* context = graph->GetInvalidContext();
-  if (actual_offset != 0) {
-    HConstant* add_offset =
-        HConstant::New(isolate, zone, context, actual_offset);
-    add_offset->InsertBefore(this);
-    HInstruction* add =
-        HAdd::New(isolate, zone, context, current_index, add_offset);
-    add->InsertBefore(this);
-    add->AssumeRepresentation(index()->representation());
-    add->ClearFlag(kCanOverflow);
-    current_index = add;
-  }
-
-  if (actual_scale != 0) {
-    HConstant* sar_scale = HConstant::New(isolate, zone, context, actual_scale);
-    sar_scale->InsertBefore(this);
-    HInstruction* sar =
-        HSar::New(isolate, zone, context, current_index, sar_scale);
-    sar->InsertBefore(this);
-    sar->AssumeRepresentation(index()->representation());
-    current_index = sar;
-  }
-
-  SetOperandAt(0, current_index);
-
-  base_ = NULL;
-  offset_ = 0;
-  scale_ = 0;
-}
-
-
 std::ostream& HBoundsCheck::PrintDataTo(std::ostream& os) const {  // NOLINT
   os << NameOf(index()) << " " << NameOf(length());
   if (base() != NULL && (offset() != 0 || scale() != 0)) {
@@ -1053,20 +977,16 @@
 }
 
 
-std::ostream& HBoundsCheckBaseIndexInformation::PrintDataTo(
-    std::ostream& os) const {  // NOLINT
-  // TODO(svenpanne) This 2nd base_index() looks wrong...
-  return os << "base: " << NameOf(base_index())
-            << ", check: " << NameOf(base_index());
-}
-
-
 std::ostream& HCallWithDescriptor::PrintDataTo(
     std::ostream& os) const {  // NOLINT
   for (int i = 0; i < OperandCount(); i++) {
     os << NameOf(OperandAt(i)) << " ";
   }
-  return os << "#" << argument_count();
+  os << "#" << argument_count();
+  if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    os << ", JSTailCall";
+  }
+  return os;
 }
 
 
@@ -1129,23 +1049,23 @@
 
 
 Representation HBranch::observed_input_representation(int index) {
-  if (expected_input_types_.Contains(ToBooleanStub::NULL_TYPE) ||
-      expected_input_types_.Contains(ToBooleanStub::SPEC_OBJECT) ||
-      expected_input_types_.Contains(ToBooleanStub::STRING) ||
-      expected_input_types_.Contains(ToBooleanStub::SYMBOL) ||
-      expected_input_types_.Contains(ToBooleanStub::SIMD_VALUE)) {
+  if (expected_input_types_.Contains(ToBooleanICStub::NULL_TYPE) ||
+      expected_input_types_.Contains(ToBooleanICStub::SPEC_OBJECT) ||
+      expected_input_types_.Contains(ToBooleanICStub::STRING) ||
+      expected_input_types_.Contains(ToBooleanICStub::SYMBOL) ||
+      expected_input_types_.Contains(ToBooleanICStub::SIMD_VALUE)) {
     return Representation::Tagged();
   }
-  if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
-    if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+  if (expected_input_types_.Contains(ToBooleanICStub::UNDEFINED)) {
+    if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
       return Representation::Double();
     }
     return Representation::Tagged();
   }
-  if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+  if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
     return Representation::Double();
   }
-  if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+  if (expected_input_types_.Contains(ToBooleanICStub::SMI)) {
     return Representation::Smi();
   }
   return Representation::None();
@@ -1563,7 +1483,8 @@
           val, representation(), false, false));
     }
   }
-  if (op() == kMathFloor && value()->IsDiv() && value()->HasOneUse()) {
+  if (op() == kMathFloor && representation().IsSmiOrInteger32() &&
+      value()->IsDiv() && value()->HasOneUse()) {
     HDiv* hdiv = HDiv::cast(value());
 
     HValue* left = hdiv->left();
@@ -1980,452 +1901,6 @@
 }
 
 
-InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
-  if (phi->block()->loop_information() == NULL) return NULL;
-  if (phi->OperandCount() != 2) return NULL;
-  int32_t candidate_increment;
-
-  candidate_increment = ComputeIncrement(phi, phi->OperandAt(0));
-  if (candidate_increment != 0) {
-    return new(phi->block()->graph()->zone())
-        InductionVariableData(phi, phi->OperandAt(1), candidate_increment);
-  }
-
-  candidate_increment = ComputeIncrement(phi, phi->OperandAt(1));
-  if (candidate_increment != 0) {
-    return new(phi->block()->graph()->zone())
-        InductionVariableData(phi, phi->OperandAt(0), candidate_increment);
-  }
-
-  return NULL;
-}
-
-
-/*
- * This function tries to match the following patterns (and all the relevant
- * variants related to |, & and + being commutative):
- * base | constant_or_mask
- * base & constant_and_mask
- * (base + constant_offset) & constant_and_mask
- * (base - constant_offset) & constant_and_mask
- */
-void InductionVariableData::DecomposeBitwise(
-    HValue* value,
-    BitwiseDecompositionResult* result) {
-  HValue* base = IgnoreOsrValue(value);
-  result->base = value;
-
-  if (!base->representation().IsInteger32()) return;
-
-  if (base->IsBitwise()) {
-    bool allow_offset = false;
-    int32_t mask = 0;
-
-    HBitwise* bitwise = HBitwise::cast(base);
-    if (bitwise->right()->IsInteger32Constant()) {
-      mask = bitwise->right()->GetInteger32Constant();
-      base = bitwise->left();
-    } else if (bitwise->left()->IsInteger32Constant()) {
-      mask = bitwise->left()->GetInteger32Constant();
-      base = bitwise->right();
-    } else {
-      return;
-    }
-    if (bitwise->op() == Token::BIT_AND) {
-      result->and_mask = mask;
-      allow_offset = true;
-    } else if (bitwise->op() == Token::BIT_OR) {
-      result->or_mask = mask;
-    } else {
-      return;
-    }
-
-    result->context = bitwise->context();
-
-    if (allow_offset) {
-      if (base->IsAdd()) {
-        HAdd* add = HAdd::cast(base);
-        if (add->right()->IsInteger32Constant()) {
-          base = add->left();
-        } else if (add->left()->IsInteger32Constant()) {
-          base = add->right();
-        }
-      } else if (base->IsSub()) {
-        HSub* sub = HSub::cast(base);
-        if (sub->right()->IsInteger32Constant()) {
-          base = sub->left();
-        }
-      }
-    }
-
-    result->base = base;
-  }
-}
-
-
-void InductionVariableData::AddCheck(HBoundsCheck* check,
-                                     int32_t upper_limit) {
-  DCHECK(limit_validity() != NULL);
-  if (limit_validity() != check->block() &&
-      !limit_validity()->Dominates(check->block())) return;
-  if (!phi()->block()->current_loop()->IsNestedInThisLoop(
-      check->block()->current_loop())) return;
-
-  ChecksRelatedToLength* length_checks = checks();
-  while (length_checks != NULL) {
-    if (length_checks->length() == check->length()) break;
-    length_checks = length_checks->next();
-  }
-  if (length_checks == NULL) {
-    length_checks = new(check->block()->zone())
-        ChecksRelatedToLength(check->length(), checks());
-    checks_ = length_checks;
-  }
-
-  length_checks->AddCheck(check, upper_limit);
-}
-
-
-void InductionVariableData::ChecksRelatedToLength::CloseCurrentBlock() {
-  if (checks() != NULL) {
-    InductionVariableCheck* c = checks();
-    HBasicBlock* current_block = c->check()->block();
-    while (c != NULL && c->check()->block() == current_block) {
-      c->set_upper_limit(current_upper_limit_);
-      c = c->next();
-    }
-  }
-}
-
-
-void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
-    Token::Value token,
-    int32_t mask,
-    HValue* index_base,
-    HValue* context) {
-  DCHECK(first_check_in_block() != NULL);
-  HValue* previous_index = first_check_in_block()->index();
-  DCHECK(context != NULL);
-
-  Zone* zone = index_base->block()->graph()->zone();
-  Isolate* isolate = index_base->block()->graph()->isolate();
-  set_added_constant(HConstant::New(isolate, zone, context, mask));
-  if (added_index() != NULL) {
-    added_constant()->InsertBefore(added_index());
-  } else {
-    added_constant()->InsertBefore(first_check_in_block());
-  }
-
-  if (added_index() == NULL) {
-    first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
-    HInstruction* new_index = HBitwise::New(isolate, zone, context, token,
-                                            index_base, added_constant());
-    DCHECK(new_index->IsBitwise());
-    new_index->ClearAllSideEffects();
-    new_index->AssumeRepresentation(Representation::Integer32());
-    set_added_index(HBitwise::cast(new_index));
-    added_index()->InsertBefore(first_check_in_block());
-  }
-  DCHECK(added_index()->op() == token);
-
-  added_index()->SetOperandAt(1, index_base);
-  added_index()->SetOperandAt(2, added_constant());
-  first_check_in_block()->SetOperandAt(0, added_index());
-  if (previous_index->HasNoUses()) {
-    previous_index->DeleteAndReplaceWith(NULL);
-  }
-}
-
-void InductionVariableData::ChecksRelatedToLength::AddCheck(
-    HBoundsCheck* check,
-    int32_t upper_limit) {
-  BitwiseDecompositionResult decomposition;
-  InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
-
-  if (first_check_in_block() == NULL ||
-      first_check_in_block()->block() != check->block()) {
-    CloseCurrentBlock();
-
-    first_check_in_block_ = check;
-    set_added_index(NULL);
-    set_added_constant(NULL);
-    current_and_mask_in_block_ = decomposition.and_mask;
-    current_or_mask_in_block_ = decomposition.or_mask;
-    current_upper_limit_ = upper_limit;
-
-    InductionVariableCheck* new_check = new(check->block()->graph()->zone())
-        InductionVariableCheck(check, checks_, upper_limit);
-    checks_ = new_check;
-    return;
-  }
-
-  if (upper_limit > current_upper_limit()) {
-    current_upper_limit_ = upper_limit;
-  }
-
-  if (decomposition.and_mask != 0 &&
-      current_or_mask_in_block() == 0) {
-    if (current_and_mask_in_block() == 0 ||
-        decomposition.and_mask > current_and_mask_in_block()) {
-      UseNewIndexInCurrentBlock(Token::BIT_AND,
-                                decomposition.and_mask,
-                                decomposition.base,
-                                decomposition.context);
-      current_and_mask_in_block_ = decomposition.and_mask;
-    }
-    check->set_skip_check();
-  }
-  if (current_and_mask_in_block() == 0) {
-    if (decomposition.or_mask > current_or_mask_in_block()) {
-      UseNewIndexInCurrentBlock(Token::BIT_OR,
-                                decomposition.or_mask,
-                                decomposition.base,
-                                decomposition.context);
-      current_or_mask_in_block_ = decomposition.or_mask;
-    }
-    check->set_skip_check();
-  }
-
-  if (!check->skip_check()) {
-    InductionVariableCheck* new_check = new(check->block()->graph()->zone())
-        InductionVariableCheck(check, checks_, upper_limit);
-    checks_ = new_check;
-  }
-}
-
-
-/*
- * This method detects if phi is an induction variable, with phi_operand as
- * its "incremented" value (the other operand would be the "base" value).
- *
- * It checks if phi_operand has the form "phi + constant".
- * If yes, the constant is the increment that the induction variable gets at
- * every loop iteration.
- * Otherwise it returns 0.
- */
-int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
-                                                HValue* phi_operand) {
-  if (!phi_operand->representation().IsSmiOrInteger32()) return 0;
-
-  if (phi_operand->IsAdd()) {
-    HAdd* operation = HAdd::cast(phi_operand);
-    if (operation->left() == phi &&
-        operation->right()->IsInteger32Constant()) {
-      return operation->right()->GetInteger32Constant();
-    } else if (operation->right() == phi &&
-               operation->left()->IsInteger32Constant()) {
-      return operation->left()->GetInteger32Constant();
-    }
-  } else if (phi_operand->IsSub()) {
-    HSub* operation = HSub::cast(phi_operand);
-    if (operation->left() == phi &&
-        operation->right()->IsInteger32Constant()) {
-      int constant = operation->right()->GetInteger32Constant();
-      if (constant == kMinInt) return 0;
-      return -constant;
-    }
-  }
-
-  return 0;
-}
-
-
-/*
- * Swaps the information in "update" with the one contained in "this".
- * The swapping is important because this method is used while doing a
- * dominator tree traversal, and "update" will retain the old data that
- * will be restored while backtracking.
- */
-void InductionVariableData::UpdateAdditionalLimit(
-    InductionVariableLimitUpdate* update) {
-  DCHECK(update->updated_variable == this);
-  if (update->limit_is_upper) {
-    swap(&additional_upper_limit_, &update->limit);
-    swap(&additional_upper_limit_is_included_, &update->limit_is_included);
-  } else {
-    swap(&additional_lower_limit_, &update->limit);
-    swap(&additional_lower_limit_is_included_, &update->limit_is_included);
-  }
-}
-
-
-int32_t InductionVariableData::ComputeUpperLimit(int32_t and_mask,
-                                                 int32_t or_mask) {
-  // Should be Smi::kMaxValue but it must fit 32 bits; lower is safe anyway.
-  const int32_t MAX_LIMIT = 1 << 30;
-
-  int32_t result = MAX_LIMIT;
-
-  if (limit() != NULL &&
-      limit()->IsInteger32Constant()) {
-    int32_t limit_value = limit()->GetInteger32Constant();
-    if (!limit_included()) {
-      limit_value--;
-    }
-    if (limit_value < result) result = limit_value;
-  }
-
-  if (additional_upper_limit() != NULL &&
-      additional_upper_limit()->IsInteger32Constant()) {
-    int32_t limit_value = additional_upper_limit()->GetInteger32Constant();
-    if (!additional_upper_limit_is_included()) {
-      limit_value--;
-    }
-    if (limit_value < result) result = limit_value;
-  }
-
-  if (and_mask > 0 && and_mask < MAX_LIMIT) {
-    if (and_mask < result) result = and_mask;
-    return result;
-  }
-
-  // Add the effect of the or_mask.
-  result |= or_mask;
-
-  return result >= MAX_LIMIT ? kNoLimit : result;
-}
-
-
-HValue* InductionVariableData::IgnoreOsrValue(HValue* v) {
-  if (!v->IsPhi()) return v;
-  HPhi* phi = HPhi::cast(v);
-  if (phi->OperandCount() != 2) return v;
-  if (phi->OperandAt(0)->block()->is_osr_entry()) {
-    return phi->OperandAt(1);
-  } else if (phi->OperandAt(1)->block()->is_osr_entry()) {
-    return phi->OperandAt(0);
-  } else {
-    return v;
-  }
-}
-
-
-InductionVariableData* InductionVariableData::GetInductionVariableData(
-    HValue* v) {
-  v = IgnoreOsrValue(v);
-  if (v->IsPhi()) {
-    return HPhi::cast(v)->induction_variable_data();
-  }
-  return NULL;
-}
-
-
-/*
- * Check if a conditional branch to "current_branch" with token "token" is
- * the branch that keeps the induction loop running (and, conversely, will
- * terminate it if the "other_branch" is taken).
- *
- * Three conditions must be met:
- * - "current_branch" must be in the induction loop.
- * - "other_branch" must be out of the induction loop.
- * - "token" and the induction increment must be "compatible": the token should
- *   be a condition that keeps the execution inside the loop until the limit is
- *   reached.
- */
-bool InductionVariableData::CheckIfBranchIsLoopGuard(
-    Token::Value token,
-    HBasicBlock* current_branch,
-    HBasicBlock* other_branch) {
-  if (!phi()->block()->current_loop()->IsNestedInThisLoop(
-      current_branch->current_loop())) {
-    return false;
-  }
-
-  if (phi()->block()->current_loop()->IsNestedInThisLoop(
-      other_branch->current_loop())) {
-    return false;
-  }
-
-  if (increment() > 0 && (token == Token::LT || token == Token::LTE)) {
-    return true;
-  }
-  if (increment() < 0 && (token == Token::GT || token == Token::GTE)) {
-    return true;
-  }
-  if (Token::IsInequalityOp(token) && (increment() == 1 || increment() == -1)) {
-    return true;
-  }
-
-  return false;
-}
-
-
-void InductionVariableData::ComputeLimitFromPredecessorBlock(
-    HBasicBlock* block,
-    LimitFromPredecessorBlock* result) {
-  if (block->predecessors()->length() != 1) return;
-  HBasicBlock* predecessor = block->predecessors()->at(0);
-  HInstruction* end = predecessor->last();
-
-  if (!end->IsCompareNumericAndBranch()) return;
-  HCompareNumericAndBranch* branch = HCompareNumericAndBranch::cast(end);
-
-  Token::Value token = branch->token();
-  if (!Token::IsArithmeticCompareOp(token)) return;
-
-  HBasicBlock* other_target;
-  if (block == branch->SuccessorAt(0)) {
-    other_target = branch->SuccessorAt(1);
-  } else {
-    other_target = branch->SuccessorAt(0);
-    token = Token::NegateCompareOp(token);
-    DCHECK(block == branch->SuccessorAt(1));
-  }
-
-  InductionVariableData* data;
-
-  data = GetInductionVariableData(branch->left());
-  HValue* limit = branch->right();
-  if (data == NULL) {
-    data = GetInductionVariableData(branch->right());
-    token = Token::ReverseCompareOp(token);
-    limit = branch->left();
-  }
-
-  if (data != NULL) {
-    result->variable = data;
-    result->token = token;
-    result->limit = limit;
-    result->other_target = other_target;
-  }
-}
-
-
-/*
- * Compute the limit that is imposed on an induction variable when entering
- * "block" (if any).
- * If the limit is the "proper" induction limit (the one that makes the loop
- * terminate when the induction variable reaches it) it is stored directly in
- * the induction variable data.
- * Otherwise the limit is written in "additional_limit" and the method
- * returns true.
- */
-bool InductionVariableData::ComputeInductionVariableLimit(
-    HBasicBlock* block,
-    InductionVariableLimitUpdate* additional_limit) {
-  LimitFromPredecessorBlock limit;
-  ComputeLimitFromPredecessorBlock(block, &limit);
-  if (!limit.LimitIsValid()) return false;
-
-  if (limit.variable->CheckIfBranchIsLoopGuard(limit.token,
-                                               block,
-                                               limit.other_target)) {
-    limit.variable->limit_ = limit.limit;
-    limit.variable->limit_included_ = limit.LimitIsIncluded();
-    limit.variable->limit_validity_ = block;
-    limit.variable->induction_exit_block_ = block->predecessors()->at(0);
-    limit.variable->induction_exit_target_ = limit.other_target;
-    return false;
-  } else {
-    additional_limit->updated_variable = limit.variable;
-    additional_limit->limit = limit.limit;
-    additional_limit->limit_is_upper = limit.LimitIsUpper();
-    additional_limit->limit_is_included = limit.LimitIsIncluded();
-    return true;
-  }
-}
-
-
 Range* HMathMinMax::InferRange(Zone* zone) {
   if (representation().IsSmiOrInteger32()) {
     Range* a = left()->range();
@@ -2652,7 +2127,11 @@
 
 
 std::ostream& HEnterInlined::PrintDataTo(std::ostream& os) const {  // NOLINT
-  return os << function()->debug_name()->ToCString().get();
+  os << function()->debug_name()->ToCString().get();
+  if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    os << ", JSTailCall";
+  }
+  return os;
 }
 
 
@@ -3272,6 +2751,17 @@
         ? FirstSuccessor() : SecondSuccessor();
     return true;
   }
+  if (value()->type().IsNull() || value()->type().IsUndefined()) {
+    *block = FirstSuccessor();
+    return true;
+  }
+  if (value()->type().IsBoolean() ||
+      value()->type().IsSmi() ||
+      value()->type().IsString() ||
+      value()->type().IsJSReceiver()) {
+    *block = SecondSuccessor();
+    return true;
+  }
   *block = NULL;
   return false;
 }
@@ -3767,12 +3257,12 @@
     }
   }
 
-  bool keep_new_space_iterable = FLAG_log_gc || FLAG_heap_stats;
+  bool keep_heap_iterable = FLAG_log_gc || FLAG_heap_stats;
 #ifdef VERIFY_HEAP
-  keep_new_space_iterable = keep_new_space_iterable || FLAG_verify_heap;
+  keep_heap_iterable = keep_heap_iterable || FLAG_verify_heap;
 #endif
 
-  if (keep_new_space_iterable && dominator_allocate->IsNewSpaceAllocation()) {
+  if (keep_heap_iterable) {
     dominator_allocate->MakePrefillWithFiller();
   } else {
     // TODO(hpayer): This is a short-term hack to make allocation mementos
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
index 22ed052..196a14f 100644
--- a/src/crankshaft/hydrogen-instructions.h
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -56,11 +56,8 @@
   V(Bitwise)                                  \
   V(BlockEntry)                               \
   V(BoundsCheck)                              \
-  V(BoundsCheckBaseIndexInformation)          \
   V(Branch)                                   \
   V(CallWithDescriptor)                       \
-  V(CallJSFunction)                           \
-  V(CallFunction)                             \
   V(CallNewArray)                             \
   V(CallRuntime)                              \
   V(CapturedObject)                           \
@@ -135,7 +132,6 @@
   V(StackCheck)                               \
   V(StoreCodeEntry)                           \
   V(StoreContextSlot)                         \
-  V(StoreFrameContext)                        \
   V(StoreKeyed)                               \
   V(StoreKeyedGeneric)                        \
   V(StoreNamedField)                          \
@@ -146,7 +142,6 @@
   V(StringCompareAndBranch)                   \
   V(Sub)                                      \
   V(ThisFunction)                             \
-  V(ToFastProperties)                         \
   V(TransitionElementsKind)                   \
   V(TrapAllocationMemento)                    \
   V(Typeof)                                   \
@@ -735,14 +730,6 @@
   virtual void Verify() = 0;
 #endif
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) {
-    if (RedefinedOperand() != NULL) {
-      return RedefinedOperand()->TryDecompose(decomposition);
-    } else {
-      return false;
-    }
-  }
-
   // Returns true conservatively if the program might be able to observe a
   // ToString() operation on this value.
   bool ToStringCanBeObserved() const {
@@ -1368,10 +1355,8 @@
 class HBranch final : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
-  DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
-                                 ToBooleanStub::Types);
-  DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
-                                 ToBooleanStub::Types,
+  DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanICStub::Types);
+  DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanICStub::Types,
                                  HBasicBlock*, HBasicBlock*);
 
   Representation RequiredInputRepresentation(int index) override {
@@ -1383,23 +1368,22 @@
 
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
-  ToBooleanStub::Types expected_input_types() const {
+  ToBooleanICStub::Types expected_input_types() const {
     return expected_input_types_;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Branch)
 
  private:
-  HBranch(HValue* value,
-          ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
-          HBasicBlock* true_target = NULL,
-          HBasicBlock* false_target = NULL)
+  HBranch(HValue* value, ToBooleanICStub::Types expected_input_types =
+                             ToBooleanICStub::Types(),
+          HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL)
       : HUnaryControlInstruction(value, true_target, false_target),
         expected_input_types_(expected_input_types) {
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  ToBooleanStub::Types expected_input_types_;
+  ToBooleanICStub::Types expected_input_types_;
 };
 
 
@@ -1954,10 +1938,12 @@
                             HConstant* closure_context, int arguments_count,
                             FunctionLiteral* function,
                             InliningKind inlining_kind, Variable* arguments_var,
-                            HArgumentsObject* arguments_object) {
-    return new (zone) HEnterInlined(return_id, closure, closure_context,
-                                    arguments_count, function, inlining_kind,
-                                    arguments_var, arguments_object, zone);
+                            HArgumentsObject* arguments_object,
+                            TailCallMode syntactic_tail_call_mode) {
+    return new (zone)
+        HEnterInlined(return_id, closure, closure_context, arguments_count,
+                      function, inlining_kind, arguments_var, arguments_object,
+                      syntactic_tail_call_mode, zone);
   }
 
   void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
@@ -1973,6 +1959,9 @@
   void set_arguments_pushed() { arguments_pushed_ = true; }
   FunctionLiteral* function() const { return function_; }
   InliningKind inlining_kind() const { return inlining_kind_; }
+  TailCallMode syntactic_tail_call_mode() const {
+    return syntactic_tail_call_mode_;
+  }
   BailoutId ReturnId() const { return return_id_; }
   int inlining_id() const { return inlining_id_; }
   void set_inlining_id(int inlining_id) { inlining_id_ = inlining_id; }
@@ -1991,7 +1980,7 @@
                 HConstant* closure_context, int arguments_count,
                 FunctionLiteral* function, InliningKind inlining_kind,
                 Variable* arguments_var, HArgumentsObject* arguments_object,
-                Zone* zone)
+                TailCallMode syntactic_tail_call_mode, Zone* zone)
       : return_id_(return_id),
         shared_(handle(closure->shared())),
         closure_(closure),
@@ -2000,6 +1989,7 @@
         arguments_pushed_(false),
         function_(function),
         inlining_kind_(inlining_kind),
+        syntactic_tail_call_mode_(syntactic_tail_call_mode),
         inlining_id_(0),
         arguments_var_(arguments_var),
         arguments_object_(arguments_object),
@@ -2013,6 +2003,7 @@
   bool arguments_pushed_;
   FunctionLiteral* function_;
   InliningKind inlining_kind_;
+  TailCallMode syntactic_tail_call_mode_;
   int inlining_id_;
   Variable* arguments_var_;
   HArgumentsObject* arguments_object_;
@@ -2220,51 +2211,17 @@
 };
 
 
-class HCallJSFunction final : public HCall<1> {
- public:
-  static HCallJSFunction* New(Isolate* isolate, Zone* zone, HValue* context,
-                              HValue* function, int argument_count);
-
-  HValue* function() const { return OperandAt(0); }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  Representation RequiredInputRepresentation(int index) final {
-    DCHECK(index == 0);
-    return Representation::Tagged();
-  }
-
-  bool HasStackCheck() final { return has_stack_check_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
-
- private:
-  // The argument count includes the receiver.
-  HCallJSFunction(HValue* function,
-                  int argument_count,
-                  bool has_stack_check)
-      : HCall<1>(argument_count),
-        has_stack_check_(has_stack_check) {
-      SetOperandAt(0, function);
-  }
-
-  bool has_stack_check_;
-};
-
-
-enum CallMode { NORMAL_CALL, TAIL_CALL };
-
-
 class HCallWithDescriptor final : public HInstruction {
  public:
-  static HCallWithDescriptor* New(Isolate* isolate, Zone* zone, HValue* context,
-                                  HValue* target, int argument_count,
-                                  CallInterfaceDescriptor descriptor,
-                                  const Vector<HValue*>& operands,
-                                  CallMode call_mode = NORMAL_CALL) {
-    HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
-        target, argument_count, descriptor, operands, call_mode, zone);
-    DCHECK(operands.length() == res->GetParameterCount());
+  static HCallWithDescriptor* New(
+      Isolate* isolate, Zone* zone, HValue* context, HValue* target,
+      int argument_count, CallInterfaceDescriptor descriptor,
+      const Vector<HValue*>& operands,
+      TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
+      TailCallMode tail_call_mode = TailCallMode::kDisallow) {
+    HCallWithDescriptor* res = new (zone)
+        HCallWithDescriptor(target, argument_count, descriptor, operands,
+                            syntactic_tail_call_mode, tail_call_mode, zone);
     return res;
   }
 
@@ -2286,7 +2243,16 @@
 
   HType CalculateInferredType() final { return HType::Tagged(); }
 
-  bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
+  // Defines whether this instruction corresponds to a JS call at tail position.
+  TailCallMode syntactic_tail_call_mode() const {
+    return SyntacticTailCallModeField::decode(bit_field_);
+  }
+
+  // Defines whether this call should be generated as a tail call.
+  TailCallMode tail_call_mode() const {
+    return TailCallModeField::decode(bit_field_);
+  }
+  bool IsTailCall() const { return tail_call_mode() == TailCallMode::kAllow; }
 
   virtual int argument_count() const {
     return argument_count_;
@@ -2306,14 +2272,18 @@
   // The argument count includes the receiver.
   HCallWithDescriptor(HValue* target, int argument_count,
                       CallInterfaceDescriptor descriptor,
-                      const Vector<HValue*>& operands, CallMode call_mode,
-                      Zone* zone)
+                      const Vector<HValue*>& operands,
+                      TailCallMode syntactic_tail_call_mode,
+                      TailCallMode tail_call_mode, Zone* zone)
       : descriptor_(descriptor),
         values_(GetParameterCount() + 1, zone),
         argument_count_(argument_count),
-        call_mode_(call_mode) {
+        bit_field_(
+            TailCallModeField::encode(tail_call_mode) |
+            SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+    DCHECK_EQ(operands.length(), GetParameterCount());
     // We can only tail call without any stack arguments.
-    DCHECK(call_mode != TAIL_CALL || argument_count == 0);
+    DCHECK(tail_call_mode != TailCallMode::kAllow || argument_count == 0);
     AddOperand(target, zone);
     for (int i = 0; i < operands.length(); i++) {
       AddOperand(operands[i], zone);
@@ -2338,97 +2308,75 @@
   CallInterfaceDescriptor descriptor_;
   ZoneList<HValue*> values_;
   int argument_count_;
-  CallMode call_mode_;
+  class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
+  class SyntacticTailCallModeField
+      : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
+  uint32_t bit_field_;
 };
 
 
 class HInvokeFunction final : public HBinaryCall {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
-
-  HInvokeFunction(HValue* context,
-                  HValue* function,
-                  Handle<JSFunction> known_function,
-                  int argument_count)
-      : HBinaryCall(context, function, argument_count),
-        known_function_(known_function) {
-    formal_parameter_count_ =
-        known_function.is_null()
-            ? 0
-            : known_function->shared()->internal_formal_parameter_count();
-    has_stack_check_ = !known_function.is_null() &&
-        (known_function->code()->kind() == Code::FUNCTION ||
-         known_function->code()->kind() == Code::OPTIMIZED_FUNCTION);
-  }
-
-  static HInvokeFunction* New(Isolate* isolate, Zone* zone, HValue* context,
-                              HValue* function,
-                              Handle<JSFunction> known_function,
-                              int argument_count) {
-    return new(zone) HInvokeFunction(context, function,
-                                     known_function, argument_count);
-  }
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HInvokeFunction, HValue*,
+                                              Handle<JSFunction>, int,
+                                              TailCallMode, TailCallMode);
 
   HValue* context() { return first(); }
   HValue* function() { return second(); }
   Handle<JSFunction> known_function() { return known_function_; }
   int formal_parameter_count() const { return formal_parameter_count_; }
 
-  bool HasStackCheck() final { return has_stack_check_; }
+  bool HasStackCheck() final { return HasStackCheckField::decode(bit_field_); }
+
+  // Defines whether this instruction corresponds to a JS call at tail position.
+  TailCallMode syntactic_tail_call_mode() const {
+    return SyntacticTailCallModeField::decode(bit_field_);
+  }
+
+  // Defines whether this call should be generated as a tail call.
+  TailCallMode tail_call_mode() const {
+    return TailCallModeField::decode(bit_field_);
+  }
 
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
 
+  std::ostream& PrintTo(std::ostream& os) const override;      // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
  private:
-  HInvokeFunction(HValue* context, HValue* function, int argument_count)
+  void set_has_stack_check(bool has_stack_check) {
+    bit_field_ = HasStackCheckField::update(bit_field_, has_stack_check);
+  }
+
+  HInvokeFunction(HValue* context, HValue* function,
+                  Handle<JSFunction> known_function, int argument_count,
+                  TailCallMode syntactic_tail_call_mode,
+                  TailCallMode tail_call_mode)
       : HBinaryCall(context, function, argument_count),
-        has_stack_check_(false) {
+        known_function_(known_function),
+        bit_field_(
+            TailCallModeField::encode(tail_call_mode) |
+            SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+    DCHECK(tail_call_mode != TailCallMode::kAllow ||
+           syntactic_tail_call_mode == TailCallMode::kAllow);
+    formal_parameter_count_ =
+        known_function.is_null()
+            ? 0
+            : known_function->shared()->internal_formal_parameter_count();
+    set_has_stack_check(
+        !known_function.is_null() &&
+        (known_function->code()->kind() == Code::FUNCTION ||
+         known_function->code()->kind() == Code::OPTIMIZED_FUNCTION));
   }
 
   Handle<JSFunction> known_function_;
   int formal_parameter_count_;
-  bool has_stack_check_;
-};
 
-
-class HCallFunction final : public HBinaryCall {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallFunction, HValue*, int,
-                                              ConvertReceiverMode);
-
-  HValue* context() const { return first(); }
-  HValue* function() const { return second(); }
-
-  ConvertReceiverMode convert_mode() const {
-    return ConvertReceiverModeField::decode(bit_field_);
-  }
-  FeedbackVectorSlot slot() const { return slot_; }
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
-  bool HasVectorAndSlot() const { return !feedback_vector_.is_null(); }
-  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
-                        FeedbackVectorSlot slot) {
-    feedback_vector_ = vector;
-    slot_ = slot;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction)
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  int argument_delta() const override { return -argument_count(); }
-
- private:
-  HCallFunction(HValue* context, HValue* function, int argument_count,
-                ConvertReceiverMode convert_mode)
-      : HBinaryCall(context, function, argument_count),
-        bit_field_(ConvertReceiverModeField::encode(convert_mode)) {}
-  Handle<TypeFeedbackVector> feedback_vector_;
-  FeedbackVectorSlot slot_;
-
-  class ConvertReceiverModeField : public BitField<ConvertReceiverMode, 0, 2> {
-  };
-
+  class HasStackCheckField : public BitField<bool, 0, 1> {};
+  class TailCallModeField
+      : public BitField<TailCallMode, HasStackCheckField::kNext, 1> {};
+  class SyntacticTailCallModeField
+      : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
   uint32_t bit_field_;
 };
 
@@ -2550,10 +2498,10 @@
   // Indicates if we support a double (and int32) output for Math.floor and
   // Math.round.
   bool SupportsFlexibleFloorAndRound() const {
-#ifdef V8_TARGET_ARCH_ARM64
-    // TODO(rmcilroy): Re-enable this for Arm64 once http://crbug.com/476477 is
-    // fixed.
-    return false;
+#if V8_TARGET_ARCH_ARM64
+    return true;
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+    return CpuFeatures::IsSupported(SSE4_1);
 #else
     return false;
 #endif
@@ -2997,226 +2945,6 @@
 };
 
 
-class InductionVariableData;
-
-
-struct InductionVariableLimitUpdate {
-  InductionVariableData* updated_variable;
-  HValue* limit;
-  bool limit_is_upper;
-  bool limit_is_included;
-
-  InductionVariableLimitUpdate()
-      : updated_variable(NULL), limit(NULL),
-        limit_is_upper(false), limit_is_included(false) {}
-};
-
-
-class HBoundsCheck;
-class HPhi;
-class HBitwise;
-
-
-class InductionVariableData final : public ZoneObject {
- public:
-  class InductionVariableCheck : public ZoneObject {
-   public:
-    HBoundsCheck* check() { return check_; }
-    InductionVariableCheck* next() { return next_; }
-    bool HasUpperLimit() { return upper_limit_ >= 0; }
-    int32_t upper_limit() {
-      DCHECK(HasUpperLimit());
-      return upper_limit_;
-    }
-    void set_upper_limit(int32_t upper_limit) {
-      upper_limit_ = upper_limit;
-    }
-
-    bool processed() { return processed_; }
-    void set_processed() { processed_ = true; }
-
-    InductionVariableCheck(HBoundsCheck* check,
-                           InductionVariableCheck* next,
-                           int32_t upper_limit = kNoLimit)
-        : check_(check), next_(next), upper_limit_(upper_limit),
-          processed_(false) {}
-
-   private:
-    HBoundsCheck* check_;
-    InductionVariableCheck* next_;
-    int32_t upper_limit_;
-    bool processed_;
-  };
-
-  class ChecksRelatedToLength : public ZoneObject {
-   public:
-    HValue* length() { return length_; }
-    ChecksRelatedToLength* next() { return next_; }
-    InductionVariableCheck* checks() { return checks_; }
-
-    void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
-    void CloseCurrentBlock();
-
-    ChecksRelatedToLength(HValue* length, ChecksRelatedToLength* next)
-      : length_(length), next_(next), checks_(NULL),
-        first_check_in_block_(NULL),
-        added_index_(NULL),
-        added_constant_(NULL),
-        current_and_mask_in_block_(0),
-        current_or_mask_in_block_(0) {}
-
-   private:
-    void UseNewIndexInCurrentBlock(Token::Value token,
-                                   int32_t mask,
-                                   HValue* index_base,
-                                   HValue* context);
-
-    HBoundsCheck* first_check_in_block() { return first_check_in_block_; }
-    HBitwise* added_index() { return added_index_; }
-    void set_added_index(HBitwise* index) { added_index_ = index; }
-    HConstant* added_constant() { return added_constant_; }
-    void set_added_constant(HConstant* constant) { added_constant_ = constant; }
-    int32_t current_and_mask_in_block() { return current_and_mask_in_block_; }
-    int32_t current_or_mask_in_block() { return current_or_mask_in_block_; }
-    int32_t current_upper_limit() { return current_upper_limit_; }
-
-    HValue* length_;
-    ChecksRelatedToLength* next_;
-    InductionVariableCheck* checks_;
-
-    HBoundsCheck* first_check_in_block_;
-    HBitwise* added_index_;
-    HConstant* added_constant_;
-    int32_t current_and_mask_in_block_;
-    int32_t current_or_mask_in_block_;
-    int32_t current_upper_limit_;
-  };
-
-  struct LimitFromPredecessorBlock {
-    InductionVariableData* variable;
-    Token::Value token;
-    HValue* limit;
-    HBasicBlock* other_target;
-
-    bool LimitIsValid() { return token != Token::ILLEGAL; }
-
-    bool LimitIsIncluded() {
-      return Token::IsEqualityOp(token) ||
-          token == Token::GTE || token == Token::LTE;
-    }
-    bool LimitIsUpper() {
-      return token == Token::LTE || token == Token::LT || token == Token::NE;
-    }
-
-    LimitFromPredecessorBlock()
-        : variable(NULL),
-          token(Token::ILLEGAL),
-          limit(NULL),
-          other_target(NULL) {}
-  };
-
-  static const int32_t kNoLimit = -1;
-
-  static InductionVariableData* ExaminePhi(HPhi* phi);
-  static void ComputeLimitFromPredecessorBlock(
-      HBasicBlock* block,
-      LimitFromPredecessorBlock* result);
-  static bool ComputeInductionVariableLimit(
-      HBasicBlock* block,
-      InductionVariableLimitUpdate* additional_limit);
-
-  struct BitwiseDecompositionResult {
-    HValue* base;
-    int32_t and_mask;
-    int32_t or_mask;
-    HValue* context;
-
-    BitwiseDecompositionResult()
-        : base(NULL), and_mask(0), or_mask(0), context(NULL) {}
-  };
-  static void DecomposeBitwise(HValue* value,
-                               BitwiseDecompositionResult* result);
-
-  void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
-
-  bool CheckIfBranchIsLoopGuard(Token::Value token,
-                                HBasicBlock* current_branch,
-                                HBasicBlock* other_branch);
-
-  void UpdateAdditionalLimit(InductionVariableLimitUpdate* update);
-
-  HPhi* phi() { return phi_; }
-  HValue* base() { return base_; }
-  int32_t increment() { return increment_; }
-  HValue* limit() { return limit_; }
-  bool limit_included() { return limit_included_; }
-  HBasicBlock* limit_validity() { return limit_validity_; }
-  HBasicBlock* induction_exit_block() { return induction_exit_block_; }
-  HBasicBlock* induction_exit_target() { return induction_exit_target_; }
-  ChecksRelatedToLength* checks() { return checks_; }
-  HValue* additional_upper_limit() { return additional_upper_limit_; }
-  bool additional_upper_limit_is_included() {
-    return additional_upper_limit_is_included_;
-  }
-  HValue* additional_lower_limit() { return additional_lower_limit_; }
-  bool additional_lower_limit_is_included() {
-    return additional_lower_limit_is_included_;
-  }
-
-  bool LowerLimitIsNonNegativeConstant() {
-    if (base()->IsInteger32Constant() && base()->GetInteger32Constant() >= 0) {
-      return true;
-    }
-    if (additional_lower_limit() != NULL &&
-        additional_lower_limit()->IsInteger32Constant() &&
-        additional_lower_limit()->GetInteger32Constant() >= 0) {
-      // Ignoring the corner case of !additional_lower_limit_is_included()
-      // is safe, handling it adds unneeded complexity.
-      return true;
-    }
-    return false;
-  }
-
-  int32_t ComputeUpperLimit(int32_t and_mask, int32_t or_mask);
-
- private:
-  template <class T> void swap(T* a, T* b) {
-    T c(*a);
-    *a = *b;
-    *b = c;
-  }
-
-  InductionVariableData(HPhi* phi, HValue* base, int32_t increment)
-      : phi_(phi), base_(IgnoreOsrValue(base)), increment_(increment),
-        limit_(NULL), limit_included_(false), limit_validity_(NULL),
-        induction_exit_block_(NULL), induction_exit_target_(NULL),
-        checks_(NULL),
-        additional_upper_limit_(NULL),
-        additional_upper_limit_is_included_(false),
-        additional_lower_limit_(NULL),
-        additional_lower_limit_is_included_(false) {}
-
-  static int32_t ComputeIncrement(HPhi* phi, HValue* phi_operand);
-
-  static HValue* IgnoreOsrValue(HValue* v);
-  static InductionVariableData* GetInductionVariableData(HValue* v);
-
-  HPhi* phi_;
-  HValue* base_;
-  int32_t increment_;
-  HValue* limit_;
-  bool limit_included_;
-  HBasicBlock* limit_validity_;
-  HBasicBlock* induction_exit_block_;
-  HBasicBlock* induction_exit_target_;
-  ChecksRelatedToLength* checks_;
-  HValue* additional_upper_limit_;
-  bool additional_upper_limit_is_included_;
-  HValue* additional_lower_limit_;
-  bool additional_lower_limit_is_included_;
-};
-
-
 class HPhi final : public HValue {
  public:
   HPhi(int merged_index, Zone* zone)
@@ -3250,21 +2978,6 @@
 
   int merged_index() const { return merged_index_; }
 
-  InductionVariableData* induction_variable_data() {
-    return induction_variable_data_;
-  }
-  bool IsInductionVariable() {
-    return induction_variable_data_ != NULL;
-  }
-  bool IsLimitedInductionVariable() {
-    return IsInductionVariable() &&
-        induction_variable_data_->limit() != NULL;
-  }
-  void DetectInductionVariable() {
-    DCHECK(induction_variable_data_ == NULL);
-    induction_variable_data_ = InductionVariableData::ExaminePhi(this);
-  }
-
   std::ostream& PrintTo(std::ostream& os) const override;  // NOLINT
 
 #ifdef DEBUG
@@ -3310,7 +3023,6 @@
   int merged_index_ = 0;
 
   int phi_id_ = -1;
-  InductionVariableData* induction_variable_data_ = nullptr;
 
   Representation representation_from_indirect_uses_ = Representation::None();
   Representation representation_from_non_phi_uses_ = Representation::None();
@@ -3865,8 +3577,8 @@
 
 class HApplyArguments final : public HTemplateInstruction<4> {
  public:
-  DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
-                                 HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P5(HApplyArguments, HValue*, HValue*, HValue*,
+                                 HValue*, TailCallMode);
 
   Representation RequiredInputRepresentation(int index) override {
     // The length is untagged, all other inputs are tagged.
@@ -3880,13 +3592,16 @@
   HValue* length() { return OperandAt(2); }
   HValue* elements() { return OperandAt(3); }
 
+  TailCallMode tail_call_mode() const {
+    return TailCallModeField::decode(bit_field_);
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
 
  private:
-  HApplyArguments(HValue* function,
-                  HValue* receiver,
-                  HValue* length,
-                  HValue* elements) {
+  HApplyArguments(HValue* function, HValue* receiver, HValue* length,
+                  HValue* elements, TailCallMode tail_call_mode)
+      : bit_field_(TailCallModeField::encode(tail_call_mode)) {
     set_representation(Representation::Tagged());
     SetOperandAt(0, function);
     SetOperandAt(1, receiver);
@@ -3894,12 +3609,16 @@
     SetOperandAt(3, elements);
     SetAllSideEffects();
   }
+
+  class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
+  uint32_t bit_field_;
 };
 
 
 class HArgumentsElements final : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
+  DECLARE_INSTRUCTION_FACTORY_P2(HArgumentsElements, bool, bool);
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
@@ -3908,12 +3627,14 @@
   }
 
   bool from_inlined() const { return from_inlined_; }
+  bool arguments_adaptor() const { return arguments_adaptor_; }
 
  protected:
   bool DataEquals(HValue* other) override { return true; }
 
  private:
-  explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
+  explicit HArgumentsElements(bool from_inlined, bool arguments_adaptor = true)
+      : from_inlined_(from_inlined), arguments_adaptor_(arguments_adaptor) {
     // The value produced by this instruction is a pointer into the stack
     // that looks as if it was a smi because of alignment.
     set_representation(Representation::Tagged());
@@ -3923,6 +3644,7 @@
   bool IsDeletable() const override { return true; }
 
   bool from_inlined_;
+  bool arguments_adaptor_;
 };
 
 
@@ -3981,9 +3703,6 @@
 };
 
 
-class HBoundsCheckBaseIndexInformation;
-
-
 class HBoundsCheck final : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
@@ -3995,24 +3714,6 @@
   int offset() const { return offset_; }
   int scale() const { return scale_; }
 
-  void ApplyIndexChange();
-  bool DetectCompoundIndex() {
-    DCHECK(base() == NULL);
-
-    DecompositionResult decomposition;
-    if (index()->TryDecompose(&decomposition)) {
-      base_ = decomposition.base();
-      offset_ = decomposition.offset();
-      scale_ = decomposition.scale();
-      return true;
-    } else {
-      base_ = index();
-      offset_ = 0;
-      scale_ = 0;
-      return false;
-    }
-  }
-
   Representation RequiredInputRepresentation(int index) override {
     return representation();
   }
@@ -4031,8 +3732,6 @@
   DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
 
  protected:
-  friend class HBoundsCheckBaseIndexInformation;
-
   Range* InferRange(Zone* zone) override;
 
   bool DataEquals(HValue* other) override { return true; }
@@ -4061,34 +3760,6 @@
 };
 
 
-class HBoundsCheckBaseIndexInformation final : public HTemplateInstruction<2> {
- public:
-  explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
-    DecompositionResult decomposition;
-    if (check->index()->TryDecompose(&decomposition)) {
-      SetOperandAt(0, decomposition.base());
-      SetOperandAt(1, check);
-    } else {
-      UNREACHABLE();
-    }
-  }
-
-  HValue* base_index() const { return OperandAt(0); }
-  HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); }
-
-  DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
-
-  Representation RequiredInputRepresentation(int index) override {
-    return representation();
-  }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  int RedefinedOperandIndex() override { return 0; }
-  bool IsPurelyInformativeDefinition() override { return true; }
-};
-
-
 class HBitwiseBinaryOperation : public HBinaryOperation {
  public:
   HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
@@ -4711,18 +4382,6 @@
 
   HValue* Canonicalize() override;
 
-  bool TryDecompose(DecompositionResult* decomposition) override {
-    if (left()->IsInteger32Constant()) {
-      decomposition->Apply(right(), left()->GetInteger32Constant());
-      return true;
-    } else if (right()->IsInteger32Constant()) {
-      decomposition->Apply(left(), right()->GetInteger32Constant());
-      return true;
-    } else {
-      return false;
-    }
-  }
-
   void RepresentationChanged(Representation to) override {
     if (to.IsTagged() &&
         (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
@@ -4802,15 +4461,6 @@
 
   HValue* Canonicalize() override;
 
-  bool TryDecompose(DecompositionResult* decomposition) override {
-    if (right()->IsInteger32Constant()) {
-      decomposition->Apply(left(), -right()->GetInteger32Constant());
-      return true;
-    } else {
-      return false;
-    }
-  }
-
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
@@ -5065,18 +4715,6 @@
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
                            HValue* left, HValue* right);
 
-  bool TryDecompose(DecompositionResult* decomposition) override {
-    if (right()->IsInteger32Constant()) {
-      if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
-        // This is intended to look for HAdd and HSub, to handle compounds
-        // like ((base + offset) >> scale) with one single decomposition.
-        left()->TryDecompose(decomposition);
-        return true;
-      }
-    }
-    return false;
-  }
-
   Range* InferRange(Zone* zone) override;
 
   void UpdateRepresentation(Representation new_rep,
@@ -5102,18 +4740,6 @@
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
                            HValue* left, HValue* right);
 
-  bool TryDecompose(DecompositionResult* decomposition) override {
-    if (right()->IsInteger32Constant()) {
-      if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
-        // This is intended to look for HAdd and HSub, to handle compounds
-        // like ((base + offset) >> scale) with one single decomposition.
-        left()->TryDecompose(decomposition);
-        return true;
-      }
-    }
-    return false;
-  }
-
   Range* InferRange(Zone* zone) override;
 
   void UpdateRepresentation(Representation new_rep,
@@ -5572,11 +5198,6 @@
     if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
       return false;
     }
-    // Stores to old space allocations require no write barriers if the value is
-    // a constant provably not in new space.
-    if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
-      return false;
-    }
   }
   return true;
 }
@@ -5928,6 +5549,10 @@
                          Representation::Integer32());
   }
 
+  static HObjectAccess ForMapDescriptors() {
+    return HObjectAccess(kInobject, Map::kDescriptorsOffset);
+  }
+
   static HObjectAccess ForNameHashField() {
     return HObjectAccess(kInobject,
                          Name::kHashFieldOffset,
@@ -7391,35 +7016,6 @@
 };
 
 
-class HToFastProperties final : public HUnaryOperation {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
-
- private:
-  explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
-    set_representation(Representation::Tagged());
-    SetChangesFlag(kNewSpacePromotion);
-
-    // This instruction is not marked as kChangesMaps, but does
-    // change the map of the input operand. Use it only when creating
-    // object literals via a runtime call.
-    DCHECK(value->IsCallRuntime());
-#ifdef DEBUG
-    const Runtime::Function* function = HCallRuntime::cast(value)->function();
-    DCHECK(function->function_id == Runtime::kCreateObjectLiteral);
-#endif
-  }
-
-  bool IsDeletable() const override { return true; }
-};
-
-
 class HSeqStringGetChar final : public HTemplateInstruction<2> {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
@@ -7646,28 +7242,6 @@
   bool IsDeletable() const override { return true; }
 };
 
-
-class HStoreFrameContext: public HUnaryOperation {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P1(HStoreFrameContext, HValue*);
-
-  HValue* context() { return OperandAt(0); }
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext)
- private:
-  explicit HStoreFrameContext(HValue* context)
-      : HUnaryOperation(context) {
-    set_representation(Representation::Tagged());
-    SetChangesFlag(kContextSlots);
-  }
-};
-
-
-
 #undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
diff --git a/src/crankshaft/hydrogen-osr.cc b/src/crankshaft/hydrogen-osr.cc
index c98bbf6..8de3ac0 100644
--- a/src/crankshaft/hydrogen-osr.cc
+++ b/src/crankshaft/hydrogen-osr.cc
@@ -30,7 +30,7 @@
   HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
   osr_entry_ = graph->CreateBasicBlock();
   HValue* true_value = graph->GetConstantTrue();
-  HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+  HBranch* test = builder_->New<HBranch>(true_value, ToBooleanICStub::Types(),
                                          non_osr_entry, osr_entry_);
   builder_->FinishCurrentBlock(test);
 
diff --git a/src/crankshaft/hydrogen-types.cc b/src/crankshaft/hydrogen-types.cc
index 8c85625..4266e28 100644
--- a/src/crankshaft/hydrogen-types.cc
+++ b/src/crankshaft/hydrogen-types.cc
@@ -22,7 +22,7 @@
   if (type->Is(Type::Boolean())) return HType::Boolean();
   if (type->Is(Type::Undefined())) return HType::Undefined();
   if (type->Is(Type::Object())) return HType::JSObject();
-  if (type->Is(Type::Receiver())) return HType::JSReceiver();
+  if (type->Is(Type::DetectableReceiver())) return HType::JSReceiver();
   return HType::Tagged();
 }
 
@@ -43,8 +43,13 @@
   if (value->IsString()) return HType::String();
   if (value->IsBoolean()) return HType::Boolean();
   if (value->IsUndefined()) return HType::Undefined();
-  if (value->IsJSArray()) return HType::JSArray();
-  if (value->IsJSObject()) return HType::JSObject();
+  if (value->IsJSArray()) {
+    DCHECK(!value->IsUndetectable());
+    return HType::JSArray();
+  }
+  if (value->IsJSObject() && !value->IsUndetectable()) {
+    return HType::JSObject();
+  }
   DCHECK(value->IsHeapObject());
   return HType::HeapObject();
 }
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
index b6fdd3a..0de6dac 100644
--- a/src/crankshaft/hydrogen.cc
+++ b/src/crankshaft/hydrogen.cc
@@ -11,7 +11,6 @@
 #include "src/ast/scopeinfo.h"
 #include "src/code-factory.h"
 #include "src/crankshaft/hydrogen-bce.h"
-#include "src/crankshaft/hydrogen-bch.h"
 #include "src/crankshaft/hydrogen-canonicalize.h"
 #include "src/crankshaft/hydrogen-check-elimination.h"
 #include "src/crankshaft/hydrogen-dce.h"
@@ -58,6 +57,8 @@
 #include "src/crankshaft/mips/lithium-codegen-mips.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/crankshaft/mips64/lithium-codegen-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-codegen-s390.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/crankshaft/x87/lithium-codegen-x87.h"  // NOLINT
 #else
@@ -688,28 +689,32 @@
   return value ? GetConstantTrue() : GetConstantFalse();
 }
 
-#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value,         \
-                            undetectable)                                   \
-  HConstant* HGraph::GetConstant##Name() {                                  \
-    if (!constant_##name##_.is_set()) {                                     \
-      HConstant* constant = new (zone()) HConstant(                         \
-          Unique<Object>::CreateImmovable(                                  \
-              isolate()->factory()->name##_value()),                        \
-          Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
-          false, Representation::Tagged(), htype, true, boolean_value,      \
-          undetectable, ODDBALL_TYPE);                                      \
-      constant->InsertAfter(entry_block()->first());                        \
-      constant_##name##_.set(constant);                                     \
-    }                                                                       \
-    return ReinsertConstantIfNecessary(constant_##name##_.get());           \
+#define DEFINE_GET_CONSTANT(Name, name, constant, type, htype, boolean_value, \
+                            undetectable)                                     \
+  HConstant* HGraph::GetConstant##Name() {                                    \
+    if (!constant_##name##_.is_set()) {                                       \
+      HConstant* constant = new (zone()) HConstant(                           \
+          Unique<Object>::CreateImmovable(isolate()->factory()->constant()),  \
+          Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()),   \
+          false, Representation::Tagged(), htype, true, boolean_value,        \
+          undetectable, ODDBALL_TYPE);                                        \
+      constant->InsertAfter(entry_block()->first());                          \
+      constant_##name##_.set(constant);                                       \
+    }                                                                         \
+    return ReinsertConstantIfNecessary(constant_##name##_.get());             \
   }
 
-DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false,
-                    true)
-DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true, false)
-DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false, false)
-DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false, false)
-DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false, true)
+DEFINE_GET_CONSTANT(Undefined, undefined, undefined_value, undefined,
+                    HType::Undefined(), false, true)
+DEFINE_GET_CONSTANT(True, true, true_value, boolean, HType::Boolean(), true,
+                    false)
+DEFINE_GET_CONSTANT(False, false, false_value, boolean, HType::Boolean(), false,
+                    false)
+DEFINE_GET_CONSTANT(Hole, the_hole, the_hole_value, the_hole, HType::None(),
+                    false, false)
+DEFINE_GET_CONSTANT(Null, null, null_value, null, HType::Null(), false, true)
+DEFINE_GET_CONSTANT(OptimizedOut, optimized_out, optimized_out, optimized_out,
+                    HType::None(), false, false)
 
 #undef DEFINE_GET_CONSTANT
 
@@ -906,8 +911,8 @@
     // so that the graph builder visits it and sees any live range extending
     // constructs within it.
     HConstant* constant_false = builder()->graph()->GetConstantFalse();
-    ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
-    boolean_type.Add(ToBooleanStub::BOOLEAN);
+    ToBooleanICStub::Types boolean_type = ToBooleanICStub::Types();
+    boolean_type.Add(ToBooleanICStub::BOOLEAN);
     HBranch* branch = builder()->New<HBranch>(
         constant_false, boolean_type, first_true_block_, first_false_block_);
     builder()->FinishCurrentBlock(branch);
@@ -1302,9 +1307,9 @@
   return string;
 }
 
-
-HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
+HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* checked) {
   if (object->type().IsJSObject()) return object;
+  HValue* function = checked->ActualValue();
   if (function->IsConstant() &&
       HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
     Handle<JSFunction> f = Handle<JSFunction>::cast(
@@ -1312,7 +1317,7 @@
     SharedFunctionInfo* shared = f->shared();
     if (is_strict(shared->language_mode()) || shared->native()) return object;
   }
-  return Add<HWrapReceiver>(object, function);
+  return Add<HWrapReceiver>(object, checked);
 }
 
 
@@ -3179,58 +3184,6 @@
 }
 
 
-void HGraphBuilder::BuildCompareNil(HValue* value, Type* type,
-                                    HIfContinuation* continuation,
-                                    MapEmbedding map_embedding) {
-  IfBuilder if_nil(this);
-
-  if (type->Maybe(Type::Undetectable())) {
-    if_nil.If<HIsUndetectableAndBranch>(value);
-  } else {
-    bool maybe_null = type->Maybe(Type::Null());
-    if (maybe_null) {
-      if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
-    }
-
-    if (type->Maybe(Type::Undefined())) {
-      if (maybe_null) if_nil.Or();
-      if_nil.If<HCompareObjectEqAndBranch>(value,
-                                           graph()->GetConstantUndefined());
-    }
-
-    if_nil.Then();
-    if_nil.Else();
-
-    if (type->NumClasses() == 1) {
-      BuildCheckHeapObject(value);
-      // For ICs, the map checked below is a sentinel map that gets replaced by
-      // the monomorphic map when the code is used as a template to generate a
-      // new IC. For optimized functions, there is no sentinel map, the map
-      // emitted below is the actual monomorphic map.
-      if (map_embedding == kEmbedMapsViaWeakCells) {
-        HValue* cell =
-            Add<HConstant>(Map::WeakCellForMap(type->Classes().Current()));
-        HValue* expected_map = Add<HLoadNamedField>(
-            cell, nullptr, HObjectAccess::ForWeakCellValue());
-        HValue* map =
-            Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
-        IfBuilder map_check(this);
-        map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
-        map_check.ThenDeopt(Deoptimizer::kUnknownMap);
-        map_check.End();
-      } else {
-        DCHECK(map_embedding == kEmbedMapsDirectly);
-        Add<HCheckMaps>(value, type->Classes().Current());
-      }
-    } else {
-      if_nil.Deopt(Deoptimizer::kTooManyUndetectableTypes);
-    }
-  }
-
-  if_nil.CaptureContinuation(continuation);
-}
-
-
 void HGraphBuilder::BuildCreateAllocationMemento(
     HValue* previous_object,
     HValue* previous_object_size,
@@ -3544,11 +3497,11 @@
   return Add<HLoadNamedField>(native_context, nullptr, function_access);
 }
 
-
 HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
     : HGraphBuilder(info, CallInterfaceDescriptor()),
       function_state_(NULL),
-      initial_function_state_(this, info, NORMAL_RETURN, 0),
+      initial_function_state_(this, info, NORMAL_RETURN, 0,
+                              TailCallMode::kAllow),
       ast_context_(NULL),
       break_scope_(NULL),
       inlined_count_(0),
@@ -3621,9 +3574,16 @@
 
 HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
     IterationStatement* statement) {
-  HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
-      ? osr()->BuildOsrLoopEntry(statement)
-      : BuildLoopEntry();
+  HBasicBlock* loop_entry;
+
+  if (osr()->HasOsrEntryAt(statement)) {
+    loop_entry = osr()->BuildOsrLoopEntry(statement);
+    if (function_state()->IsInsideDoExpressionScope()) {
+      Bailout(kDoExpressionUnmodelable);
+    }
+  } else {
+    loop_entry = BuildLoopEntry();
+  }
   return loop_entry;
 }
 
@@ -3652,7 +3612,6 @@
       info_(info),
       descriptor_(descriptor),
       zone_(info->zone()),
-      is_recursive_(false),
       use_optimistic_licm_(false),
       depends_on_empty_array_proto_elements_(false),
       type_change_checksum_(0),
@@ -4085,11 +4044,12 @@
 // a (possibly inlined) function.
 FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
                              CompilationInfo* info, InliningKind inlining_kind,
-                             int inlining_id)
+                             int inlining_id, TailCallMode tail_call_mode)
     : owner_(owner),
       compilation_info_(info),
       call_context_(NULL),
       inlining_kind_(inlining_kind),
+      tail_call_mode_(tail_call_mode),
       function_return_(NULL),
       test_context_(NULL),
       entry_(NULL),
@@ -4097,6 +4057,7 @@
       arguments_elements_(NULL),
       inlining_id_(inlining_id),
       outer_source_position_(SourcePosition::Unknown()),
+      do_expression_scope_count_(0),
       outer_(owner->function_state()) {
   if (outer_ != NULL) {
     // State for an inline function.
@@ -4153,7 +4114,7 @@
       typeof_mode_(NOT_INSIDE_TYPEOF) {
   owner->set_ast_context(this);  // Push.
 #ifdef DEBUG
-  DCHECK(owner->environment()->frame_type() == JS_FUNCTION);
+  DCHECK_EQ(JS_FUNCTION, owner->environment()->frame_type());
   original_length_ = owner->environment()->length();
 #endif
 }
@@ -4165,18 +4126,18 @@
 
 
 EffectContext::~EffectContext() {
-  DCHECK(owner()->HasStackOverflow() ||
-         owner()->current_block() == NULL ||
+  DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
          (owner()->environment()->length() == original_length_ &&
-          owner()->environment()->frame_type() == JS_FUNCTION));
+          (owner()->environment()->frame_type() == JS_FUNCTION ||
+           owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
 }
 
 
 ValueContext::~ValueContext() {
-  DCHECK(owner()->HasStackOverflow() ||
-         owner()->current_block() == NULL ||
+  DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
          (owner()->environment()->length() == original_length_ + 1 &&
-          owner()->environment()->frame_type() == JS_FUNCTION));
+          (owner()->environment()->frame_type() == JS_FUNCTION ||
+           owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
 }
 
 
@@ -4350,7 +4311,7 @@
   if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
     builder->Bailout(kArgumentsObjectValueInATestContext);
   }
-  ToBooleanStub::Types expected(condition()->to_boolean_types());
+  ToBooleanICStub::Types expected(condition()->to_boolean_types());
   ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
 }
 
@@ -4566,7 +4527,6 @@
   Run<HStackCheckEliminationPhase>();
 
   if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
-  if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
   if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
   if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
 
@@ -4739,12 +4699,8 @@
         HInstruction* inner_context = Add<HCallRuntime>(
             Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
         inner_context->SetFlag(HValue::kHasNoObservableSideEffects);
-        HInstruction* instr = Add<HStoreFrameContext>(inner_context);
         set_scope(scope);
         environment()->BindContext(inner_context);
-        if (instr->HasObservableSideEffects()) {
-          AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE);
-        }
       }
       VisitDeclarations(scope->declarations());
       AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
@@ -4759,11 +4715,7 @@
         inner_context, nullptr,
         HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
 
-    HInstruction* instr = Add<HStoreFrameContext>(outer_context);
     environment()->BindContext(outer_context);
-    if (instr->HasObservableSideEffects()) {
-      AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE);
-    }
   }
   HBasicBlock* break_block = break_info.break_block();
   if (break_block != NULL) {
@@ -4811,23 +4763,24 @@
     HBasicBlock* cond_false = graph()->CreateBasicBlock();
     CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
 
-    if (cond_true->HasPredecessor()) {
-      cond_true->SetJoinId(stmt->ThenId());
-      set_current_block(cond_true);
-      CHECK_BAILOUT(Visit(stmt->then_statement()));
-      cond_true = current_block();
-    } else {
-      cond_true = NULL;
-    }
+    // Technically, we should be able to handle the case when one side of
+    // the test is not connected, but this can trip up liveness analysis
+    // if we did not fully connect the test context based on some optimistic
+    // assumption. If such an assumption was violated, we would end up with
+    // an environment with optimized-out values. So we should always
+    // conservatively connect the test context.
+    CHECK(cond_true->HasPredecessor());
+    CHECK(cond_false->HasPredecessor());
 
-    if (cond_false->HasPredecessor()) {
-      cond_false->SetJoinId(stmt->ElseId());
-      set_current_block(cond_false);
-      CHECK_BAILOUT(Visit(stmt->else_statement()));
-      cond_false = current_block();
-    } else {
-      cond_false = NULL;
-    }
+    cond_true->SetJoinId(stmt->ThenId());
+    set_current_block(cond_true);
+    CHECK_BAILOUT(Visit(stmt->then_statement()));
+    cond_true = current_block();
+
+    cond_false->SetJoinId(stmt->ElseId());
+    set_current_block(cond_false);
+    CHECK_BAILOUT(Visit(stmt->else_statement()));
+    cond_false = current_block();
 
     HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
     set_current_block(join);
@@ -4881,6 +4834,11 @@
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
+
+  if (function_state()->IsInsideDoExpressionScope()) {
+    return Bailout(kDoExpressionUnmodelable);
+  }
+
   Scope* outer_scope = NULL;
   Scope* inner_scope = scope();
   int drop_extra = 0;
@@ -4897,10 +4855,6 @@
           HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
       context = context_instruction;
     }
-    HInstruction* instr = Add<HStoreFrameContext>(context);
-    if (instr->HasObservableSideEffects()) {
-      AddSimulate(stmt->target()->EntryId(), REMOVABLE_SIMULATE);
-    }
     environment()->BindContext(context);
   }
 
@@ -4913,6 +4867,11 @@
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
+
+  if (function_state()->IsInsideDoExpressionScope()) {
+    return Bailout(kDoExpressionUnmodelable);
+  }
+
   Scope* outer_scope = NULL;
   Scope* inner_scope = scope();
   int drop_extra = 0;
@@ -4929,10 +4888,6 @@
           HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
       context = context_instruction;
     }
-    HInstruction* instr = Add<HStoreFrameContext>(context);
-    if (instr->HasObservableSideEffects()) {
-      AddSimulate(stmt->target()->ExitId(), REMOVABLE_SIMULATE);
-    }
     environment()->BindContext(context);
   }
   Goto(break_block);
@@ -5156,7 +5111,7 @@
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
   HBasicBlock* loop_successor = NULL;
-  if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
+  if (body_exit != NULL) {
     set_current_block(body_exit);
     loop_successor = graph()->CreateBasicBlock();
     if (stmt->cond()->ToBooleanIsFalse()) {
@@ -5198,19 +5153,17 @@
 
   // If the condition is constant true, do not generate a branch.
   HBasicBlock* loop_successor = NULL;
-  if (!stmt->cond()->ToBooleanIsTrue()) {
-    HBasicBlock* body_entry = graph()->CreateBasicBlock();
-    loop_successor = graph()->CreateBasicBlock();
-    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
-    if (body_entry->HasPredecessor()) {
-      body_entry->SetJoinId(stmt->BodyId());
-      set_current_block(body_entry);
-    }
-    if (loop_successor->HasPredecessor()) {
-      loop_successor->SetJoinId(stmt->ExitId());
-    } else {
-      loop_successor = NULL;
-    }
+  HBasicBlock* body_entry = graph()->CreateBasicBlock();
+  loop_successor = graph()->CreateBasicBlock();
+  CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+  if (body_entry->HasPredecessor()) {
+    body_entry->SetJoinId(stmt->BodyId());
+    set_current_block(body_entry);
+  }
+  if (loop_successor->HasPredecessor()) {
+    loop_successor->SetJoinId(stmt->ExitId());
+  } else {
+    loop_successor = NULL;
   }
 
   BreakAndContinueInfo break_info(stmt, scope());
@@ -5239,10 +5192,9 @@
   DCHECK(current_block() != NULL);
   HBasicBlock* loop_entry = BuildLoopEntry(stmt);
 
-  HBasicBlock* loop_successor = NULL;
+  HBasicBlock* loop_successor = graph()->CreateBasicBlock();
+  HBasicBlock* body_entry = graph()->CreateBasicBlock();
   if (stmt->cond() != NULL) {
-    HBasicBlock* body_entry = graph()->CreateBasicBlock();
-    loop_successor = graph()->CreateBasicBlock();
     CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
     if (body_entry->HasPredecessor()) {
       body_entry->SetJoinId(stmt->BodyId());
@@ -5253,6 +5205,14 @@
     } else {
       loop_successor = NULL;
     }
+  } else {
+    // Create dummy control flow so that variable liveness analysis
+    // produces the correct result.
+    HControlInstruction* branch = New<HBranch>(graph()->GetConstantTrue());
+    branch->SetSuccessorAt(0, body_entry);
+    branch->SetSuccessorAt(1, loop_successor);
+    FinishCurrentBlock(branch);
+    set_current_block(body_entry);
   }
 
   BreakAndContinueInfo break_info(stmt, scope());
@@ -5540,9 +5500,8 @@
     FastNewClosureDescriptor descriptor(isolate());
     HValue* values[] = {context(), shared_info_value};
     HConstant* stub_value = Add<HConstant>(stub.GetCode());
-    instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
-                                     Vector<HValue*>(values, arraysize(values)),
-                                     NORMAL_CALL);
+    instr = New<HCallWithDescriptor>(
+        stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
   } else {
     Add<HPushArguments>(shared_info_value);
     Runtime::FunctionId function_id =
@@ -5571,10 +5530,12 @@
 
 
 void HOptimizedGraphBuilder::VisitDoExpression(DoExpression* expr) {
+  DoExpressionScope scope(this);
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
-  return Bailout(kDoExpression);
+  CHECK_ALIVE(VisitBlock(expr->block()));
+  Visit(expr->result());
 }
 
 
@@ -5821,9 +5782,9 @@
       context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
       Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
   HConstant* stub_value = Add<HConstant>(callable.code());
-  HInstruction* instr = New<HCallWithDescriptor>(
-      stub_value, 0, callable.descriptor(),
-      Vector<HValue*>(values, arraysize(values)), NORMAL_CALL);
+  HInstruction* instr =
+      New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
+                               Vector<HValue*>(values, arraysize(values)));
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
@@ -6019,17 +5980,7 @@
     }
   }
 
-  if (expr->has_function()) {
-    // Return the result of the transformation to fast properties
-    // instead of the original since this operation changes the map
-    // of the object. This makes sure that the original object won't
-    // be used by other optimized code before it is transformed
-    // (e.g. because of code motion).
-    HToFastProperties* result = Add<HToFastProperties>(Pop());
-    return ast_context()->ReturnValue(result);
-  } else {
-    return ast_context()->ReturnValue(Pop());
-  }
+  return ast_context()->ReturnValue(Pop());
 }
 
 
@@ -6053,9 +6004,8 @@
     Handle<Object> raw_boilerplate;
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
         isolate(), raw_boilerplate,
-        Runtime::CreateArrayLiteralBoilerplate(
-            isolate(), literals, expr->constant_elements(),
-            is_strong(function_language_mode())),
+        Runtime::CreateArrayLiteralBoilerplate(isolate(), literals,
+                                               expr->constant_elements()),
         Bailout(kArrayBoilerplateCreationFailed));
 
     boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
@@ -6591,13 +6541,7 @@
 
   if (!info->IsFound()) {
     DCHECK(info->IsLoad());
-    if (is_strong(function_language_mode())) {
-      return New<HCallRuntime>(
-          Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
-          0);
-    } else {
-      return graph()->GetConstantUndefined();
-    }
+    return graph()->GetConstantUndefined();
   }
 
   if (info->IsData()) {
@@ -6625,8 +6569,9 @@
         info->NeedsWrappingFor(Handle<JSFunction>::cast(info->accessor()))) {
       HValue* function = Add<HConstant>(info->accessor());
       PushArgumentsFromEnvironment(argument_count);
-      return New<HCallFunction>(function, argument_count,
-                                ConvertReceiverMode::kNotNullOrUndefined);
+      return NewCallFunction(function, argument_count, TailCallMode::kDisallow,
+                             ConvertReceiverMode::kNotNullOrUndefined,
+                             TailCallMode::kDisallow);
     } else if (FLAG_inline_accessors && can_inline_accessor) {
       bool success = info->IsLoad()
           ? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
@@ -6640,8 +6585,9 @@
       Bailout(kInliningBailedOut);
       return nullptr;
     }
-    return BuildCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
-                                     argument_count);
+    return NewCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
+                                   argument_count, TailCallMode::kDisallow,
+                                   TailCallMode::kDisallow);
   }
 
   DCHECK(info->IsDataConstant());
@@ -8039,56 +7985,81 @@
   }
 }
 
+void HOptimizedGraphBuilder::BuildEnsureCallable(HValue* object) {
+  NoObservableSideEffectsScope scope(this);
+  const Runtime::Function* throw_called_non_callable =
+      Runtime::FunctionForId(Runtime::kThrowCalledNonCallable);
 
-HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(HValue* fun,
-                                                           int argument_count) {
-  return New<HCallJSFunction>(fun, argument_count);
+  IfBuilder is_not_function(this);
+  HValue* smi_check = is_not_function.If<HIsSmiAndBranch>(object);
+  is_not_function.Or();
+  HValue* map = AddLoadMap(object, smi_check);
+  HValue* bit_field =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+  HValue* bit_field_masked = AddUncasted<HBitwise>(
+      Token::BIT_AND, bit_field, Add<HConstant>(1 << Map::kIsCallable));
+  is_not_function.IfNot<HCompareNumericAndBranch>(
+      bit_field_masked, Add<HConstant>(1 << Map::kIsCallable), Token::EQ);
+  is_not_function.Then();
+  {
+    Add<HPushArguments>(object);
+    Add<HCallRuntime>(throw_called_non_callable, 1);
+  }
+  is_not_function.End();
 }
 
-
-HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
-    HValue* fun, HValue* context,
-    int argument_count, HValue* expected_param_count) {
-  HValue* new_target = graph()->GetConstantUndefined();
+HInstruction* HOptimizedGraphBuilder::NewCallFunction(
+    HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
+    ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
+  if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+    BuildEnsureCallable(function);
+  } else {
+    DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
+  }
   HValue* arity = Add<HConstant>(argument_count - 1);
 
-  HValue* op_vals[] = {context, fun, new_target, arity, expected_param_count};
+  HValue* op_vals[] = {context(), function, arity};
 
-  Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+  Callable callable =
+      CodeFactory::Call(isolate(), convert_mode, tail_call_mode);
   HConstant* stub = Add<HConstant>(callable.code());
 
   return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
-                                  Vector<HValue*>(op_vals, arraysize(op_vals)));
+                                  Vector<HValue*>(op_vals, arraysize(op_vals)),
+                                  syntactic_tail_call_mode);
 }
 
-
-HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
-    Handle<JSFunction> jsfun, int argument_count) {
-  HValue* target = Add<HConstant>(jsfun);
-  // For constant functions, we try to avoid calling the
-  // argument adaptor and instead call the function directly
-  int formal_parameter_count =
-      jsfun->shared()->internal_formal_parameter_count();
-  bool dont_adapt_arguments =
-      (formal_parameter_count ==
-       SharedFunctionInfo::kDontAdaptArgumentsSentinel);
-  int arity = argument_count - 1;
-  bool can_invoke_directly =
-      dont_adapt_arguments || formal_parameter_count == arity;
-  if (can_invoke_directly) {
-    if (jsfun.is_identical_to(current_info()->closure())) {
-      graph()->MarkRecursive();
-    }
-    return NewPlainFunctionCall(target, argument_count);
+HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
+    HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
+    ConvertReceiverMode convert_mode, TailCallMode tail_call_mode,
+    FeedbackVectorSlot slot) {
+  if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+    BuildEnsureCallable(function);
   } else {
-    HValue* param_count_value = Add<HConstant>(formal_parameter_count);
-    HValue* context = Add<HLoadNamedField>(
-        target, nullptr, HObjectAccess::ForFunctionContextPointer());
-    return NewArgumentAdaptorCall(target, context,
-        argument_count, param_count_value);
+    DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
   }
-  UNREACHABLE();
-  return NULL;
+  int arity = argument_count - 1;
+  Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+  HValue* index_val = Add<HConstant>(vector->GetIndex(slot));
+  HValue* vector_val = Add<HConstant>(vector);
+
+  HValue* op_vals[] = {context(), function, index_val, vector_val};
+
+  Callable callable = CodeFactory::CallICInOptimizedCode(
+      isolate(), arity, convert_mode, tail_call_mode);
+  HConstant* stub = Add<HConstant>(callable.code());
+
+  return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
+                                  Vector<HValue*>(op_vals, arraysize(op_vals)),
+                                  syntactic_tail_call_mode);
+}
+
+HInstruction* HOptimizedGraphBuilder::NewCallConstantFunction(
+    Handle<JSFunction> function, int argument_count,
+    TailCallMode syntactic_tail_call_mode, TailCallMode tail_call_mode) {
+  HValue* target = Add<HConstant>(function);
+  return New<HInvokeFunction>(target, function, argument_count,
+                              syntactic_tail_call_mode, tail_call_mode);
 }
 
 
@@ -8126,6 +8097,10 @@
   bool handled_string = false;
   int ordered_functions = 0;
 
+  TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+  TailCallMode tail_call_mode =
+      function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
   int i;
   for (i = 0; i < maps->length() && ordered_functions < kMaxCallPolymorphism;
        ++i) {
@@ -8230,14 +8205,17 @@
       if (HasStackOverflow()) return;
     } else {
       // Since HWrapReceiver currently cannot actually wrap numbers and strings,
-      // use the regular CallFunctionStub for method calls to wrap the receiver.
+      // use the regular call builtin for method calls to wrap the receiver.
       // TODO(verwaest): Support creation of value wrappers directly in
       // HWrapReceiver.
       HInstruction* call =
-          needs_wrapping ? NewUncasted<HCallFunction>(
-                               function, argument_count,
-                               ConvertReceiverMode::kNotNullOrUndefined)
-                         : BuildCallConstantFunction(target, argument_count);
+          needs_wrapping
+              ? NewCallFunction(
+                    function, argument_count, syntactic_tail_call_mode,
+                    ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode)
+              : NewCallConstantFunction(target, argument_count,
+                                        syntactic_tail_call_mode,
+                                        tail_call_mode);
       PushArgumentsFromEnvironment(argument_count);
       AddInstruction(call);
       Drop(1);  // Drop the function.
@@ -8266,8 +8244,9 @@
     environment()->SetExpressionStackAt(0, receiver);
     CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-    HInstruction* call = New<HCallFunction>(
-        function, argument_count, ConvertReceiverMode::kNotNullOrUndefined);
+    HInstruction* call = NewCallFunction(
+        function, argument_count, syntactic_tail_call_mode,
+        ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
 
     PushArgumentsFromEnvironment(argument_count);
 
@@ -8295,17 +8274,19 @@
   }
 }
 
-
 void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
                                          Handle<JSFunction> caller,
-                                         const char* reason) {
+                                         const char* reason,
+                                         TailCallMode tail_call_mode) {
   if (FLAG_trace_inlining) {
     base::SmartArrayPointer<char> target_name =
         target->shared()->DebugName()->ToCString();
     base::SmartArrayPointer<char> caller_name =
         caller->shared()->DebugName()->ToCString();
     if (reason == NULL) {
-      PrintF("Inlined %s called from %s.\n", target_name.get(),
+      const char* call_mode =
+          tail_call_mode == TailCallMode::kAllow ? "tail called" : "called";
+      PrintF("Inlined %s %s from %s.\n", target_name.get(), call_mode,
              caller_name.get());
     } else {
       PrintF("Did not inline %s called from %s (%s).\n",
@@ -8362,12 +8343,12 @@
   return nodes_added;
 }
 
-
 bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
                                        int arguments_count,
                                        HValue* implicit_return_value,
                                        BailoutId ast_id, BailoutId return_id,
-                                       InliningKind inlining_kind) {
+                                       InliningKind inlining_kind,
+                                       TailCallMode syntactic_tail_call_mode) {
   if (target->context()->native_context() !=
       top_info()->closure()->context()->native_context()) {
     return false;
@@ -8376,7 +8357,6 @@
   if (nodes_added == kNotInlinable) return false;
 
   Handle<JSFunction> caller = current_info()->closure();
-
   if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
     TraceInline(target, caller, "target AST is too large [early]");
     return false;
@@ -8498,15 +8478,6 @@
     }
   }
 
-  // In strong mode it is an error to call a function with too few arguments.
-  // In that case do not inline because then the arity check would be skipped.
-  if (is_strong(function->language_mode()) &&
-      arguments_count < function->parameter_count()) {
-    TraceInline(target, caller,
-                "too few arguments passed to a strong function");
-    return false;
-  }
-
   // Generate the deoptimization data for the unoptimized version of
   // the target function if we don't already have it.
   if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
@@ -8537,17 +8508,15 @@
   // Save the pending call context. Set up new one for the inlined function.
   // The function state is new-allocated because we need to delete it
   // in two different places.
-  FunctionState* target_state =
-      new FunctionState(this, &target_info, inlining_kind, inlining_id);
+  FunctionState* target_state = new FunctionState(
+      this, &target_info, inlining_kind, inlining_id,
+      function_state()->ComputeTailCallMode(syntactic_tail_call_mode));
 
   HConstant* undefined = graph()->GetConstantUndefined();
 
-  HEnvironment* inner_env =
-      environment()->CopyForInlining(target,
-                                     arguments_count,
-                                     function,
-                                     undefined,
-                                     function_state()->inlining_kind());
+  HEnvironment* inner_env = environment()->CopyForInlining(
+      target, arguments_count, function, undefined,
+      function_state()->inlining_kind(), syntactic_tail_call_mode);
 
   HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
   inner_env->BindContext(context);
@@ -8577,10 +8546,10 @@
   current_block()->UpdateEnvironment(inner_env);
   Scope* saved_scope = scope();
   set_scope(target_info.scope());
-  HEnterInlined* enter_inlined =
-      Add<HEnterInlined>(return_id, target, context, arguments_count, function,
-                         function_state()->inlining_kind(),
-                         function->scope()->arguments(), arguments_object);
+  HEnterInlined* enter_inlined = Add<HEnterInlined>(
+      return_id, target, context, arguments_count, function,
+      function_state()->inlining_kind(), function->scope()->arguments(),
+      arguments_object, syntactic_tail_call_mode);
   if (top_info()->is_tracking_positions()) {
     enter_inlined->set_inlining_id(inlining_id);
   }
@@ -8608,7 +8577,7 @@
       TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
   graph()->update_type_change_checksum(type_info->own_type_change_checksum());
 
-  TraceInline(target, caller, NULL);
+  TraceInline(target, caller, NULL, syntactic_tail_call_mode);
 
   if (current_block() != NULL) {
     FunctionState* state = function_state();
@@ -8692,7 +8661,8 @@
 
 bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
   return TryInline(expr->target(), expr->arguments()->length(), NULL,
-                   expr->id(), expr->ReturnId(), NORMAL_RETURN);
+                   expr->id(), expr->ReturnId(), NORMAL_RETURN,
+                   expr->tail_call_mode());
 }
 
 
@@ -8700,7 +8670,7 @@
                                                 HValue* implicit_return_value) {
   return TryInline(expr->target(), expr->arguments()->length(),
                    implicit_return_value, expr->id(), expr->ReturnId(),
-                   CONSTRUCT_CALL_RETURN);
+                   CONSTRUCT_CALL_RETURN, TailCallMode::kDisallow);
 }
 
 bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
@@ -8710,7 +8680,7 @@
   if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
   return getter->IsJSFunction() &&
          TryInline(Handle<JSFunction>::cast(getter), 0, NULL, ast_id, return_id,
-                   GETTER_CALL_RETURN);
+                   GETTER_CALL_RETURN, TailCallMode::kDisallow);
 }
 
 bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
@@ -8721,7 +8691,8 @@
   if (TryInlineApiSetter(setter, receiver_map, id)) return true;
   return setter->IsJSFunction() &&
          TryInline(Handle<JSFunction>::cast(setter), 1, implicit_return_value,
-                   id, assignment_id, SETTER_CALL_RETURN);
+                   id, assignment_id, SETTER_CALL_RETURN,
+                   TailCallMode::kDisallow);
 }
 
 
@@ -8729,13 +8700,15 @@
                                                    Call* expr,
                                                    int arguments_count) {
   return TryInline(function, arguments_count, NULL, expr->id(),
-                   expr->ReturnId(), NORMAL_RETURN);
+                   expr->ReturnId(), NORMAL_RETURN, expr->tail_call_mode());
 }
 
 
 bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
   BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
+  // We intentionally ignore expr->tail_call_mode() here because builtins
+  // we inline here do not observe if they were tail called or not.
   switch (id) {
     case kMathExp:
       if (!FLAG_fast_math) break;
@@ -8819,6 +8792,25 @@
   }
   // Try to inline calls like Math.* as operations in the calling function.
   switch (id) {
+    case kObjectHasOwnProperty: {
+      // It's not safe to look through the phi for elements if we're compiling
+      // for osr.
+      if (top_info()->is_osr()) return false;
+      if (argument_count != 2) return false;
+      HValue* key = Top();
+      if (!key->IsLoadKeyed()) return false;
+      HValue* elements = HLoadKeyed::cast(key)->elements();
+      if (!elements->IsPhi() || elements->OperandCount() != 1) return false;
+      if (!elements->OperandAt(0)->IsForInCacheArray()) return false;
+      HForInCacheArray* cache = HForInCacheArray::cast(elements->OperandAt(0));
+      HValue* receiver = environment()->ExpressionStackAt(1);
+      if (!receiver->IsPhi() || receiver->OperandCount() != 1) return false;
+      if (cache->enumerable() != receiver->OperandAt(0)) return false;
+      Drop(3);  // key, receiver, function
+      Add<HCheckMapValue>(receiver, cache->map());
+      ast_context()->ReturnValue(graph()->GetConstantTrue());
+      return true;
+    }
     case kStringCharCodeAt:
     case kStringCharAt:
       if (argument_count == 2) {
@@ -8841,6 +8833,9 @@
       if (argument_count == 2) {
         HValue* argument = Pop();
         Drop(2);  // Receiver and function.
+        argument = AddUncasted<HForceRepresentation>(
+            argument, Representation::Integer32());
+        argument->SetFlag(HValue::kTruncatingToInt32);
         HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
         ast_context()->ReturnInstruction(result, expr->id());
         return true;
@@ -9053,7 +9048,7 @@
 
       Drop(args_count_no_receiver);
       HValue* receiver = Pop();
-      HValue* function = Pop();
+      Drop(1);  // Function.
       HValue* result;
 
       {
@@ -9129,7 +9124,8 @@
           if_inline.Else();
           {
             Add<HPushArguments>(receiver);
-            result = Add<HCallJSFunction>(function, 1);
+            result = AddInstruction(NewCallConstantFunction(
+                function, 1, TailCallMode::kDisallow, TailCallMode::kDisallow));
             if (!ast_context()->IsEffect()) Push(result);
           }
           if_inline.End();
@@ -9193,12 +9189,8 @@
   Handle<JSFunction> function = expr->target();
   int argc = expr->arguments()->length();
   SmallMapList receiver_maps;
-  return TryInlineApiCall(function,
-                          receiver,
-                          &receiver_maps,
-                          argc,
-                          expr->id(),
-                          kCallApiFunction);
+  return TryInlineApiCall(function, receiver, &receiver_maps, argc, expr->id(),
+                          kCallApiFunction, expr->tail_call_mode());
 }
 
 
@@ -9208,12 +9200,8 @@
     SmallMapList* receiver_maps) {
   Handle<JSFunction> function = expr->target();
   int argc = expr->arguments()->length();
-  return TryInlineApiCall(function,
-                          receiver,
-                          receiver_maps,
-                          argc,
-                          expr->id(),
-                          kCallApiMethod);
+  return TryInlineApiCall(function, receiver, receiver_maps, argc, expr->id(),
+                          kCallApiMethod, expr->tail_call_mode());
 }
 
 bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
@@ -9223,10 +9211,8 @@
   receiver_maps.Add(receiver_map, zone());
   return TryInlineApiCall(function,
                           NULL,  // Receiver is on expression stack.
-                          &receiver_maps,
-                          0,
-                          ast_id,
-                          kCallApiGetter);
+                          &receiver_maps, 0, ast_id, kCallApiGetter,
+                          TailCallMode::kDisallow);
 }
 
 bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
@@ -9236,22 +9222,23 @@
   receiver_maps.Add(receiver_map, zone());
   return TryInlineApiCall(function,
                           NULL,  // Receiver is on expression stack.
-                          &receiver_maps,
-                          1,
-                          ast_id,
-                          kCallApiSetter);
+                          &receiver_maps, 1, ast_id, kCallApiSetter,
+                          TailCallMode::kDisallow);
 }
 
-bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<Object> function,
-                                              HValue* receiver,
-                                              SmallMapList* receiver_maps,
-                                              int argc, BailoutId ast_id,
-                                              ApiCallType call_type) {
+bool HOptimizedGraphBuilder::TryInlineApiCall(
+    Handle<Object> function, HValue* receiver, SmallMapList* receiver_maps,
+    int argc, BailoutId ast_id, ApiCallType call_type,
+    TailCallMode syntactic_tail_call_mode) {
   if (function->IsJSFunction() &&
       Handle<JSFunction>::cast(function)->context()->native_context() !=
           top_info()->closure()->context()->native_context()) {
     return false;
   }
+  if (argc > CallApiCallbackStub::kArgMax) {
+    return false;
+  }
+
   CallOptimization optimization(function);
   if (!optimization.is_simple_api_call()) return false;
   Handle<Map> holder_map;
@@ -9347,33 +9334,24 @@
                        api_function_address, nullptr};
 
   HInstruction* call = nullptr;
+  CHECK(argc <= CallApiCallbackStub::kArgMax);
   if (!is_function) {
-    CallApiAccessorStub stub(isolate(), is_store, call_data_undefined,
+    CallApiCallbackStub stub(isolate(), is_store, call_data_undefined,
                              !optimization.is_constant_call());
     Handle<Code> code = stub.GetCode();
     HConstant* code_value = Add<HConstant>(code);
-    ApiAccessorDescriptor descriptor(isolate());
     call = New<HCallWithDescriptor>(
-        code_value, argc + 1, descriptor,
-        Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
-  } else if (argc <= CallApiFunctionWithFixedArgsStub::kMaxFixedArgs) {
-    CallApiFunctionWithFixedArgsStub stub(isolate(), argc, call_data_undefined);
-    Handle<Code> code = stub.GetCode();
-    HConstant* code_value = Add<HConstant>(code);
-    ApiFunctionWithFixedArgsDescriptor descriptor(isolate());
-    call = New<HCallWithDescriptor>(
-        code_value, argc + 1, descriptor,
-        Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
-    Drop(1);  // Drop function.
+        code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
+        Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
+        syntactic_tail_call_mode);
   } else {
-    op_vals[arraysize(op_vals) - 1] = Add<HConstant>(argc);
-    CallApiFunctionStub stub(isolate(), call_data_undefined);
+    CallApiCallbackStub stub(isolate(), argc, call_data_undefined);
     Handle<Code> code = stub.GetCode();
     HConstant* code_value = Add<HConstant>(code);
-    ApiFunctionDescriptor descriptor(isolate());
-    call =
-        New<HCallWithDescriptor>(code_value, argc + 1, descriptor,
-                                 Vector<HValue*>(op_vals, arraysize(op_vals)));
+    call = New<HCallWithDescriptor>(
+        code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
+        Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
+        syntactic_tail_call_mode);
     Drop(1);  // Drop function.
   }
 
@@ -9405,9 +9383,14 @@
     }
   }
 
+  TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+  TailCallMode tail_call_mode =
+      function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
   PushArgumentsFromEnvironment(arguments_count);
   HInvokeFunction* call =
-      New<HInvokeFunction>(function, known_function, arguments_count);
+      New<HInvokeFunction>(function, known_function, arguments_count,
+                           syntactic_tail_call_mode, tail_call_mode);
   Drop(1);  // Function
   ast_context()->ReturnInstruction(call, expr->id());
 }
@@ -9459,13 +9442,15 @@
   HValue* checked_function = AddCheckMap(function, function_map);
 
   if (function_state()->outer() == NULL) {
+    TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+    TailCallMode tail_call_mode =
+        function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
     HInstruction* elements = Add<HArgumentsElements>(false);
     HInstruction* length = Add<HArgumentsLength>(elements);
     HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
-    HInstruction* result = New<HApplyArguments>(function,
-                                                wrapped_receiver,
-                                                length,
-                                                elements);
+    HInstruction* result = New<HApplyArguments>(
+        function, wrapped_receiver, length, elements, tail_call_mode);
     ast_context()->ReturnInstruction(result, expr->id());
   } else {
     // We are inside inlined function and we know exactly what is inside
@@ -9733,9 +9718,6 @@
 
 
 void HOptimizedGraphBuilder::VisitCall(Call* expr) {
-  if (expr->tail_call_mode() == TailCallMode::kAllow) {
-    return Bailout(kTailCall);
-  }
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
@@ -9744,6 +9726,10 @@
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
   HInstruction* call = NULL;
 
+  TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+  TailCallMode tail_call_mode =
+      function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
   Property* prop = callee->AsProperty();
   if (prop != NULL) {
     CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -9797,16 +9783,19 @@
       // Wrap the receiver if necessary.
       if (NeedsWrapping(maps->first(), known_function)) {
         // Since HWrapReceiver currently cannot actually wrap numbers and
-        // strings, use the regular CallFunctionStub for method calls to wrap
+        // strings, use the regular call builtin for method calls to wrap
         // the receiver.
         // TODO(verwaest): Support creation of value wrappers directly in
         // HWrapReceiver.
-        call = New<HCallFunction>(function, argument_count,
-                                  ConvertReceiverMode::kNotNullOrUndefined);
+        call = NewCallFunction(
+            function, argument_count, syntactic_tail_call_mode,
+            ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
       } else if (TryInlineCall(expr)) {
         return;
       } else {
-        call = BuildCallConstantFunction(known_function, argument_count);
+        call =
+            NewCallConstantFunction(known_function, argument_count,
+                                    syntactic_tail_call_mode, tail_call_mode);
       }
 
     } else {
@@ -9825,8 +9814,9 @@
       Push(receiver);
 
       CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
-      call = New<HCallFunction>(function, argument_count,
-                                ConvertReceiverMode::kNotNullOrUndefined);
+      call = NewCallFunction(function, argument_count, syntactic_tail_call_mode,
+                             ConvertReceiverMode::kNotNullOrUndefined,
+                             tail_call_mode);
     }
     PushArgumentsFromEnvironment(argument_count);
 
@@ -9873,20 +9863,22 @@
       if (TryInlineCall(expr)) return;
 
       PushArgumentsFromEnvironment(argument_count);
-      call = BuildCallConstantFunction(expr->target(), argument_count);
+      call = NewCallConstantFunction(expr->target(), argument_count,
+                                     syntactic_tail_call_mode, tail_call_mode);
     } else {
       PushArgumentsFromEnvironment(argument_count);
-      HCallFunction* call_function = New<HCallFunction>(
-          function, argument_count, ConvertReceiverMode::kNullOrUndefined);
-      call = call_function;
       if (expr->is_uninitialized() &&
           expr->IsUsingCallFeedbackICSlot(isolate())) {
         // We've never seen this call before, so let's have Crankshaft learn
         // through the type vector.
-        Handle<TypeFeedbackVector> vector =
-            handle(current_feedback_vector(), isolate());
-        FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
-        call_function->SetVectorAndSlot(vector, slot);
+        call = NewCallFunctionViaIC(function, argument_count,
+                                    syntactic_tail_call_mode,
+                                    ConvertReceiverMode::kNullOrUndefined,
+                                    tail_call_mode, expr->CallFeedbackICSlot());
+      } else {
+        call = NewCallFunction(
+            function, argument_count, syntactic_tail_call_mode,
+            ConvertReceiverMode::kNullOrUndefined, tail_call_mode);
       }
     }
   }
@@ -10509,7 +10501,29 @@
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
   if (expr->is_jsruntime()) {
-    return Bailout(kCallToAJavaScriptRuntimeFunction);
+    // Crankshaft always specializes to the native context, so we can just grab
+    // the constant function from the current native context and embed that into
+    // the code object.
+    Handle<JSFunction> known_function(
+        JSFunction::cast(
+            current_info()->native_context()->get(expr->context_index())),
+        isolate());
+
+    // The callee and the receiver both have to be pushed onto the operand stack
+    // before arguments are being evaluated.
+    HConstant* function = Add<HConstant>(known_function);
+    HValue* receiver = ImplicitReceiverFor(function, known_function);
+    Push(function);
+    Push(receiver);
+
+    int argument_count = expr->arguments()->length() + 1;  // Count receiver.
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
+    PushArgumentsFromEnvironment(argument_count);
+    HInstruction* call = NewCallConstantFunction(known_function, argument_count,
+                                                 TailCallMode::kDisallow,
+                                                 TailCallMode::kDisallow);
+    Drop(1);  // Function
+    return ast_context()->ReturnInstruction(call, expr->id());
   }
 
   const Runtime::Function* function = expr->function();
@@ -10661,7 +10675,7 @@
     rep = Representation::Smi();
   }
 
-  if (returns_original_input && !is_strong(function_language_mode())) {
+  if (returns_original_input) {
     // We need an explicit HValue representing ToNumber(input).  The
     // actual HChange instruction we need is (sometimes) added in a later
     // phase, so it is not available now to be used as an input to HAdd and
@@ -10686,11 +10700,7 @@
     add->set_observed_input_representation(1, rep);
     add->set_observed_input_representation(2, Representation::Smi());
   }
-  if (!is_strong(function_language_mode())) {
-    instr->ClearAllSideEffects();
-  } else {
-    Add<HSimulate>(expr->ToNumberId(), REMOVABLE_SIMULATE);
-  }
+  instr->ClearAllSideEffects();
   instr->SetFlag(HInstruction::kCannotBeTagged);
   return instr;
 }
@@ -11331,12 +11341,10 @@
 
     // Translate right subexpression by visiting it in the same AST
     // context as the entire expression.
-    if (eval_right->HasPredecessor()) {
-      eval_right->SetJoinId(expr->RightId());
-      set_current_block(eval_right);
-      Visit(expr->right());
-    }
-
+    CHECK(eval_right->HasPredecessor());
+    eval_right->SetJoinId(expr->RightId());
+    set_current_block(eval_right);
+    Visit(expr->right());
   } else if (ast_context()->IsValue()) {
     CHECK_ALIVE(VisitForValue(expr->left()));
     DCHECK(current_block() != NULL);
@@ -11358,7 +11366,7 @@
     // We need an extra block to maintain edge-split form.
     HBasicBlock* empty_block = graph()->CreateBasicBlock();
     HBasicBlock* eval_right = graph()->CreateBasicBlock();
-    ToBooleanStub::Types expected(expr->left()->to_boolean_types());
+    ToBooleanICStub::Types expected(expr->left()->to_boolean_types());
     HBranch* test = is_logical_and
         ? New<HBranch>(left_value, expected, eval_right, empty_block)
         : New<HBranch>(left_value, expected, empty_block, eval_right);
@@ -11392,20 +11400,22 @@
     // second one is not a merge node, and that we really have no good AST ID to
     // put on that first HSimulate.
 
-    if (empty_block->HasPredecessor()) {
-      empty_block->SetJoinId(expr->id());
-    } else {
-      empty_block = NULL;
-    }
+    // Technically, we should be able to handle the case when one side of
+    // the test is not connected, but this can trip up liveness analysis
+    // if we did not fully connect the test context based on some optimistic
+    // assumption. If such an assumption was violated, we would end up with
+    // an environment with optimized-out values. So we should always
+    // conservatively connect the test context.
 
-    if (right_block->HasPredecessor()) {
-      right_block->SetJoinId(expr->RightId());
-      set_current_block(right_block);
-      CHECK_BAILOUT(VisitForEffect(expr->right()));
-      right_block = current_block();
-    } else {
-      right_block = NULL;
-    }
+    CHECK(right_block->HasPredecessor());
+    CHECK(empty_block->HasPredecessor());
+
+    empty_block->SetJoinId(expr->id());
+
+    right_block->SetJoinId(expr->RightId());
+    set_current_block(right_block);
+    CHECK_BAILOUT(VisitForEffect(expr->right()));
+    right_block = current_block();
 
     HBasicBlock* join_block =
       CreateJoin(empty_block, right_block, expr->id());
@@ -11474,7 +11484,7 @@
   if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
     return HandleLiteralCompareTypeof(expr, sub_expr, check);
   }
-  if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
     return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
   }
   if (expr->IsLiteralCompareNull(&sub_expr)) {
@@ -11510,6 +11520,7 @@
   }
 
   if (op == Token::INSTANCEOF) {
+    DCHECK(!FLAG_harmony_instanceof);
     // Check to see if the rhs of the instanceof is a known function.
     if (right->IsConstant() &&
         HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
@@ -11699,6 +11710,20 @@
         New<HCompareNumericAndBranch>(left, right, op);
     return result;
   } else {
+    if (op == Token::EQ) {
+      if (left->IsConstant() &&
+          HConstant::cast(left)->GetInstanceType() == ODDBALL_TYPE &&
+          HConstant::cast(left)->IsUndetectable()) {
+        return New<HIsUndetectableAndBranch>(right);
+      }
+
+      if (right->IsConstant() &&
+          HConstant::cast(right)->GetInstanceType() == ODDBALL_TYPE &&
+          HConstant::cast(right)->IsUndetectable()) {
+        return New<HIsUndetectableAndBranch>(left);
+      }
+    }
+
     if (combined_rep.IsTagged() || combined_rep.IsNone()) {
       HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
       result->set_observed_input_representation(1, left_rep);
@@ -11738,22 +11763,17 @@
   if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
   CHECK_ALIVE(VisitForValue(sub_expr));
   HValue* value = Pop();
+  HControlInstruction* instr;
   if (expr->op() == Token::EQ_STRICT) {
     HConstant* nil_constant = nil == kNullValue
         ? graph()->GetConstantNull()
         : graph()->GetConstantUndefined();
-    HCompareObjectEqAndBranch* instr =
-        New<HCompareObjectEqAndBranch>(value, nil_constant);
-    return ast_context()->ReturnControl(instr, expr->id());
+    instr = New<HCompareObjectEqAndBranch>(value, nil_constant);
   } else {
     DCHECK_EQ(Token::EQ, expr->op());
-    Type* type = expr->combined_type()->Is(Type::None())
-                     ? Type::Any()
-                     : expr->combined_type();
-    HIfContinuation continuation;
-    BuildCompareNil(value, type, &continuation);
-    return ast_context()->ReturnContinuation(&continuation, expr->id());
+    instr = New<HIsUndetectableAndBranch>(value);
   }
+  return ast_context()->ReturnControl(instr, expr->id());
 }
 
 
@@ -12268,22 +12288,13 @@
   if (input->type().IsSmi()) {
     return ast_context()->ReturnValue(input);
   } else {
-    IfBuilder if_inputissmi(this);
-    if_inputissmi.If<HIsSmiAndBranch>(input);
-    if_inputissmi.Then();
-    {
-      // Return the input value.
-      Push(input);
-      Add<HSimulate>(call->id(), FIXED_SIMULATE);
-    }
-    if_inputissmi.Else();
-    {
-      Add<HPushArguments>(input);
-      Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToInteger), 1));
-      Add<HSimulate>(call->id(), FIXED_SIMULATE);
-    }
-    if_inputissmi.End();
-    return ast_context()->ReturnValue(Pop());
+    Callable callable = CodeFactory::ToInteger(isolate());
+    HValue* stub = Add<HConstant>(callable.code());
+    HValue* values[] = {context(), input};
+    HInstruction* result =
+        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+                                 Vector<HValue*>(values, arraysize(values)));
+    return ast_context()->ReturnInstruction(result, call->id());
   }
 }
 
@@ -12528,6 +12539,18 @@
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
+// Support for direct creation of new objects.
+void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
+  DCHECK_EQ(2, call->arguments()->length());
+  CHECK_ALIVE(VisitExpressions(call->arguments()));
+  FastNewObjectStub stub(isolate());
+  FastNewObjectDescriptor descriptor(isolate());
+  HValue* values[] = {context(), Pop(), Pop()};
+  HConstant* stub_value = Add<HConstant>(stub.GetCode());
+  HInstruction* result = New<HCallWithDescriptor>(
+      stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
+  return ast_context()->ReturnInstruction(result, call->id());
+}
 
 // Support for direct calls from JavaScript to native RegExp code.
 void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
@@ -12621,6 +12644,45 @@
 void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
   DCHECK_LE(2, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
+
+  // Try and customize ES6 instanceof here.
+  // We should at least have the constructor on the expression stack.
+  if (FLAG_harmony_instanceof && FLAG_harmony_instanceof_opt &&
+      call->arguments()->length() == 3) {
+    HValue* target = environment()->ExpressionStackAt(2);
+    if (target->IsConstant()) {
+      HConstant* constant_function = HConstant::cast(target);
+      if (constant_function->handle(isolate())->IsJSFunction()) {
+        Handle<JSFunction> func =
+            Handle<JSFunction>::cast(constant_function->handle(isolate()));
+        if (*func == isolate()->native_context()->ordinary_has_instance()) {
+          // Look at the function, which will be argument 1.
+          HValue* right = environment()->ExpressionStackAt(1);
+          if (right->IsConstant() &&
+              HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
+            Handle<JSFunction> constructor = Handle<JSFunction>::cast(
+                HConstant::cast(right)->handle(isolate()));
+            if (constructor->IsConstructor() &&
+                !constructor->map()->has_non_instance_prototype()) {
+              JSFunction::EnsureHasInitialMap(constructor);
+              DCHECK(constructor->has_initial_map());
+              Handle<Map> initial_map(constructor->initial_map(), isolate());
+              top_info()->dependencies()->AssumeInitialMapCantChange(
+                  initial_map);
+              HInstruction* prototype =
+                  Add<HConstant>(handle(initial_map->prototype(), isolate()));
+              HValue* left = environment()->ExpressionStackAt(0);
+              HHasInPrototypeChainAndBranch* result =
+                  New<HHasInPrototypeChainAndBranch>(left, prototype);
+              Drop(3);
+              return ast_context()->ReturnControl(result, call->id());
+            }
+          }
+        }
+      }
+    }
+  }
+
   CallTrampolineDescriptor descriptor(isolate());
   PushArgumentsFromEnvironment(call->arguments()->length() - 1);
   HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
@@ -12646,24 +12708,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathClz32);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12673,15 +12717,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12888,6 +12923,12 @@
   return ast_context()->ReturnValue(value);
 }
 
+void HOptimizedGraphBuilder::GenerateGetOrdinaryHasInstance(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 0);
+  // ordinary_has_instance is immutable so we can treat it as a constant.
+  HValue* value = Add<HConstant>(isolate()->ordinary_has_instance());
+  return ast_context()->ReturnValue(value);
+}
 
 #undef CHECK_BAILOUT
 #undef CHECK_ALIVE
@@ -13121,14 +13162,21 @@
   return new_env;
 }
 
+void HEnvironment::MarkAsTailCaller() {
+  DCHECK_EQ(JS_FUNCTION, frame_type());
+  frame_type_ = TAIL_CALLER_FUNCTION;
+}
+
+void HEnvironment::ClearTailCallerMark() {
+  DCHECK_EQ(TAIL_CALLER_FUNCTION, frame_type());
+  frame_type_ = JS_FUNCTION;
+}
 
 HEnvironment* HEnvironment::CopyForInlining(
-    Handle<JSFunction> target,
-    int arguments,
-    FunctionLiteral* function,
-    HConstant* undefined,
-    InliningKind inlining_kind) const {
-  DCHECK(frame_type() == JS_FUNCTION);
+    Handle<JSFunction> target, int arguments, FunctionLiteral* function,
+    HConstant* undefined, InliningKind inlining_kind,
+    TailCallMode syntactic_tail_call_mode) const {
+  DCHECK_EQ(JS_FUNCTION, frame_type());
 
   // Outer environment is a copy of this one without the arguments.
   int arity = function->scope()->num_parameters();
@@ -13137,6 +13185,11 @@
   outer->Drop(arguments + 1);  // Including receiver.
   outer->ClearHistory();
 
+  if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+    DCHECK_EQ(NORMAL_RETURN, inlining_kind);
+    outer->MarkAsTailCaller();
+  }
+
   if (inlining_kind == CONSTRUCT_CALL_RETURN) {
     // Create artificial constructor stub environment.  The receiver should
     // actually be the constructor function, but we pass the newly allocated
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
index ce0d0df..10c0baa 100644
--- a/src/crankshaft/hydrogen.h
+++ b/src/crankshaft/hydrogen.h
@@ -11,6 +11,7 @@
 #include "src/ast/scopes.h"
 #include "src/bailout-reason.h"
 #include "src/compiler.h"
+#include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/hydrogen-instructions.h"
 #include "src/zone.h"
 
@@ -293,8 +294,6 @@
 };
 
 
-class BoundsCheckTable;
-class InductionVariableBlocksTable;
 class HGraph final : public ZoneObject {
  public:
   explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
@@ -333,6 +332,7 @@
   HConstant* GetConstantBool(bool value);
   HConstant* GetConstantHole();
   HConstant* GetConstantNull();
+  HConstant* GetConstantOptimizedOut();
   HConstant* GetInvalidContext();
 
   bool IsConstantUndefined(HConstant* constant);
@@ -400,9 +400,6 @@
     use_optimistic_licm_ = value;
   }
 
-  void MarkRecursive() { is_recursive_ = true; }
-  bool is_recursive() const { return is_recursive_; }
-
   void MarkDependsOnEmptyArrayProtoElements() {
     // Add map dependency if not already added.
     if (depends_on_empty_array_proto_elements_) return;
@@ -474,6 +471,7 @@
   SetOncePointer<HConstant> constant_false_;
   SetOncePointer<HConstant> constant_the_hole_;
   SetOncePointer<HConstant> constant_null_;
+  SetOncePointer<HConstant> constant_optimized_out_;
   SetOncePointer<HConstant> constant_invalid_context_;
 
   HOsrBuilder* osr_;
@@ -482,7 +480,6 @@
   CallInterfaceDescriptor descriptor_;
   Zone* zone_;
 
-  bool is_recursive_;
   bool use_optimistic_licm_;
   bool depends_on_empty_array_proto_elements_;
   int type_change_checksum_;
@@ -504,10 +501,10 @@
   JS_GETTER,
   JS_SETTER,
   ARGUMENTS_ADAPTOR,
+  TAIL_CALLER_FUNCTION,
   STUB
 };
 
-
 class HEnvironment final : public ZoneObject {
  public:
   HEnvironment(HEnvironment* outer,
@@ -616,16 +613,21 @@
   // Create an "inlined version" of this environment, where the original
   // environment is the outer environment but the top expression stack
   // elements are moved to an inner environment as parameters.
-  HEnvironment* CopyForInlining(Handle<JSFunction> target,
-                                int arguments,
-                                FunctionLiteral* function,
-                                HConstant* undefined,
-                                InliningKind inlining_kind) const;
+  HEnvironment* CopyForInlining(Handle<JSFunction> target, int arguments,
+                                FunctionLiteral* function, HConstant* undefined,
+                                InliningKind inlining_kind,
+                                TailCallMode syntactic_tail_call_mode) const;
 
   HEnvironment* DiscardInlined(bool drop_extra) {
     HEnvironment* outer = outer_;
-    while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
+    while (outer->frame_type() != JS_FUNCTION &&
+           outer->frame_type() != TAIL_CALLER_FUNCTION) {
+      outer = outer->outer_;
+    }
     if (drop_extra) outer->Drop(1);
+    if (outer->frame_type() == TAIL_CALLER_FUNCTION) {
+      outer->ClearTailCallerMark();
+    }
     return outer;
   }
 
@@ -683,6 +685,11 @@
                                       FrameType frame_type,
                                       int arguments) const;
 
+  // Marks current environment as tail caller by setting frame type to
+  // TAIL_CALLER_FUNCTION.
+  void MarkAsTailCaller();
+  void ClearTailCallerMark();
+
   // True if index is included in the expression stack part of the environment.
   bool HasExpressionAt(int index) const;
 
@@ -852,10 +859,9 @@
 
 class FunctionState final {
  public:
-  FunctionState(HOptimizedGraphBuilder* owner,
-                CompilationInfo* info,
-                InliningKind inlining_kind,
-                int inlining_id);
+  FunctionState(HOptimizedGraphBuilder* owner, CompilationInfo* info,
+                InliningKind inlining_kind, int inlining_id,
+                TailCallMode tail_call_mode);
   ~FunctionState();
 
   CompilationInfo* compilation_info() { return compilation_info_; }
@@ -870,6 +876,11 @@
 
   FunctionState* outer() { return outer_; }
 
+  TailCallMode ComputeTailCallMode(TailCallMode tail_call_mode) const {
+    if (tail_call_mode_ == TailCallMode::kDisallow) return tail_call_mode_;
+    return tail_call_mode;
+  }
+
   HEnterInlined* entry() { return entry_; }
   void set_entry(HEnterInlined* entry) { entry_ = entry; }
 
@@ -887,6 +898,10 @@
 
   int inlining_id() const { return inlining_id_; }
 
+  void IncrementInDoExpressionScope() { do_expression_scope_count_++; }
+  void DecrementInDoExpressionScope() { do_expression_scope_count_--; }
+  bool IsInsideDoExpressionScope() { return do_expression_scope_count_ > 0; }
+
  private:
   HOptimizedGraphBuilder* owner_;
 
@@ -899,6 +914,10 @@
   // The kind of call which is currently being inlined.
   InliningKind inlining_kind_;
 
+  // Defines whether the calls with TailCallMode::kAllow in the function body
+  // can be generated as tail calls.
+  TailCallMode tail_call_mode_;
+
   // When inlining in an effect or value context, this is the return block.
   // It is NULL otherwise.  When inlining in a test context, there are a
   // pair of return blocks in the context.  When not inlining, there is no
@@ -919,6 +938,8 @@
   int inlining_id_;
   SourcePosition outer_source_position_;
 
+  int do_expression_scope_count_;
+
   FunctionState* outer_;
 };
 
@@ -1267,6 +1288,26 @@
     return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8));
   }
 
+  template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+            class P7, class P8, class P9>
+  I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8,
+                  p9);
+  }
+
+  template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+            class P7, class P8, class P9>
+  HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7,
+                            P8 p8, P9 p9) {
+    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
+  }
+
+  template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+            class P7, class P8, class P9>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
+    return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
+  }
+
   void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
 
   // When initializing arrays, we'll unfold the loop if the number of elements
@@ -1837,11 +1878,6 @@
 
   HValue* BuildElementIndexHash(HValue* index);
 
-  enum MapEmbedding { kEmbedMapsDirectly, kEmbedMapsViaWeakCells };
-
-  void BuildCompareNil(HValue* value, Type* type, HIfContinuation* continuation,
-                       MapEmbedding map_embedding = kEmbedMapsDirectly);
-
   void BuildCreateAllocationMemento(HValue* previous_object,
                                     HValue* previous_object_size,
                                     HValue* payload);
@@ -2198,6 +2234,7 @@
   F(IsRegExp)                          \
   F(IsJSProxy)                         \
   F(Call)                              \
+  F(NewObject)                         \
   F(ValueOf)                           \
   F(StringCharFromCode)                \
   F(StringCharAt)                      \
@@ -2222,6 +2259,7 @@
   F(RegExpSource)                      \
   F(NumberToString)                    \
   F(DebugIsActive)                     \
+  F(GetOrdinaryHasInstance)            \
   /* Typed Arrays */                   \
   F(TypedArrayInitialize)              \
   F(MaxSmi)                            \
@@ -2235,9 +2273,6 @@
   F(ConstructDouble)                   \
   F(DoubleHi)                          \
   F(DoubleLo)                          \
-  F(MathClz32)                         \
-  F(MathFloor)                         \
-  F(MathSqrt)                          \
   F(MathLogRT)                         \
   /* ES6 Collections */                \
   F(MapClear)                          \
@@ -2404,7 +2439,8 @@
   int InliningAstSize(Handle<JSFunction> target);
   bool TryInline(Handle<JSFunction> target, int arguments_count,
                  HValue* implicit_return_value, BailoutId ast_id,
-                 BailoutId return_id, InliningKind inlining_kind);
+                 BailoutId return_id, InliningKind inlining_kind,
+                 TailCallMode syntactic_tail_call_mode);
 
   bool TryInlineCall(Call* expr);
   bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2435,16 +2471,17 @@
                           BailoutId ast_id);
   bool TryInlineApiCall(Handle<Object> function, HValue* receiver,
                         SmallMapList* receiver_maps, int argc, BailoutId ast_id,
-                        ApiCallType call_type);
+                        ApiCallType call_type,
+                        TailCallMode syntactic_tail_call_mode);
   static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
   static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
 
   // If --trace-inlining, print a line of the inlining trace.  Inlining
   // succeeded if the reason string is NULL and failed if there is a
   // non-NULL reason string.
-  void TraceInline(Handle<JSFunction> target,
-                   Handle<JSFunction> caller,
-                   const char* failure_reason);
+  void TraceInline(Handle<JSFunction> target, Handle<JSFunction> caller,
+                   const char* failure_reason,
+                   TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
   void HandleGlobalVariableAssignment(Variable* var, HValue* value,
                                       FeedbackVectorSlot slot,
@@ -2826,14 +2863,23 @@
   void AddCheckPrototypeMaps(Handle<JSObject> holder,
                              Handle<Map> receiver_map);
 
-  HInstruction* NewPlainFunctionCall(HValue* fun, int argument_count);
+  void BuildEnsureCallable(HValue* object);
 
-  HInstruction* NewArgumentAdaptorCall(HValue* fun, HValue* context,
-                                       int argument_count,
-                                       HValue* expected_param_count);
+  HInstruction* NewCallFunction(HValue* function, int argument_count,
+                                TailCallMode syntactic_tail_call_mode,
+                                ConvertReceiverMode convert_mode,
+                                TailCallMode tail_call_mode);
 
-  HInstruction* BuildCallConstantFunction(Handle<JSFunction> target,
-                                          int argument_count);
+  HInstruction* NewCallFunctionViaIC(HValue* function, int argument_count,
+                                     TailCallMode syntactic_tail_call_mode,
+                                     ConvertReceiverMode convert_mode,
+                                     TailCallMode tail_call_mode,
+                                     FeedbackVectorSlot slot);
+
+  HInstruction* NewCallConstantFunction(Handle<JSFunction> target,
+                                        int argument_count,
+                                        TailCallMode syntactic_tail_call_mode,
+                                        TailCallMode tail_call_mode);
 
   bool CanBeFunctionApplyArguments(Call* expr);
 
@@ -3032,6 +3078,19 @@
   HGraphBuilder* builder_;
 };
 
+class DoExpressionScope final {
+ public:
+  explicit DoExpressionScope(HOptimizedGraphBuilder* builder)
+      : builder_(builder) {
+    builder_->function_state()->IncrementInDoExpressionScope();
+  }
+  ~DoExpressionScope() {
+    builder_->function_state()->DecrementInDoExpressionScope();
+  }
+
+ private:
+  HOptimizedGraphBuilder* builder_;
+};
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
index a535153..ae1ca1f 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -57,13 +57,6 @@
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
-  support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
-  dynamic_frame_alignment_ = info()->IsOptimizing() &&
-      ((chunk()->num_double_slots() > 2 &&
-        !chunk()->graph()->is_recursive()) ||
-       !info()->osr_ast_id().IsNone());
-
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -130,31 +123,6 @@
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
-      // Move state of dynamic frame alignment into edx.
-      __ Move(edx, Immediate(kNoAlignmentPadding));
-
-      Label do_not_pad, align_loop;
-      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
-      // Align esp + 4 to a multiple of 2 * kPointerSize.
-      __ test(esp, Immediate(kPointerSize));
-      __ j(not_zero, &do_not_pad, Label::kNear);
-      __ push(Immediate(0));
-      __ mov(ebx, esp);
-      __ mov(edx, Immediate(kAlignmentPaddingPushed));
-      // Copy arguments, receiver, and return address.
-      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
-      __ bind(&align_loop);
-      __ mov(eax, Operand(ebx, 1 * kPointerSize));
-      __ mov(Operand(ebx, 0), eax);
-      __ add(Operand(ebx), Immediate(kPointerSize));
-      __ dec(ecx);
-      __ j(not_zero, &align_loop, Label::kNear);
-      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-      __ bind(&do_not_pad);
-    }
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
@@ -162,61 +130,29 @@
     DCHECK(!frame_is_built_);
     frame_is_built_ = true;
     if (info()->IsStub()) {
-      __ StubPrologue();
+      __ StubPrologue(StackFrame::STUB);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue());
     }
   }
 
-  if (info()->IsOptimizing() &&
-      dynamic_frame_alignment_ &&
-      FLAG_debug_code) {
-    __ test(esp, Immediate(kPointerSize));
-    __ Assert(zero, kFrameIsExpectedToBeAligned);
-  }
-
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
   DCHECK(slots != 0 || !info()->IsOptimizing());
   if (slots > 0) {
-    if (slots == 1) {
-      if (dynamic_frame_alignment_) {
-        __ push(edx);
-      } else {
-        __ push(Immediate(kNoAlignmentPadding));
-      }
-    } else {
-      if (FLAG_debug_code) {
-        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+    __ sub(Operand(esp), Immediate(slots * kPointerSize));
 #ifdef _MSC_VER
-        MakeSureStackPagesMapped(slots * kPointerSize);
+    MakeSureStackPagesMapped(slots * kPointerSize);
 #endif
-        __ push(eax);
-        __ mov(Operand(eax), Immediate(slots));
-        Label loop;
-        __ bind(&loop);
-        __ mov(MemOperand(esp, eax, times_4, 0),
-               Immediate(kSlotsZapValue));
-        __ dec(eax);
-        __ j(not_zero, &loop);
-        __ pop(eax);
-      } else {
-        __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
-        MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
-      }
-
-      if (support_aligned_spilled_doubles_) {
-        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
-        // Store dynamic frame alignment state in the first local.
-        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
-        if (dynamic_frame_alignment_) {
-          __ mov(Operand(ebp, offset), edx);
-        } else {
-          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
-        }
-      }
+    if (FLAG_debug_code) {
+      __ push(eax);
+      __ mov(Operand(eax), Immediate(slots));
+      Label loop;
+      __ bind(&loop);
+      __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
+      __ dec(eax);
+      __ j(not_zero, &loop);
+      __ pop(eax);
     }
 
     if (info()->saves_caller_doubles()) SaveCallerDoubles();
@@ -298,47 +234,11 @@
 
   osr_pc_offset_ = masm()->pc_offset();
 
-    // Move state of dynamic frame alignment into edx.
-  __ Move(edx, Immediate(kNoAlignmentPadding));
-
-  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
-    Label do_not_pad, align_loop;
-    // Align ebp + 4 to a multiple of 2 * kPointerSize.
-    __ test(ebp, Immediate(kPointerSize));
-    __ j(zero, &do_not_pad, Label::kNear);
-    __ push(Immediate(0));
-    __ mov(ebx, esp);
-    __ mov(edx, Immediate(kAlignmentPaddingPushed));
-
-    // Move all parts of the frame over one word. The frame consists of:
-    // unoptimized frame slots, alignment state, context, frame pointer, return
-    // address, receiver, and the arguments.
-    __ mov(ecx, Immediate(scope()->num_parameters() +
-           5 + graph()->osr()->UnoptimizedFrameSlots()));
-
-    __ bind(&align_loop);
-    __ mov(eax, Operand(ebx, 1 * kPointerSize));
-    __ mov(Operand(ebx, 0), eax);
-    __ add(Operand(ebx), Immediate(kPointerSize));
-    __ dec(ecx);
-    __ j(not_zero, &align_loop, Label::kNear);
-    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-    __ sub(Operand(ebp), Immediate(kPointerSize));
-    __ bind(&do_not_pad);
-  }
-
-  // Save the first local, which is overwritten by the alignment state.
-  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
-  __ push(alignment_loc);
-
-  // Set the dynamic frame alignment state.
-  __ mov(alignment_loc, edx);
-
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  DCHECK(slots >= 1);
-  __ sub(esp, Immediate((slots - 1) * kPointerSize));
+  DCHECK(slots >= 0);
+  __ sub(esp, Immediate(slots * kPointerSize));
 }
 
 
@@ -380,29 +280,24 @@
   if (needs_frame.is_linked()) {
     __ bind(&needs_frame);
     /* stack layout
-       4: entry address
-       3: return address  <-- esp
-       2: garbage
+       3: entry address
+       2: return address  <-- esp
        1: garbage
        0: garbage
     */
-    __ sub(esp, Immediate(kPointerSize));    // Reserve space for stub marker.
-    __ push(MemOperand(esp, kPointerSize));  // Copy return address.
-    __ push(MemOperand(esp, 3 * kPointerSize));  // Copy entry address.
+    __ push(MemOperand(esp, 0));                 // Copy return address.
+    __ push(MemOperand(esp, 2 * kPointerSize));  // Copy entry address.
 
     /* stack layout
        4: entry address
        3: return address
-       2: garbage
        1: return address
        0: entry address  <-- esp
     */
-    __ mov(MemOperand(esp, 4 * kPointerSize), ebp);  // Save ebp.
-    // Copy context.
-    __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
-    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);  // Save ebp.
     // Fill ebp with the right stack frame address.
-    __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+    __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+
     // This variant of deopt can only be used with stubs. Since we don't
     // have a function pointer to install in the stack frame that we're
     // building, install a special marker there instead.
@@ -411,8 +306,7 @@
            Immediate(Smi::FromInt(StackFrame::STUB)));
 
     /* stack layout
-       4: old ebp
-       3: context pointer
+       3: old ebp
        2: stub marker
        1: return address
        0: entry address  <-- esp
@@ -447,9 +341,8 @@
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ push(ebp);  // Caller's frame pointer.
-        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
         __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-        __ lea(ebp, Operand(esp, 2 * kPointerSize));
+        __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
         Comment(";;; Deferred code");
       }
       code->Generate();
@@ -1969,15 +1862,16 @@
       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ cmp(reg, factory()->undefined_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // true -> true.
         __ cmp(reg, factory()->true_value());
         __ j(equal, instr->TrueLabel(chunk_));
@@ -1985,13 +1879,13 @@
         __ cmp(reg, factory()->false_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ cmp(reg, factory()->null_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ test(reg, Operand(reg));
         __ j(equal, instr->FalseLabel(chunk_));
@@ -2011,18 +1905,18 @@
         if (expected.CanBeUndetectable()) {
           // Undetectable -> false.
           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-                    1 << Map::kIsUndetectable);
+                    Immediate(1 << Map::kIsUndetectable));
           __ j(not_zero, instr->FalseLabel(chunk_));
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
         __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2033,19 +1927,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         __ CmpInstanceType(map, SYMBOL_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2237,7 +2131,7 @@
   }
   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
   __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
+            Immediate(1 << Map::kIsUndetectable));
   EmitBranch(instr, not_zero);
 }
 
@@ -2267,11 +2161,10 @@
   DCHECK(ToRegister(instr->left()).is(edx));
   DCHECK(ToRegister(instr->right()).is(eax));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-  __ test(eax, eax);
-
-  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+  __ CompareRoot(eax, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2342,11 +2235,12 @@
   DCHECK(!temp.is(temp2));
   __ JumpIfSmi(input, is_false);
 
-  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
+  __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    __ j(equal, is_false);
+    __ j(above_equal, is_false);
   }
 
   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -2429,7 +2323,7 @@
 
   // Deoptimize if the object needs to be access checked.
   __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded);
+            Immediate(1 << Map::kIsAccessCheckNeeded));
   DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
   // Deoptimize for proxies.
   __ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2462,18 +2356,11 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
-  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+void LCodeGen::EmitReturn(LReturn* instr) {
+  int extra_value_count = 1;
 
   if (instr->has_constant_parameter_count()) {
     int parameter_count = ToInteger32(instr->constant_parameter_count());
-    if (dynamic_frame_alignment && FLAG_debug_code) {
-      __ cmp(Operand(esp,
-                     (parameter_count + extra_value_count) * kPointerSize),
-             Immediate(kAlignmentZapValue));
-      __ Assert(equal, kExpectedAlignmentMarker);
-    }
     __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
   } else {
     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
@@ -2481,20 +2368,9 @@
     // The argument count parameter is a smi
     __ SmiUntag(reg);
     Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
-    if (dynamic_frame_alignment && FLAG_debug_code) {
-      DCHECK(extra_value_count == 2);
-      __ cmp(Operand(esp, reg, times_pointer_size,
-                     extra_value_count * kPointerSize),
-             Immediate(kAlignmentZapValue));
-      __ Assert(equal, kExpectedAlignmentMarker);
-    }
 
     // emit code to restore stack based on instr->parameter_count()
     __ pop(return_addr_reg);  // save return address
-    if (dynamic_frame_alignment) {
-      __ inc(reg);  // 1 more for alignment
-    }
-
     __ shl(reg, kPointerSizeLog2);
     __ add(esp, reg);
     __ jmp(return_addr_reg);
@@ -2513,25 +2389,12 @@
     __ CallRuntime(Runtime::kTraceExit);
   }
   if (info()->saves_caller_doubles()) RestoreCallerDoubles();
-  if (dynamic_frame_alignment_) {
-    // Fetch the state of the dynamic frame alignment.
-    __ mov(edx, Operand(ebp,
-      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
-  }
   if (NeedsEagerFrame()) {
     __ mov(esp, ebp);
     __ pop(ebp);
   }
-  if (dynamic_frame_alignment_) {
-    Label no_padding;
-    __ cmp(edx, Immediate(kNoAlignmentPadding));
-    __ j(equal, &no_padding, Label::kNear);
 
-    EmitReturn(instr, true);
-    __ bind(&no_padding);
-  }
-
-  EmitReturn(instr, false);
+  EmitReturn(instr);
 }
 
 
@@ -2941,11 +2804,12 @@
 
   if (instr->hydrogen()->from_inlined()) {
     __ lea(result, Operand(esp, -2 * kPointerSize));
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     // Check for arguments adapter frame.
     Label done, adapted;
     __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+    __ mov(result,
+           Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ cmp(Operand(result),
            Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
     __ j(equal, &adapted, Label::kNear);
@@ -2961,6 +2825,8 @@
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
     __ bind(&done);
+  } else {
+    __ mov(result, Operand(ebp));
   }
 }
 
@@ -3004,12 +2870,12 @@
     __ mov(scratch,
            FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
     __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
-              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
     __ j(not_equal, &receiver_ok, dist);
 
     // Do not transform the receiver to object for builtins.
     __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
-              1 << SharedFunctionInfo::kNativeBitWithinByte);
+              Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
     __ j(not_equal, &receiver_ok, dist);
   }
 
@@ -3065,13 +2931,25 @@
 
   // Invoke the function.
   __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(eax);
+    // It is safe to use ebx, ecx and edx as scratch registers here given that
+    // 1) we are not going to return to the caller function anyway,
+    // 2) ebx (expected arguments count) and edx (new.target) will be
+    //    initialized below.
+    PrepareForTailCall(actual, ebx, ecx, edx);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   ParameterCount actual(eax);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -3115,10 +2993,9 @@
   CallRuntime(Runtime::kDeclareGlobals, instr);
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -3134,21 +3011,38 @@
     __ mov(edx, factory()->undefined_value());
     __ mov(eax, arity);
 
+    bool is_self_call = function.is_identical_to(info()->closure());
+
     // Invoke function directly.
-    if (function.is_identical_to(info()->closure())) {
-      __ CallSelf();
+    if (is_self_call) {
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
     } else {
-      __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+      Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+      if (is_tail_call) {
+        __ jmp(target);
+      } else {
+        __ call(target);
+      }
     }
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
   } else {
     // We need to adapt arguments.
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(
         this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
   }
 }
 
@@ -3190,35 +3084,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  // Change context.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ mov(edx, factory()->undefined_value());
-  __ mov(eax, instr->arity());
-
-  bool is_self_call = false;
-  if (instr->hydrogen()->function()->IsConstant()) {
-    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
-    Handle<JSFunction> jsfun =
-      Handle<JSFunction>::cast(fun_const->handle(isolate()));
-    is_self_call = jsfun.is_identical_to(info()->closure());
-  }
-
-  if (is_self_call) {
-    __ CallSelf();
-  } else {
-    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
-  }
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   Register input_reg = ToRegister(instr->value());
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3313,8 +3178,14 @@
   }
 }
 
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  CpuFeatureScope scope(masm(), SSE4_1);
+  __ roundsd(output_reg, input_reg, kRoundDown);
+}
 
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   XMMRegister xmm_scratch = double_scratch0();
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3378,8 +3249,23 @@
   }
 }
 
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+  XMMRegister xmm_scratch = double_scratch0();
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  CpuFeatureScope scope(masm(), SSE4_1);
+  Label done;
+  __ roundsd(output_reg, input_reg, kRoundUp);
+  __ Move(xmm_scratch, -0.5);
+  __ addsd(xmm_scratch, output_reg);
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(below_equal, &done, Label::kNear);
+  __ Move(xmm_scratch, 1.0);
+  __ subsd(output_reg, xmm_scratch);
+  __ bind(&done);
+}
 
-void LCodeGen::DoMathRound(LMathRound* instr) {
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   XMMRegister xmm_scratch = double_scratch0();
@@ -3569,54 +3455,78 @@
   MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(ebp, scratch2);
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count.
+  __ mov(caller_args_count_reg,
+         Immediate(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+                        ReturnAddressState::kNotOnStack, 0);
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->function()).is(edi));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use ebx, ecx and edx as scratch registers here given that
+    // 1) we are not going to return to the caller function anyway,
+    // 2) ebx (expected arguments count) and edx (new.target) will be
+    //    initialized below.
+    PrepareForTailCall(actual, ebx, ecx, edx);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(edi, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
-  }
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->function()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  int arity = instr->arity();
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(edx));
-    DCHECK(vector_register.is(ebx));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ mov(vector_register, vector);
-    __ mov(slot_register, Immediate(Smi::FromInt(index)));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ Set(eax, arity);
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
 }
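
PrepareForTailCall above has to know how many caller arguments to drop before the frame is torn down: the count recorded in an arguments adaptor frame if one sits above the caller, otherwise the caller's formal parameter count. A rough standalone model of that choice (Frame and CallerArgsToDrop are hypothetical stand-ins, not the V8 frame layout):

#include <cstdio>

// Hypothetical stand-ins for the two cases PrepareForTailCall distinguishes.
struct Frame {
  bool is_arguments_adaptor;  // frame type slot == ARGUMENTS_ADAPTOR
  int adaptor_arg_count;      // only meaningful for adaptor frames
};

// Returns how many caller arguments must be removed before the tail call.
static int CallerArgsToDrop(const Frame& caller_frame,
                            int formal_parameter_count) {
  if (caller_frame.is_arguments_adaptor) {
    // An adaptor frame records the actual argument count; use it.
    return caller_frame.adaptor_arg_count;
  }
  // No adaptor frame: the caller holds exactly its formal parameter count.
  return formal_parameter_count;
}

int main() {
  Frame adapted{true, 5}, plain{false, 0};
  std::printf("%d %d\n", CallerArgsToDrop(adapted, 2),  // 5
                         CallerArgsToDrop(plain, 2));   // 2
  return 0;
}
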
 
@@ -4482,7 +4392,7 @@
       __ ucomisd(result_reg, xmm_scratch);
       __ j(not_zero, &done, Label::kNear);
       __ movmskpd(temp_reg, result_reg);
-      __ test_b(temp_reg, 1);
+      __ test_b(temp_reg, Immediate(1));
       DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
     }
     __ jmp(&done, Label::kNear);
@@ -4704,7 +4614,7 @@
 
   __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
   __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
-            1 << JSArrayBuffer::WasNeutered::kShift);
+            Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
   DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
 }
 
@@ -4720,8 +4630,7 @@
     InstanceType last;
     instr->hydrogen()->GetCheckInterval(&first, &last);
 
-    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-            static_cast<int8_t>(first));
+    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
@@ -4730,8 +4639,7 @@
       DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
       // Omit check for the last type.
       if (last != LAST_TYPE) {
-        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-                static_cast<int8_t>(last));
+        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
         DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
       }
     }
@@ -4742,7 +4650,7 @@
 
     if (base::bits::IsPowerOfTwo32(mask)) {
       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
-      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
       DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                    Deoptimizer::kWrongInstanceType);
     } else {
@@ -5026,13 +4934,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(eax));
-  __ push(eax);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTypeof(LTypeof* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5099,7 +5000,7 @@
     // Check for undetectable objects => true.
     __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     final_branch_condition = not_zero;
 
   } else if (String::Equals(type_name, factory()->function_string())) {
@@ -5120,7 +5021,7 @@
     __ j(below, false_label, false_distance);
     // Check for callable or undetectable objects => false.
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
-              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     final_branch_condition = zero;
 
 // clang-format off
@@ -5381,13 +5282,6 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
-}
-
-
 #undef __
 
 }  // namespace internal
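
The DoMathRoundD path added above uses SSE4.1 roundsd with kRoundUp and then steps the result back by 1.0 whenever it overshoots the input by more than 0.5. A minimal scalar sketch of that sequence, assuming only the C++ standard library (MathRoundDModel is an illustrative name, not a V8 function):

#include <cmath>
#include <cstdio>

// Scalar model of the sequence in DoMathRoundD: roundsd(kRoundUp) behaves like
// std::ceil, and the result is pulled back by 1.0 when ceil(x) - 0.5 > x.
// The net effect is JS Math.round semantics (ties round towards +infinity).
static double MathRoundDModel(double x) {
  double r = std::ceil(x);    // roundsd output_reg, input_reg, kRoundUp
  if (r - 0.5 > x) r -= 1.0;  // addsd/ucomisd/subsd adjustment
  return r;
}

int main() {
  std::printf("%g %g %g\n", MathRoundDModel(2.5),   // 3
              MathRoundDModel(-2.5),                // -2
              MathRoundDModel(2.4));                // 2
  return 0;
}
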
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.h b/src/crankshaft/ia32/lithium-codegen-ia32.h
index 589ef2e..bc61c96 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -29,8 +29,6 @@
         jump_table_(4, info->zone()),
         scope_(info->scope()),
         deferred_(8, info->zone()),
-        dynamic_frame_alignment_(false),
-        support_aligned_spilled_doubles_(false),
         frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
@@ -193,11 +191,14 @@
 
   void LoadContextFromDeferred(LOperand* context);
 
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
   // Generate a direct call to a known function. Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
@@ -300,7 +301,7 @@
   template <class T>
   void EmitVectorStoreICRegisters(T* instr);
 
-  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+  void EmitReturn(LReturn* instr);
 
   // Emits code for pushing either a tagged constant, a (non-double)
   // register, or a stack slot operand.
@@ -319,8 +320,6 @@
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   Scope* const scope_;
   ZoneList<LDeferredCode*> deferred_;
-  bool dynamic_frame_alignment_;
-  bool support_aligned_spilled_doubles_;
   bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc
index e2772d5..4afeef5 100644
--- a/src/crankshaft/ia32/lithium-ia32.cc
+++ b/src/crankshaft/ia32/lithium-ia32.cc
@@ -267,27 +267,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -434,13 +413,6 @@
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
 
-  // Reserve the first spill slot for the state of dynamic alignment.
-  if (info()->IsOptimizing()) {
-    int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    DCHECK_EQ(alignment_state_index, 4);
-    USE(alignment_state_index);
-  }
-
   // If compiling for OSR, reserve space for the unoptimized frame,
   // which will be subsumed into this frame.
   if (graph()->has_osr()) {
@@ -618,11 +590,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(
-      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
@@ -936,22 +904,16 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -964,15 +926,15 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
   LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
   if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
     branch = AssignEnvironment(branch);
   }
@@ -1100,16 +1062,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), edi);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1132,6 +1084,9 @@
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1140,6 +1095,9 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
   LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1170,22 +1128,33 @@
   }
 }
 
-
 LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LMathFloor* result = new(zone()) LMathFloor(input);
-  return AssignEnvironment(DefineAsRegister(result));
+  if (instr->representation().IsInteger32()) {
+    LMathFloorI* result = new (zone()) LMathFloorI(input);
+    return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    LMathFloorD* result = new (zone()) LMathFloorD(input);
+    return DefineAsRegister(result);
+  }
 }
 
-
 LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegister(instr->value());
-  LOperand* temp = FixedTemp(xmm4);
-  LMathRound* result = new(zone()) LMathRound(input, temp);
-  return AssignEnvironment(DefineAsRegister(result));
+  if (instr->representation().IsInteger32()) {
+    LOperand* temp = FixedTemp(xmm4);
+    LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+    return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    LMathRoundD* result = new (zone()) LMathRoundD(input);
+    return DefineAsRegister(result);
+  }
 }
 
-
 LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
   LOperand* input = UseRegister(instr->value());
   LMathFround* result = new (zone()) LMathFround(input);
@@ -1253,22 +1222,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseFixed(instr->function(), edi);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(edx);
-    vector = FixedTemp(ebx);
-  }
-
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
@@ -1834,13 +1787,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
   // The control instruction marking the end of a block that completed
   // abruptly (e.g., threw an exception).  There is nothing specific to do.
@@ -2507,11 +2453,6 @@
       Retry(kNotEnoughSpillSlotsForOsr);
       spill_index = 0;
     }
-    if (spill_index == 0) {
-      // The dynamic frame alignment state overwrites the first local.
-      // The first local is saved at the end of the unoptimized frame.
-      spill_index = graph()->osr()->UnoptimizedFrameSlots();
-    }
     spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2551,13 +2492,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), eax);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseFixed(instr->value(), ebx);
@@ -2595,11 +2529,9 @@
   HEnvironment* outer = current_block_->last_environment();
   outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2660,13 +2592,6 @@
   return AssignPointerMap(result);
 }
 
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
-
 }  // namespace internal
 }  // namespace v8
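
The DoBranch builder above keys its work off a ToBooleanICStub::Types bit set: one check is emitted per expected input type, and an environment (deopt point) is only assigned when the set is not generic. A compact sketch of that kind of type-set dispatch, with illustrative names rather than the real ToBooleanICStub interface:

#include <cstdint>
#include <cstdio>

// Illustrative type set, modelled on how ToBooleanICStub::Types is consumed.
enum Type : uint16_t {
  kUndefined = 1 << 0,
  kBoolean   = 1 << 1,
  kSmi       = 1 << 2,
  kString    = 1 << 3,
  kGeneric   = 0xFFFF,
};

struct Types {
  uint16_t bits;
  bool IsEmpty() const { return bits == 0; }
  bool IsGeneric() const { return bits == kGeneric; }
  bool Contains(Type t) const { return (bits & t) != 0; }
};

// Mirrors the shape of the codegen: one check per expected type, and a
// deoptimization fallback only when the set is not already generic.
static void EmitToBooleanChecks(Types expected) {
  if (expected.IsEmpty()) expected.bits = kGeneric;  // path never taken yet
  if (expected.Contains(kUndefined)) std::puts("check undefined -> false");
  if (expected.Contains(kBoolean))   std::puts("check boolean -> its value");
  if (expected.Contains(kSmi))       std::puts("check smi: 0 -> false");
  if (expected.Contains(kString))    std::puts("check string: empty -> false");
  if (!expected.IsGeneric())         std::puts("otherwise: deoptimize");
}

int main() {
  EmitToBooleanChecks(Types{static_cast<uint16_t>(kBoolean | kSmi)});
  return 0;
}
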
 
diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h
index e22ab43..68541a4 100644
--- a/src/crankshaft/ia32/lithium-ia32.h
+++ b/src/crankshaft/ia32/lithium-ia32.h
@@ -33,9 +33,7 @@
   V(BitI)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallJSFunction)                          \
   V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CheckArrayBufferNotNeutered)             \
@@ -105,12 +103,14 @@
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
-  V(MathFloor)                               \
+  V(MathFloorD)                              \
+  V(MathFloorI)                              \
   V(MathFround)                              \
   V(MathLog)                                 \
   V(MathMinMax)                              \
   V(MathPowHalf)                             \
-  V(MathRound)                               \
+  V(MathRoundD)                              \
+  V(MathRoundI)                              \
   V(MathSqrt)                                \
   V(MaybeGrowElements)                       \
   V(ModByConstI)                             \
@@ -135,7 +135,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyed)                              \
   V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
@@ -147,7 +146,6 @@
   V(SubI)                                    \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
@@ -156,7 +154,6 @@
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
   Opcode opcode() const final { return LInstruction::k##type; } \
   void CompileToNative(LCodeGen* generator) final;              \
@@ -228,6 +225,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -262,6 +266,8 @@
   virtual LOperand* TempAt(int i) = 0;
 
   class IsCallBits: public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -539,6 +545,7 @@
   LOperand* elements() { return inputs_[3]; }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 };
 
 
@@ -808,23 +815,43 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LMathFloor(LOperand* value) {
-    inputs_[0] = value;
-  }
+  explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
 
-  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
-
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathRound(LOperand* value, LOperand* temp) {
+  explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRoundI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
@@ -832,7 +859,7 @@
   LOperand* temp() { return temps_[0]; }
   LOperand* value() { return inputs_[0]; }
 
-  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
@@ -1723,23 +1750,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
  public:
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1798,29 +1808,6 @@
 };
 
 
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2415,19 +2402,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
@@ -2542,18 +2516,6 @@
 };
 
 
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
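
The header change above packs the new syntactic-tail-call flag into the same bit_field_ word as IsCall by chaining BitField specializations via IsCallBits::kNext. A self-contained re-creation of that idiom (a simplified BitField, not V8's own template):

#include <cstdint>
#include <cstdio>

// Minimal re-creation of the BitField<type, shift, size> idiom used above.
template <class T, int shift, int size>
struct BitField {
  static constexpr int kNext = shift + size;
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | (static_cast<uint32_t>(value) << shift);
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

// Two boolean flags sharing one word, as in LInstruction::bit_field_.
using IsCallBits = BitField<bool, 0, 1>;
using IsSyntacticTailCallBits = BitField<bool, IsCallBits::kNext, 1>;

int main() {
  uint32_t bit_field = 0;
  bit_field = IsCallBits::update(bit_field, true);
  bit_field = IsSyntacticTailCallBits::update(bit_field, true);
  std::printf("call=%d tail=%d\n", IsCallBits::decode(bit_field),
              IsSyntacticTailCallBits::decode(bit_field));  // call=1 tail=1
  return 0;
}
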
diff --git a/src/crankshaft/lithium-allocator-inl.h b/src/crankshaft/lithium-allocator-inl.h
index 22611b5..631af60 100644
--- a/src/crankshaft/lithium-allocator-inl.h
+++ b/src/crankshaft/lithium-allocator-inl.h
@@ -21,6 +21,8 @@
 #include "src/crankshaft/mips/lithium-mips.h" // NOLINT
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/crankshaft/x87/lithium-x87.h" // NOLINT
 #else
diff --git a/src/crankshaft/lithium-allocator.cc b/src/crankshaft/lithium-allocator.cc
index 5d05292..6155dc0 100644
--- a/src/crankshaft/lithium-allocator.cc
+++ b/src/crankshaft/lithium-allocator.cc
@@ -510,9 +510,9 @@
   return LifetimePosition::Invalid();
 }
 
-
 LAllocator::LAllocator(int num_values, HGraph* graph)
-    : chunk_(NULL),
+    : zone_(graph->isolate()->allocator()),
+      chunk_(NULL),
       live_in_sets_(graph->blocks()->length(), zone()),
       live_ranges_(num_values * 2, zone()),
       fixed_live_ranges_(NULL),
@@ -529,7 +529,6 @@
       has_osr_entry_(false),
       allocation_ok_(true) {}
 
-
 void LAllocator::InitializeLivenessAnalysis() {
   // Initialize the live_in sets for each block to NULL.
   int block_count = graph_->blocks()->length();
diff --git a/src/crankshaft/lithium-allocator.h b/src/crankshaft/lithium-allocator.h
index 46289e0..b648bd8 100644
--- a/src/crankshaft/lithium-allocator.h
+++ b/src/crankshaft/lithium-allocator.h
@@ -6,6 +6,7 @@
 #define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
 
 #include "src/allocation.h"
+#include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/lithium.h"
 #include "src/zone.h"
 
diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc
index c5b7e9c..53fedcf 100644
--- a/src/crankshaft/lithium-codegen.cc
+++ b/src/crankshaft/lithium-codegen.cc
@@ -30,6 +30,9 @@
 #elif V8_TARGET_ARCH_PPC
 #include "src/crankshaft/ppc/lithium-ppc.h"          // NOLINT
 #include "src/crankshaft/ppc/lithium-codegen-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h"          // NOLINT
+#include "src/crankshaft/s390/lithium-codegen-s390.h"  // NOLINT
 #else
 #error Unsupported target architecture.
 #endif
@@ -237,8 +240,8 @@
       break;
     }
     case JS_GETTER: {
-      DCHECK(translation_size == 1);
-      DCHECK(height == 0);
+      DCHECK_EQ(1, translation_size);
+      DCHECK_EQ(0, height);
       int shared_id = DefineDeoptimizationLiteral(
           environment->entry() ? environment->entry()->shared()
                                : info()->shared_info());
@@ -252,8 +255,8 @@
       break;
     }
     case JS_SETTER: {
-      DCHECK(translation_size == 2);
-      DCHECK(height == 0);
+      DCHECK_EQ(2, translation_size);
+      DCHECK_EQ(0, height);
       int shared_id = DefineDeoptimizationLiteral(
           environment->entry() ? environment->entry()->shared()
                                : info()->shared_info());
@@ -266,6 +269,20 @@
       }
       break;
     }
+    case TAIL_CALLER_FUNCTION: {
+      DCHECK_EQ(0, translation_size);
+      int shared_id = DefineDeoptimizationLiteral(
+          environment->entry() ? environment->entry()->shared()
+                               : info()->shared_info());
+      translation->BeginTailCallerFrame(shared_id);
+      if (info()->closure().is_identical_to(environment->closure())) {
+        translation->StoreJSFrameFunction();
+      } else {
+        int closure_id = DefineDeoptimizationLiteral(environment->closure());
+        translation->StoreLiteral(closure_id);
+      }
+      break;
+    }
     case ARGUMENTS_ADAPTOR: {
       int shared_id = DefineDeoptimizationLiteral(
           environment->entry() ? environment->entry()->shared()
diff --git a/src/crankshaft/lithium-inl.h b/src/crankshaft/lithium-inl.h
index 9044b4c..938588e 100644
--- a/src/crankshaft/lithium-inl.h
+++ b/src/crankshaft/lithium-inl.h
@@ -21,6 +21,8 @@
 #include "src/crankshaft/mips64/lithium-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_PPC
 #include "src/crankshaft/ppc/lithium-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/crankshaft/x87/lithium-x87.h"  // NOLINT
 #else
diff --git a/src/crankshaft/lithium.cc b/src/crankshaft/lithium.cc
index 6776390..d34b04f 100644
--- a/src/crankshaft/lithium.cc
+++ b/src/crankshaft/lithium.cc
@@ -30,6 +30,9 @@
 #elif V8_TARGET_ARCH_X87
 #include "src/crankshaft/x87/lithium-x87.h"  // NOLINT
 #include "src/crankshaft/x87/lithium-codegen-x87.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h"          // NOLINT
+#include "src/crankshaft/s390/lithium-codegen-s390.h"  // NOLINT
 #else
 #error "Unknown architecture."
 #endif
@@ -247,7 +250,9 @@
 }
 
 LChunk::LChunk(CompilationInfo* info, HGraph* graph)
-    : base_frame_slots_(StandardFrameConstants::kFixedFrameSize / kPointerSize),
+    : base_frame_slots_(info->IsStub()
+                            ? TypedFrameConstants::kFixedSlotCount
+                            : StandardFrameConstants::kFixedSlotCount),
       current_frame_slots_(base_frame_slots_),
       info_(info),
       graph_(graph),
@@ -333,7 +338,6 @@
   }
 }
 
-
 LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
   return LConstantOperand::Create(constant->id(), zone());
 }
@@ -461,7 +465,8 @@
     void* jit_handler_data =
         assembler.positions_recorder()->DetachJITHandlerData();
     LOG_CODE_EVENT(info()->isolate(),
-                   CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
+                   CodeEndLinePosInfoRecordEvent(AbstractCode::cast(*code),
+                                                 jit_handler_data));
 
     CodeGenerator::PrintCode(code, info());
     DCHECK(!(info()->isolate()->serializer_enabled() &&
@@ -502,18 +507,94 @@
   status_ = ABORTED;
 }
 
+void LChunkBuilderBase::CreateLazyBailoutForCall(HBasicBlock* current_block,
+                                                 LInstruction* instr,
+                                                 HInstruction* hydrogen_val) {
+  if (!instr->IsCall()) return;
+
+  HEnvironment* hydrogen_env = current_block->last_environment();
+  HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+  DCHECK_NOT_NULL(hydrogen_env);
+  if (instr->IsSyntacticTailCall()) {
+    // If it was a syntactic tail call we need to drop the current frame and
+    // all the frames on top of it that are either an arguments adaptor frame
+    // or a tail caller frame.
+    hydrogen_env = hydrogen_env->outer();
+    while (hydrogen_env != nullptr &&
+           (hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR ||
+            hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION)) {
+      hydrogen_env = hydrogen_env->outer();
+    }
+    if (hydrogen_env != nullptr) {
+      if (hydrogen_env->frame_type() == JS_FUNCTION) {
+        // In case an outer frame is a function frame we have to replay the
+        // environment manually because
+        // 1) it does not contain the result of the inlined function yet,
+        // 2) we can't find the proper simulate that corresponds to the point
+        //    after the inlined call to do a ReplayEnvironment() on.
+        // So we push the return value on top of the outer environment.
+        // As for JS_GETTER/JS_SETTER/JS_CONSTRUCT nothing has to be done here;
+        // the deoptimizer ensures that the result of the callee is correctly
+        // propagated to the result register during deoptimization.
+        hydrogen_env = hydrogen_env->Copy();
+        hydrogen_env->Push(hydrogen_val);
+      }
+    } else {
+      // Although we don't need this lazy bailout for normal execution
+      // (because when we tail call from the outermost function we should pop
+      // its frame), we still need it when the debugger is on.
+      hydrogen_env = current_block->last_environment();
+    }
+  } else {
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(hydrogen_env);
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+  }
+  LInstruction* bailout = LChunkBuilderBase::AssignEnvironment(
+      new (zone()) LLazyBailout(), hydrogen_env);
+  bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+  chunk_->AddInstruction(bailout, current_block);
+}
+
+LInstruction* LChunkBuilderBase::AssignEnvironment(LInstruction* instr,
+                                                   HEnvironment* hydrogen_env) {
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  DCHECK_NE(TAIL_CALLER_FUNCTION, hydrogen_env->frame_type());
+  instr->set_environment(CreateEnvironment(
+      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+  return instr;
+}
 
 LEnvironment* LChunkBuilderBase::CreateEnvironment(
     HEnvironment* hydrogen_env, int* argument_index_accumulator,
     ZoneList<HValue*>* objects_to_materialize) {
   if (hydrogen_env == NULL) return NULL;
 
+  BailoutId ast_id = hydrogen_env->ast_id();
+  DCHECK(!ast_id.IsNone() ||
+         (hydrogen_env->frame_type() != JS_FUNCTION &&
+          hydrogen_env->frame_type() != TAIL_CALLER_FUNCTION));
+
+  if (hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION) {
+    // Skip potential outer arguments adaptor frame.
+    HEnvironment* outer_hydrogen_env = hydrogen_env->outer();
+    if (outer_hydrogen_env != nullptr &&
+        outer_hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR) {
+      outer_hydrogen_env = outer_hydrogen_env->outer();
+    }
+    LEnvironment* outer = CreateEnvironment(
+        outer_hydrogen_env, argument_index_accumulator, objects_to_materialize);
+    return new (zone())
+        LEnvironment(hydrogen_env->closure(), hydrogen_env->frame_type(),
+                     ast_id, 0, 0, 0, outer, hydrogen_env->entry(), zone());
+  }
+
   LEnvironment* outer =
       CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
                         objects_to_materialize);
-  BailoutId ast_id = hydrogen_env->ast_id();
-  DCHECK(!ast_id.IsNone() ||
-         hydrogen_env->frame_type() != JS_FUNCTION);
 
   int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
                           ? 0
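
CreateLazyBailoutForCall above handles a syntactic tail call by dropping the current environment and any ARGUMENTS_ADAPTOR or TAIL_CALLER_FUNCTION environments stacked above it before building the LLazyBailout. A short standalone model of that walk over the outer-environment chain (Env and EnvironmentForTailCallBailout are illustrative stand-ins for HEnvironment):

#include <cstdio>

// Illustrative frame kinds, mirroring the cases skipped in the patch above.
enum FrameType { JS_FUNCTION, ARGUMENTS_ADAPTOR, TAIL_CALLER_FUNCTION };

struct Env {
  FrameType frame_type;
  const Env* outer;  // next environment up the inlining chain
};

// For a syntactic tail call: step past the current environment and any
// adaptor / tail-caller environments until a real frame (or nullptr) is found.
static const Env* EnvironmentForTailCallBailout(const Env* current) {
  const Env* env = current->outer;
  while (env != nullptr && (env->frame_type == ARGUMENTS_ADAPTOR ||
                            env->frame_type == TAIL_CALLER_FUNCTION)) {
    env = env->outer;
  }
  // nullptr means the tail call happens in the outermost function; the patch
  // then falls back to the current environment (needed when the debugger is on).
  return env != nullptr ? env : current;
}

int main() {
  Env outer{JS_FUNCTION, nullptr};
  Env adaptor{ARGUMENTS_ADAPTOR, &outer};
  Env inner{JS_FUNCTION, &adaptor};
  std::printf("%d\n",
              EnvironmentForTailCallBailout(&inner)->frame_type == JS_FUNCTION);
  return 0;
}
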
diff --git a/src/crankshaft/lithium.h b/src/crankshaft/lithium.h
index 5cfc0c3..a2c0283 100644
--- a/src/crankshaft/lithium.h
+++ b/src/crankshaft/lithium.h
@@ -9,6 +9,7 @@
 
 #include "src/allocation.h"
 #include "src/bailout-reason.h"
+#include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/hydrogen.h"
 #include "src/safepoint-table.h"
 #include "src/zone-allocator.h"
@@ -744,6 +745,16 @@
   // Will not be moved to a register even if one is freely available.
   virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
 
+  // Constructs the proper environment for a lazy bailout point after a call,
+  // creates an LLazyBailout instruction, and adds it to the current block.
+  void CreateLazyBailoutForCall(HBasicBlock* current_block, LInstruction* instr,
+                                HInstruction* hydrogen_val);
+
+  // Assigns the given environment to an instruction.  An instruction which
+  // can deoptimize must have an environment.
+  LInstruction* AssignEnvironment(LInstruction* instr,
+                                  HEnvironment* hydrogen_env);
+
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                   int* argument_index_accumulator,
                                   ZoneList<HValue*>* objects_to_materialize);
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
index 8febb57..f1717ca 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -136,7 +136,7 @@
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
     if (info()->IsStub()) {
-      __ StubPrologue();
+      __ StubPrologue(StackFrame::STUB);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue());
     }
@@ -278,18 +278,15 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
         __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ push(scratch0());
-        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
       code->Generate();
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
         DCHECK(frame_is_built_);
-        __ pop(at);
-        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+        __ PopCommonFrame(scratch0());
         frame_is_built_ = false;
       }
       __ jmp(code->exit());
@@ -328,7 +325,7 @@
       if (table_entry->needs_frame) {
         DCHECK(!info()->saves_caller_doubles());
         Comment(";;; call deopt with frame");
-        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        __ PushCommonFrame();
         __ Call(&needs_frame);
       } else {
         __ Call(&call_deopt_entry);
@@ -342,10 +339,9 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      DCHECK(info()->IsStub());
       __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
       __ push(at);
-      __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+      DCHECK(info()->IsStub());
     }
 
     Comment(";;; call deopt");
@@ -1966,29 +1962,30 @@
       __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
       EmitBranch(instr, ne, at, Operand(zero_reg));
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // Boolean -> its value.
         __ LoadRoot(at, Heap::kTrueValueRootIndex);
         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
         __ LoadRoot(at, Heap::kFalseValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ LoadRoot(at, Heap::kNullValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
@@ -2009,14 +2006,14 @@
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_),
                   ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2027,14 +2024,14 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2042,7 +2039,7 @@
                   Operand(SIMD128_VALUE_TYPE));
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         DoubleRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
@@ -2274,11 +2271,10 @@
   DCHECK(ToRegister(instr->left()).is(a1));
   DCHECK(ToRegister(instr->right()).is(a0));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-
-  EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
-             Operand(zero_reg));
+  __ LoadRoot(at, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, eq, v0, Operand(at));
 }
 
 
@@ -2355,10 +2351,11 @@
 
   __ JumpIfSmi(input, is_false);
   __ GetObjectType(input, temp, temp2);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
+    __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
   } else {
-    __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
+    __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
   }
 
   // Check if the constructor in the map is a function.
@@ -2988,17 +2985,20 @@
 
   if (instr->hydrogen()->from_inlined()) {
     __ Subu(result, sp, 2 * kPointerSize);
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     // Check if the calling frame is an arguments adaptor frame.
     Label done, adapted;
     __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ lw(result,
+          MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+  } else {
+    __ mov(result, fp);
   }
 }
 
@@ -3121,15 +3121,25 @@
   __ sll(scratch, length, 2);
 
   __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(a0);
+    // It is safe to use t0, t1 and t2 as scratch registers here given that
+    // we are not going to return to caller function anyway.
+    PrepareForTailCall(actual, t0, t1, t2);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is a0, as expected
   // by InvokeFunction.
   ParameterCount actual(receiver);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -3175,10 +3185,9 @@
   CallRuntime(Runtime::kDeclareGlobals, instr);
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -3195,17 +3204,35 @@
     __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
     __ li(a0, Operand(arity));
 
-    // Invoke function.
-    __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
-    __ Call(at);
+    bool is_self_call = function.is_identical_to(info()->closure());
 
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Invoke function.
+    if (is_self_call) {
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
+    } else {
+      __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+      if (is_tail_call) {
+        __ Jump(at);
+      } else {
+        __ Call(at);
+      }
+    }
+
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
   } else {
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
   }
 }
 
@@ -3539,22 +3566,78 @@
   __ Clz(result, input);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ Branch(&no_arguments_adaptor, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ lw(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ Branch(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(scratch1,
+        FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(a1));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use t0, t1 and t2 as scratch registers here given that
+    // we are not going to return to caller function anyway.
+    PrepareForTailCall(actual, t0, t1, t2);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(a1, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
 }
 
@@ -3596,56 +3679,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(a1));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  // Change context.
-  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
-  __ li(a0, Operand(instr->arity()));
-
-  // Load the code entry address
-  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-  __ Call(at);
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->function()).is(a1));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  int arity = instr->arity();
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(a3));
-    DCHECK(vector_register.is(a2));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ li(vector_register, vector);
-    __ li(slot_register, Operand(Smi::FromInt(index)));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ li(a0, Operand(arity));
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
-  }
-}
-
-
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4189,8 +4222,7 @@
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
-  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
-                                     ne, &no_memento_found);
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   DeoptimizeIf(al, instr);
   __ bind(&no_memento_found);
 }
@@ -5139,14 +5171,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(a0));
-  DCHECK(ToRegister(instr->result()).is(v0));
-  __ push(a0);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTypeof(LTypeof* instr) {
   DCHECK(ToRegister(instr->value()).is(a3));
   DCHECK(ToRegister(instr->result()).is(v0));
@@ -5534,13 +5558,6 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
 #undef __
 
 }  // namespace internal
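The lithium-codegen-mips.cc hunks above wire syntactic tail calls into Crankshaft on MIPS: DoApplyArguments and DoInvokeFunction now route through the new LCodeGen::PrepareForTailCall helper, which looks one frame up and, when the caller sits behind an arguments adaptor frame, drops the current frame and takes over the adaptor's argument count before the JUMP_FUNCTION invoke. Below is a minimal, architecture-neutral sketch of that frame walk; the Frame layout, marker value and helper names are made up for illustration, and the actual frame dropping (the mov fp, scratch2 step in the diff) is omitted.

    // Hypothetical model of the adaptor-frame check in PrepareForTailCall.
    #include <cstdio>

    struct Frame {
      Frame* caller_fp;           // StandardFrameConstants::kCallerFPOffset
      int context_or_frame_type;  // context slot doubling as a frame marker
      int adaptor_arg_count;      // ArgumentsAdaptorFrameConstants::kLengthOffset
    };

    constexpr int kArgumentsAdaptorMarker = 4;  // stand-in for the Smi marker

    // How many caller arguments the tail call has to drop.
    int CallerArgsCount(const Frame* current, int formal_parameter_count) {
      const Frame* caller = current->caller_fp;
      if (caller->context_or_frame_type == kArgumentsAdaptorMarker) {
        // The caller went through an arguments adaptor: use its actual count.
        return caller->adaptor_arg_count;
      }
      // No adaptor frame: the caller pushed exactly its formal parameters.
      return formal_parameter_count;
    }

    int main() {
      Frame outer{nullptr, 0, 0};
      Frame adaptor{&outer, kArgumentsAdaptorMarker, 5};
      Frame current{&adaptor, 0, 0};
      std::printf("%d\n", CallerArgsCount(&current, 2));  // prints 5
      return 0;
    }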
diff --git a/src/crankshaft/mips/lithium-codegen-mips.h b/src/crankshaft/mips/lithium-codegen-mips.h
index df72b2e..7a316e5 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/src/crankshaft/mips/lithium-codegen-mips.h
@@ -212,11 +212,14 @@
                                LInstruction* instr,
                                LOperand* context);
 
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in a1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
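The signature change above threads is_tail_call from DoInvokeFunction down into CallKnownFunction, whose body (in the .cc hunk) now picks one of four call shapes: self call or ordinary target, tail call or regular call, and only the non-tail variants record a lazy-deopt safepoint afterwards. The sketch below mirrors that dispatch in plain C++; the emitted strings are only mnemonics for the macro-assembler calls in the diff.

    #include <cstdio>

    enum class Target { kSelf, kOther };

    void Emit(const char* what) { std::printf("  %s\n", what); }

    // Mirrors the reworked CallKnownFunction fast path.
    void EmitKnownCall(Target target, bool is_tail_call) {
      if (target == Target::kSelf) {
        // Calling the code object currently being generated.
        Emit(is_tail_call ? "Jump <own code object>" : "Call <own code object>");
      } else {
        Emit("lw at, [function + JSFunction::kCodeEntryOffset]");
        Emit(is_tail_call ? "Jump at" : "Call at");
      }
      if (!is_tail_call) {
        // Tail calls never return here, so no safepoint is needed.
        Emit("RecordSafepointWithLazyDeopt");
      }
    }

    int main() {
      EmitKnownCall(Target::kSelf, true);    // self tail call
      EmitKnownCall(Target::kOther, false);  // ordinary known call
      return 0;
    }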
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
index a7c5488..71c34df 100644
--- a/src/crankshaft/mips/lithium-mips.cc
+++ b/src/crankshaft/mips/lithium-mips.cc
@@ -255,27 +255,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -574,12 +553,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
@@ -907,22 +881,16 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -935,14 +903,14 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
   if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
     branch = AssignEnvironment(branch);
   }
@@ -1064,16 +1032,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), a1);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1097,6 +1055,9 @@
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -1105,6 +1066,9 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), a1);
   LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1224,22 +1188,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseFixed(instr->function(), a1);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(a3);
-    vector = FixedTemp(a2);
-  }
-
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
@@ -1790,13 +1738,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
   // The control instruction marking the end of a block that completed
   // abruptly (e.g., threw an exception).  There is nothing specific to do.
@@ -2435,13 +2376,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), a0);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* value = UseFixed(instr->value(), a3);
@@ -2478,11 +2412,9 @@
   HEnvironment* outer = current_block_->last_environment();
   outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2543,13 +2475,6 @@
   return AssignPointerMap(result);
 }
 
-
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
 }  // namespace internal
 }  // namespace v8
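Two related builder changes land in this file: DoPrologue is now marked as a call whenever the function has heap slots (allocating the context calls into the runtime), and the old inline lazy-bailout block is replaced by CreateLazyBailoutForCall from the shared LChunkBuilderBase. The toy function below states the resulting policy, on the assumption that the shared helper keys off IsCall(); the types are illustrative, not V8's.

    #include <iostream>
    #include <string>

    struct LInstr {
      std::string name;
      bool is_call;
      bool is_prologue;
    };

    // Assumed policy: calls always get a lazy bailout, and a prologue counts
    // as a call exactly when a heap-allocated context is needed.
    bool NeedsLazyBailout(const LInstr& instr, int num_heap_slots) {
      if (instr.is_call) return true;
      return instr.is_prologue && num_heap_slots > 0;
    }

    int main() {
      LInstr prologue{"prologue", false, true};
      LInstr add{"add-i", false, false};
      std::cout << NeedsLazyBailout(prologue, 3) << " "   // 1
                << NeedsLazyBailout(add, 3) << "\n";      // 0
      return 0;
    }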
 
diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h
index 8b36c5d..7d41093 100644
--- a/src/crankshaft/mips/lithium-mips.h
+++ b/src/crankshaft/mips/lithium-mips.h
@@ -29,9 +29,7 @@
   V(BitI)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallJSFunction)                          \
   V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CheckArrayBufferNotNeutered)             \
@@ -132,7 +130,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyed)                              \
   V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
@@ -144,7 +141,6 @@
   V(SubI)                                    \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
@@ -224,6 +220,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -258,6 +261,8 @@
   virtual LOperand* TempAt(int i) = 0;
 
   class IsCallBits: public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -532,6 +537,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 
   LOperand* function() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
@@ -1684,23 +1690,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
  public:
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1763,29 +1752,6 @@
 };
 
 
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2385,19 +2351,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
@@ -2514,18 +2467,6 @@
 };
 
 
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
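The new IsSyntacticTailCallBits flag shows the BitField idiom used throughout this header: every flag class claims the next free bit of LInstruction's bit_field_, and chaining through IsCallBits::kNext keeps the layout correct as flags are added. Below is a small self-contained re-implementation of that pattern for illustration; it is not the BitField from V8's own headers.

    #include <cassert>
    #include <cstdint>

    template <typename T, int shift, int size>
    class BitField {
     public:
      static constexpr int kNext = shift + size;
      static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
      static uint32_t update(uint32_t field, T value) {
        return (field & ~kMask) | encode(value);
      }
    };

    class IsCallBits : public BitField<bool, 0, 1> {};
    class IsSyntacticTailCallBits
        : public BitField<bool, IsCallBits::kNext, 1> {};

    int main() {
      uint32_t bits = 0;
      bits = IsCallBits::update(bits, true);
      bits = IsSyntacticTailCallBits::update(bits, true);
      assert(IsCallBits::decode(bits));
      assert(IsSyntacticTailCallBits::decode(bits));
      bits = IsSyntacticTailCallBits::update(bits, false);
      assert(!IsSyntacticTailCallBits::decode(bits));
      return 0;
    }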
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index ddf908d..c7bbe9f 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -111,7 +111,7 @@
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
     if (info()->IsStub()) {
-      __ StubPrologue();
+      __ StubPrologue(StackFrame::STUB);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue());
     }
@@ -253,19 +253,15 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
         __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ push(scratch0());
-        __ Daddu(fp, sp,
-            Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
       code->Generate();
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
         DCHECK(frame_is_built_);
-        __ pop(at);
-        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+        __ PopCommonFrame(scratch0());
         frame_is_built_ = false;
       }
       __ jmp(code->exit());
@@ -300,7 +296,7 @@
         if (table_entry->needs_frame) {
           DCHECK(!info()->saves_caller_doubles());
           Comment(";;; call deopt with frame");
-          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+          __ PushCommonFrame();
           __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
           __ li(t9, Operand(entry - base));
         } else {
@@ -313,7 +309,7 @@
         if (table_entry->needs_frame) {
           DCHECK(!info()->saves_caller_doubles());
           Comment(";;; call deopt with frame");
-          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+          __ PushCommonFrame();
           __ BranchAndLink(&needs_frame);
         } else {
           __ BranchAndLink(&call_deopt_entry);
@@ -327,10 +323,9 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      DCHECK(info()->IsStub());
       __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
       __ push(at);
-      __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+      DCHECK(info()->IsStub());
     }
 
     Comment(";;; call deopt");
@@ -1343,9 +1338,10 @@
     switch (constant) {
       case -1:
         if (overflow) {
-          __ DsubuAndCheckForOverflow(result, zero_reg, left, scratch);
-          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
-                       Operand(zero_reg));
+          Label no_overflow;
+          __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+          DeoptimizeIf(al, instr);
+          __ bind(&no_overflow);
         } else {
           __ Dsubu(result, zero_reg, left);
         }
@@ -1444,9 +1440,10 @@
     switch (constant) {
       case -1:
         if (overflow) {
-          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
-          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
-                       Operand(zero_reg));
+          Label no_overflow;
+          __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+          DeoptimizeIf(al, instr);
+          __ bind(&no_overflow);
         } else {
           __ Subu(result, zero_reg, left);
         }
@@ -1652,13 +1649,13 @@
     DCHECK(right->IsRegister() || right->IsConstantOperand());
     __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
   } else {  // can_overflow.
-    Register overflow = scratch0();
-    Register scratch = scratch1();
+    Register scratch = scratch0();
+    Label no_overflow_label;
     DCHECK(right->IsRegister() || right->IsConstantOperand());
-    __ DsubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
-                                ToOperand(right), overflow, scratch);
-    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
-                 Operand(zero_reg));
+    __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+                       &no_overflow_label, scratch);
+    DeoptimizeIf(al, instr);
+    __ bind(&no_overflow_label);
   }
 }
 
@@ -1673,13 +1670,13 @@
     DCHECK(right->IsRegister() || right->IsConstantOperand());
     __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
   } else {  // can_overflow.
-    Register overflow = scratch0();
-    Register scratch = scratch1();
+    Register scratch = scratch0();
+    Label no_overflow_label;
     DCHECK(right->IsRegister() || right->IsConstantOperand());
-    __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
-                               ToOperand(right), overflow, scratch);
-    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
-                 Operand(zero_reg));
+    __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+                      &no_overflow_label, scratch);
+    DeoptimizeIf(al, instr);
+    __ bind(&no_overflow_label);
   }
 }
 
@@ -1813,13 +1810,13 @@
     DCHECK(right->IsRegister() || right->IsConstantOperand());
     __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
   } else {  // can_overflow.
-    Register overflow = scratch0();
+    Label no_overflow_label;
     Register scratch = scratch1();
     DCHECK(right->IsRegister() || right->IsConstantOperand());
-    __ DadduAndCheckForOverflow(ToRegister(result), ToRegister(left),
-                                ToOperand(right), overflow, scratch);
-    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
-                 Operand(zero_reg));
+    __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+                       &no_overflow_label, scratch);
+    DeoptimizeIf(al, instr);
+    __ bind(&no_overflow_label);
   }
 }
 
@@ -1834,13 +1831,13 @@
     DCHECK(right->IsRegister() || right->IsConstantOperand());
     __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
   } else {  // can_overflow.
-    Register overflow = scratch0();
+    Label no_overflow_label;
     Register scratch = scratch1();
     DCHECK(right->IsRegister() || right->IsConstantOperand());
-    __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
-                               ToOperand(right), overflow, scratch);
-    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
-                 Operand(zero_reg));
+    __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+                      &no_overflow_label, scratch);
+    DeoptimizeIf(al, instr);
+    __ bind(&no_overflow_label);
   }
 }
 
@@ -2083,29 +2080,30 @@
       __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
       EmitBranch(instr, ne, at, Operand(zero_reg));
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // Boolean -> its value.
         __ LoadRoot(at, Heap::kTrueValueRootIndex);
         __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
         __ LoadRoot(at, Heap::kFalseValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ LoadRoot(at, Heap::kNullValueRootIndex);
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
@@ -2126,14 +2124,14 @@
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_),
                   ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2144,14 +2142,14 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         const Register scratch = scratch1();
         __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2159,7 +2157,7 @@
                   Operand(SIMD128_VALUE_TYPE));
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         DoubleRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
@@ -2391,11 +2389,10 @@
   DCHECK(ToRegister(instr->left()).is(a1));
   DCHECK(ToRegister(instr->right()).is(a0));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-
-  EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
-             Operand(zero_reg));
+  __ LoadRoot(at, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, eq, v0, Operand(at));
 }
 
 
@@ -2473,10 +2470,11 @@
   __ JumpIfSmi(input, is_false);
 
   __ GetObjectType(input, temp, temp2);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
+    __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
   } else {
-    __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
+    __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
   }
 
   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -3168,17 +3166,20 @@
 
   if (instr->hydrogen()->from_inlined()) {
     __ Dsubu(result, sp, 2 * kPointerSize);
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     // Check if the calling frame is an arguments adaptor frame.
     Label done, adapted;
     __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ ld(result,
+          MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+  } else {
+    __ mov(result, fp);
   }
 }
 
@@ -3305,15 +3306,25 @@
   __ dsll(scratch, length, kPointerSizeLog2);
 
   __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(a0);
+    // It is safe to use t0, t1 and t2 as scratch registers here given that
+    // we are not going to return to caller function anyway.
+    PrepareForTailCall(actual, t0, t1, t2);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is a0, as expected
   // by InvokeFunction.
   ParameterCount actual(receiver);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -3359,10 +3370,9 @@
   CallRuntime(Runtime::kDeclareGlobals, instr);
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -3379,17 +3389,35 @@
     __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
     __ li(a0, Operand(arity));
 
-    // Invoke function.
-    __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
-    __ Call(at);
+    bool is_self_call = function.is_identical_to(info()->closure());
 
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Invoke function.
+    if (is_self_call) {
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
+    } else {
+      __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+      if (is_tail_call) {
+        __ Jump(at);
+      } else {
+        __ Call(at);
+      }
+    }
+
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
   } else {
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
   }
 }
 
@@ -3742,22 +3770,75 @@
   __ Clz(result, input);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ Branch(&no_arguments_adaptor, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ ld(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ Branch(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(a1));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use t0, t1 and t2 as scratch registers here given that
+    // we are not going to return to caller function anyway.
+    PrepareForTailCall(actual, t0, t1, t2);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(a1, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
 }
 
@@ -3799,56 +3880,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(a1));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  // Change context.
-  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
-  __ li(a0, Operand(instr->arity()));
-
-  // Load the code entry address
-  __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-  __ Call(at);
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->function()).is(a1));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  int arity = instr->arity();
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(a3));
-    DCHECK(vector_register.is(a2));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ li(vector_register, vector);
-    __ li(slot_register, Operand(Smi::FromInt(index)));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ li(a0, Operand(arity));
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
-  }
-}
-
-
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4423,8 +4454,7 @@
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
-  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
-                                     ne, &no_memento_found);
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
   __ bind(&no_memento_found);
 }
@@ -5344,14 +5374,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(a0));
-  DCHECK(ToRegister(instr->result()).is(v0));
-  __ push(a0);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTypeof(LTypeof* instr) {
   DCHECK(ToRegister(instr->value()).is(a3));
   DCHECK(ToRegister(instr->result()).is(v0));
@@ -5738,13 +5760,6 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
 #undef __
 
 }  // namespace internal
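Besides the tail-call plumbing, the MIPS64 arithmetic hunks switch from the old compute-overflow-then-deopt pattern (DsubuAndCheckForOverflow plus DeoptimizeIf(lt, ...)) to branch-on-no-overflow macros: DsubBranchNoOvf and friends jump straight to a continuation label, so the DeoptimizeIf that follows becomes unconditional. The sketch below shows the equivalent control flow in plain C++; it relies on the GCC/Clang __builtin_sub_overflow builtin, and the Deoptimize stub is a stand-in, not a V8 API.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    [[noreturn]] void Deoptimize(const char* reason) {
      std::printf("deopt: %s\n", reason);
      std::exit(0);
    }

    // Models "__ DsubBranchNoOvf(result, left, right, &no_overflow)" followed
    // by the unconditional "DeoptimizeIf(al, instr)".
    int64_t SubOrDeopt(int64_t left, int64_t right) {
      int64_t result;
      if (!__builtin_sub_overflow(left, right, &result)) {
        return result;  // __ bind(&no_overflow_label)
      }
      Deoptimize("overflow");  // only reached when the subtraction overflowed
    }

    int main() {
      std::printf("%lld\n", static_cast<long long>(SubOrDeopt(5, 3)));
      SubOrDeopt(INT64_MIN, 1);  // takes the deopt path
      return 0;
    }

The practical win is that no register is needed to hold an overflow value, which is why the scratch/overflow register pair disappears from these hunks.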
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.h b/src/crankshaft/mips64/lithium-codegen-mips64.h
index 2f1cefa..4a700bd 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -214,11 +214,14 @@
                                LInstruction* instr,
                                LOperand* context);
 
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in a1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc
index b66e8ba..bcfbc24 100644
--- a/src/crankshaft/mips64/lithium-mips64.cc
+++ b/src/crankshaft/mips64/lithium-mips64.cc
@@ -255,27 +255,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -574,12 +553,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
@@ -907,22 +881,16 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -935,14 +903,14 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
   if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
     branch = AssignEnvironment(branch);
   }
@@ -1064,16 +1032,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), a1);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1097,6 +1055,9 @@
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -1105,6 +1066,9 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), a1);
   LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1224,22 +1188,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseFixed(instr->function(), a1);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(a3);
-    vector = FixedTemp(a2);
-  }
-
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
@@ -1796,13 +1744,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
   // The control instruction marking the end of a block that completed
   // abruptly (e.g., threw an exception).  There is nothing specific to do.
@@ -2440,13 +2381,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), a0);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* value = UseFixed(instr->value(), a3);
@@ -2483,11 +2417,9 @@
   HEnvironment* outer = current_block_->last_environment();
   outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2548,13 +2480,6 @@
   return AssignPointerMap(result);
 }
 
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
-
 }  // namespace internal
 }  // namespace v8
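The ToBooleanStub to ToBooleanICStub rename in this builder keeps the same expected-input-types logic in DoBranch: an empty set is widened to Generic, and the branch only gets an environment (so it can deoptimize) when it is not an easy case and either needs a map check without having seen Smis or is not yet fully generic. The toy model below restates that condition; the Types set and its NeedsMap heuristic are simplified guesses at the real stub's semantics.

    #include <bitset>
    #include <iostream>

    enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING,
                SYMBOL, HEAP_NUMBER, SIMD_VALUE, NUM_TYPES };

    struct Types {
      std::bitset<NUM_TYPES> bits;
      bool Contains(Type t) const { return bits.test(t); }
      bool IsEmpty() const { return bits.none(); }
      bool IsGeneric() const { return bits.all(); }
      // Assumed: only heap-object cases require loading the value's map.
      bool NeedsMap() const {
        return Contains(SPEC_OBJECT) || Contains(STRING) || Contains(SYMBOL) ||
               Contains(HEAP_NUMBER) || Contains(SIMD_VALUE);
      }
    };

    // Mirrors the condition guarding AssignEnvironment(branch) in DoBranch.
    bool BranchNeedsEnvironment(Types expected, bool easy_case) {
      if (expected.IsEmpty()) expected.bits.set();  // treat as Generic
      return !easy_case &&
             ((!expected.Contains(SMI) && expected.NeedsMap()) ||
              !expected.IsGeneric());
    }

    int main() {
      Types only_string;
      only_string.bits.set(STRING);
      std::cout << BranchNeedsEnvironment(only_string, false) << "\n";  // 1
      return 0;
    }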
 
diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h
index 8d2324f..41cf93c 100644
--- a/src/crankshaft/mips64/lithium-mips64.h
+++ b/src/crankshaft/mips64/lithium-mips64.h
@@ -31,9 +31,7 @@
   V(BitI)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallJSFunction)                          \
   V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CheckArrayBufferNotNeutered)             \
@@ -134,7 +132,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyed)                              \
   V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
@@ -147,7 +144,6 @@
   V(SubS)                                    \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
@@ -227,6 +223,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -261,6 +264,8 @@
   virtual LOperand* TempAt(int i) = 0;
 
   class IsCallBits: public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -535,6 +540,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 
   LOperand* function() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
@@ -1746,23 +1752,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
  public:
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1825,29 +1814,6 @@
 };
 
 
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2431,19 +2397,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
@@ -2560,18 +2513,6 @@
 };
 
 
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 921d9b6..d5d0104 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -123,7 +123,7 @@
   info()->set_prologue_offset(prologue_offset);
   if (NeedsEagerFrame()) {
     if (info()->IsStub()) {
-      __ StubPrologue(ip, prologue_offset);
+      __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
     }
@@ -265,15 +265,14 @@
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
         __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
-        __ PushFixedFrame(scratch0());
-        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
       code->Generate();
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
         DCHECK(frame_is_built_);
-        __ PopFixedFrame(ip);
+        __ PopCommonFrame(scratch0());
         frame_is_built_ = false;
       }
       __ b(code->exit());
@@ -322,7 +321,7 @@
       if (table_entry->needs_frame) {
         DCHECK(!info()->saves_caller_doubles());
         Comment(";;; call deopt with frame");
-        __ PushFixedFrame();
+        __ PushCommonFrame();
         __ b(&needs_frame, SetLK);
       } else {
         __ b(&call_deopt_entry, SetLK);
@@ -336,10 +335,9 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      DCHECK(info()->IsStub());
       __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
       __ push(ip);
-      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+      DCHECK(info()->IsStub());
     }
 
     Comment(";;; call deopt");
@@ -2103,29 +2101,30 @@
       __ cmpi(ip, Operand::Zero());
       EmitBranch(instr, ne);
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // Boolean -> its value.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
         __ beq(instr->TrueLabel(chunk_));
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
         __ beq(instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ cmpi(reg, Operand::Zero());
         __ beq(instr->FalseLabel(chunk_));
@@ -2148,13 +2147,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
         __ bge(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2166,20 +2165,20 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         Label not_simd;
         __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2423,11 +2422,10 @@
   DCHECK(ToRegister(instr->left()).is(r4));
   DCHECK(ToRegister(instr->right()).is(r3));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-  __ cmpi(r3, Operand::Zero());
-
-  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+  __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, eq);
 }
 
 
@@ -2498,11 +2496,12 @@
 
   __ JumpIfSmi(input, is_false);
 
-  __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
+  __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ beq(is_true);
+    __ bge(is_true);
   } else {
-    __ beq(is_false);
+    __ bge(is_false);
   }
 
   // Check if the constructor in the map is a function.
@@ -3208,11 +3207,12 @@
 
   if (instr->hydrogen()->from_inlined()) {
     __ subi(result, sp, Operand(2 * kPointerSize));
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     // Check if the calling frame is an arguments adaptor frame.
     __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ LoadP(result,
-             MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ LoadP(
+        result,
+        MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
 
     // Result is the frame pointer for the frame if not adapted and for the real
@@ -3229,6 +3229,8 @@
       __ mr(result, scratch);
       __ bind(&done);
     }
+  } else {
+    __ mr(result, fp);
   }
 }
 
@@ -3348,14 +3350,26 @@
   __ bdnz(&loop);
 
   __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(r3);
+    // It is safe to use r6, r7 and r8 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) r6 (new.target) will be initialized below.
+    PrepareForTailCall(actual, r6, r7, r8);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is r3, as expected
   // by InvokeFunction.
   ParameterCount actual(receiver);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -3400,10 +3414,9 @@
   CallRuntime(Runtime::kDeclareGlobals, instr);
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -3425,19 +3438,31 @@
 
     // Invoke function.
     if (is_self_call) {
-      __ CallSelf();
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
     } else {
       __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
-      __ CallJSEntry(ip);
+      if (is_tail_call) {
+        __ JumpToJSEntry(ip);
+      } else {
+        __ CallJSEntry(ip);
+      }
     }
 
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
   } else {
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
   }
 }
 
@@ -3766,22 +3791,77 @@
   __ cntlzw_(result, input);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(scratch3,
+           MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mr(fp, scratch2);
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(r4));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use r6, r7 and r8 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) r6 (new.target) will be initialized below.
+    PrepareForTailCall(actual, r6, r7, r8);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(r4, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
 }
 
@@ -3823,67 +3903,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(r4));
-  DCHECK(ToRegister(instr->result()).is(r3));
-
-  // Change context.
-  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-  __ mov(r3, Operand(instr->arity()));
-
-  bool is_self_call = false;
-  if (instr->hydrogen()->function()->IsConstant()) {
-    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
-    Handle<JSFunction> jsfun =
-        Handle<JSFunction>::cast(fun_const->handle(isolate()));
-    is_self_call = jsfun.is_identical_to(info()->closure());
-  }
-
-  if (is_self_call) {
-    __ CallSelf();
-  } else {
-    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
-    __ CallJSEntry(ip);
-  }
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->function()).is(r4));
-  DCHECK(ToRegister(instr->result()).is(r3));
-
-  int arity = instr->arity();
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(r6));
-    DCHECK(vector_register.is(r5));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ Move(vector_register, vector);
-    __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ mov(r3, Operand(arity));
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
-  }
-}
-
-
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->constructor()).is(r4));
@@ -4469,9 +4488,10 @@
 
 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
-  Register temp = ToRegister(instr->temp());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
   Label no_memento_found;
-  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
   DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
   __ bind(&no_memento_found);
 }
@@ -5389,13 +5409,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(r3));
-  __ push(r3);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTypeof(LTypeof* instr) {
   DCHECK(ToRegister(instr->value()).is(r6));
   DCHECK(ToRegister(instr->result()).is(r3));
@@ -5742,13 +5755,7 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
 #undef __
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.h b/src/crankshaft/ppc/lithium-codegen-ppc.h
index 1b72bf8..28f1680 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -194,11 +194,14 @@
   void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                LInstruction* instr, LOperand* context);
 
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in r4.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc
index 2a04d99..b739786 100644
--- a/src/crankshaft/ppc/lithium-ppc.cc
+++ b/src/crankshaft/ppc/lithium-ppc.cc
@@ -261,27 +261,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -580,11 +559,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(
-      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
@@ -911,22 +886,16 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -939,14 +908,14 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
                    type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new (zone()) LBranch(UseRegister(value));
   if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
     branch = AssignEnvironment(branch);
   }
@@ -1070,15 +1039,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), r4);
-
-  LCallJSFunction* result = new (zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
 
@@ -1101,6 +1061,9 @@
 
   LCallWithDescriptor* result =
       new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, r3), instr);
 }
 
@@ -1109,6 +1072,9 @@
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), r4);
   LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1225,22 +1191,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseFixed(instr->function(), r4);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(r6);
-    vector = FixedTemp(r5);
-  }
-
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, r3), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr);
@@ -1806,13 +1756,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
   // The control instruction marking the end of a block that completed
   // abruptly (e.g., threw an exception).  There is nothing specific to do.
@@ -2263,9 +2206,10 @@
 LInstruction* LChunkBuilder::DoTrapAllocationMemento(
     HTrapAllocationMemento* instr) {
   LOperand* object = UseRegister(instr->object());
-  LOperand* temp = TempRegister();
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
   LTrapAllocationMemento* result =
-      new (zone()) LTrapAllocationMemento(object, temp);
+      new (zone()) LTrapAllocationMemento(object, temp1, temp2);
   return AssignEnvironment(result);
 }
 
@@ -2441,13 +2385,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), r3);
-  LToFastProperties* result = new (zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* value = UseFixed(instr->value(), r6);
@@ -2486,7 +2423,7 @@
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(
       instr->closure(), instr->arguments_count(), instr->function(), undefined,
-      instr->inlining_kind());
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2548,11 +2485,5 @@
   return AssignPointerMap(result);
 }
 
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new (zone()) LStoreFrameContext(context);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h
index 0dfde05..c39f620 100644
--- a/src/crankshaft/ppc/lithium-ppc.h
+++ b/src/crankshaft/ppc/lithium-ppc.h
@@ -29,9 +29,7 @@
   V(BitI)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallJSFunction)                          \
   V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CheckArrayBufferNotNeutered)             \
@@ -133,7 +131,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyed)                              \
   V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
@@ -146,7 +143,6 @@
   V(RSubI)                                   \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
@@ -223,6 +219,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -257,6 +260,8 @@
   virtual LOperand* TempAt(int i) = 0;
 
   class IsCallBits : public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -525,6 +530,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 
   LOperand* function() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
@@ -1659,21 +1665,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) { inputs_[0] = function; }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
  public:
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1736,29 +1727,6 @@
 };
 
 
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2090,16 +2058,17 @@
   ElementsKind to_kind() { return hydrogen()->to_kind(); }
 };
 
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
  public:
-  LTrapAllocationMemento(LOperand* object, LOperand* temp) {
+  LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = object;
-    temps_[0] = temp;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   LOperand* object() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
 };
@@ -2326,17 +2295,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
@@ -2445,16 +2403,6 @@
 };
 
 
-class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) { inputs_[0] = context; }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/s390/OWNERS b/src/crankshaft/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/crankshaft/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/crankshaft/s390/lithium-codegen-s390.cc b/src/crankshaft/s390/lithium-codegen-s390.cc
new file mode 100644
index 0000000..689f4bc
--- /dev/null
+++ b/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -0,0 +1,5668 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
+}
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetTotalFrameSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ std(DoubleRegister::from_code(save_iterator.Current()),
+           MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ld(DoubleRegister::from_code(save_iterator.Current()),
+          MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+    // r3: Callee's JS function.
+    // cp: Callee's context.
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
+    // ip: Our own function entry (required by the prologue)
+  }
+
+  int prologue_offset = masm_->pc_offset();
+
+  if (prologue_offset) {
+    // Prologue logic requires its starting address in ip and the
+    // corresponding offset from the function entry.  Need to add
+    // 4 bytes for the size of AHI/AGHI that AddP expands into.
+    prologue_offset += sizeof(FourByteInstr);
+    __ AddP(ip, ip, Operand(prologue_offset));
+  }
+  info()->set_prologue_offset(prologue_offset);
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
+    }
+    frame_is_built_ = true;
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
+    if (FLAG_debug_code) {
+      __ Push(r2, r3);
+      __ mov(r2, Operand(slots * kPointerSize));
+      __ mov(r3, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
+      __ lay(r2, MemOperand(r2, -kPointerSize));
+      __ CmpP(r2, Operand::Zero());
+      __ bne(&loop);
+      __ Pop(r2, r3);
+    }
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+  return !is_aborted();
+}
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info()->scope()->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in r3.
+    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ push(r3);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(r3);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // Context is returned in both r2 and cp.  It replaces the context
+    // passed to us.  It's saved in the stack and kept live in cp.
+    __ LoadRR(cp, r2);
+    __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                               (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ LoadP(r2, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ StoreP(r2, target);
+        // Update the write barrier. This clobbers r5 and r2.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
+                                    GetLinkRegisterState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r2, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ lay(sp, MemOperand(sp, -slots * kPointerSize));
+}
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(
+          ";;; <@%d,#%d> "
+          "-------------------- Deferred %s --------------------",
+          code->instruction_index(), code->instr()->hydrogen_value()->id(),
+          code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+        __ PushCommonFrame(scratch0());
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ PopCommonFrame(scratch0());
+        frame_is_built_ = false;
+      }
+      __ b(code->exit());
+    }
+  }
+
+  return !is_aborted();
+}
+
+bool LCodeGen::GenerateJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets in halfworld to the table can be encoded in the
+  // 32-bit signed immediate of a branch instruction.
+  // To simplify we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table generates one instruction and inlines one
+  // 32bit data after it.
+  // TODO(joransiu): The Int24 condition can likely be relaxed for S390
+  if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
+    Abort(kGeneratedCodeIsTooLarge);
+  }
+
+  if (jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
+    Comment(";;; -------------------- Jump table --------------------");
+    Address base = jump_table_[0].address;
+
+    Register entry_offset = scratch0();
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
+
+      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->deopt_info);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
+      __ mov(entry_offset, Operand(entry - base));
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        Comment(";;; call deopt with frame");
+        __ PushCommonFrame();
+        __ b(r14, &needs_frame);
+      } else {
+        __ b(r14, &call_deopt_entry);
+      }
+      info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                   table_entry->deopt_info.inlining_id);
+    }
+
+    if (needs_frame.is_linked()) {
+      __ bind(&needs_frame);
+      // This variant of deopt can only be used with stubs. Since we don't
+      // have a function pointer to install in the stack frame that we're
+      // building, install a special marker there instead.
+      DCHECK(info()->IsStub());
+      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+      __ push(ip);
+      DCHECK(info()->IsStub());
+    }
+
+    Comment(";;; call deopt");
+    __ bind(&call_deopt_entry);
+
+    if (info()->saves_caller_doubles()) {
+      DCHECK(info()->IsStub());
+      RestoreCallerDoubles();
+    }
+
+    // Add the base address to the offset previously loaded in entry_offset.
+    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+    __ AddP(ip, entry_offset, ip);
+    __ Jump(ip);
+  }
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
+  return !is_aborted();
+}
+
+Register LCodeGen::ToRegister(int code) const {
+  return Register::from_code(code);
+}
+
+DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
+  return DoubleRegister::from_code(code);
+}
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      AllowDeferredHandleDereference get_number;
+      DCHECK(literal->IsNumber());
+      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+    } else {
+      DCHECK(r.IsSmiOrTagged());
+      __ Move(scratch, literal);
+    }
+    return scratch;
+  } else if (op->IsStackSlot()) {
+    __ LoadP(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
+                                       Register dst) {
+  DCHECK(IsInteger32(const_op));
+  HConstant* constant = chunk_->LookupConstant(const_op);
+  int32_t value = constant->Integer32Value();
+  if (IsSmi(const_op)) {
+    __ LoadSmiLiteral(dst, Smi::FromInt(value));
+  } else {
+    __ LoadIntLiteral(dst, value);
+  }
+}
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                    const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
+}
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand::Zero();
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand::Zero();
+}
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
+  return -(index + 1) * kPointerSize;
+}
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
+  } else {
+    // Retrieve parameter without eager stack-frame relative to the
+    // stack-pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
+  } else {
+    // Retrieve parameter without eager stack-frame relative to the
+    // stack-pointer.
+    return MemOperand(sp,
+                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
+}
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation, LOperand* op,
+                                bool is_tagged, bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment, translation, value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer, dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
+                           LInstruction* instr, SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ LoadP(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+                                       LInstruction* instr, LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(instr->pointer_map(), argc,
+                               Safepoint::kNoLazyDeopt);
+}
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index, translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type,
+                            CRegister cr) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    Register scratch = scratch0();
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+
+    // Store the condition on the stack if necessary
+    if (cond != al) {
+      Label done;
+      __ LoadImmP(scratch, Operand::Zero());
+      __ b(NegateCondition(cond), &done, Label::kNear);
+      __ LoadImmP(scratch, Operand(1));
+      __ bind(&done);
+      __ push(scratch);
+    }
+
+    Label done;
+    __ Push(r3);
+    __ mov(scratch, Operand(count));
+    __ LoadW(r3, MemOperand(scratch));
+    __ Sub32(r3, r3, Operand(1));
+    __ Cmp32(r3, Operand::Zero());
+    __ bne(&no_deopt, Label::kNear);
+
+    __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
+    __ StoreW(r3, MemOperand(scratch));
+    __ Pop(r3);
+
+    if (cond != al) {
+      // Clean up the stack before the deoptimizer call
+      __ pop(scratch);
+    }
+
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+
+    __ b(&done);
+
+    __ bind(&no_deopt);
+    __ StoreW(r3, MemOperand(scratch));
+    __ Pop(r3);
+
+    if (cond != al) {
+      // Clean up the stack before the deoptimizer call
+      __ pop(scratch);
+    }
+
+    __ bind(&done);
+
+    if (cond != al) {
+      cond = ne;
+      __ CmpP(scratch, Operand::Zero());
+    }
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ b(cond, &jump_table_.last().label /*, cr*/);
+  }
+}
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            CRegister cr) {
+  Deoptimizer::BailoutType bailout_type =
+      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+  DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
+}
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                            SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kLazyDeopt);
+  }
+}
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+                               int arguments, Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_, label->hydrogen_value()->id(),
+          label->block_id(), LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
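+  // e.g. with divisor = +/-8 (shift == 3), a non-negative dividend yields
+  // dividend & 7, while a negative dividend yields -((-dividend) & 7).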
+  HMod* hmod = instr->hydrogen();
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ CmpP(dividend, Operand::Zero());
+    __ bge(&dividend_is_not_negative, Label::kNear);
+    if (shift) {
+      // Note that this is correct even for kMinInt operands.
+      __ LoadComplementRR(dividend, dividend);
+      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+      __ LoadComplementRR(dividend, dividend);
+      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+      }
+    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ mov(dividend, Operand::Zero());
+    } else {
+      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+    }
+    __ b(&done, Label::kNear);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  if (shift) {
+    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+  } else {
+    __ mov(dividend, Operand::Zero());
+  }
+  __ bind(&done);
+}
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
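+  // The remainder is dividend - trunc(dividend / |divisor|) * |divisor|, so
+  // it takes the sign of the dividend, e.g. -7 % 3 == -1 and 7 % -3 == 1.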
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ mov(ip, Operand(Abs(divisor)));
+  __ Mul(result, result, ip);
+  __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
+    __ Cmp32(dividend, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    __ bind(&remainder_not_zero);
+  }
+}
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+  Register left_reg = ToRegister(instr->left());
+  Register right_reg = ToRegister(instr->right());
+  Register result_reg = ToRegister(instr->result());
+  Label done;
+
+  // Check for x % 0.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ Cmp32(right_reg, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for kMinInt % -1; dr will yield an undefined result, which is not
+  // what we want. If we care about -0 we have to deopt, because we can't
+  // return that.
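+  // (The remainder here is mathematically 0, but the quotient -kMinInt does
+  // not fit in 32 bits; and since the dividend is negative, JS expects -0.)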
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ Cmp32(left_reg, Operand(kMinInt));
+    __ bne(&no_overflow_possible, Label::kNear);
+    __ Cmp32(right_reg, Operand(-1));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    } else {
+      __ b(ne, &no_overflow_possible, Label::kNear);
+      __ mov(result_reg, Operand::Zero());
+      __ b(&done, Label::kNear);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // The divide instruction dr below implicitly uses the register pair
+  // r0 & r1.
+  DCHECK(!left_reg.is(r1));
+  DCHECK(!right_reg.is(r1));
+  DCHECK(!result_reg.is(r1));
+  __ LoadRR(r0, left_reg);
+  __ srda(r0, Operand(32));
+  __ dr(r0, right_reg);  // R0:R1 = R1 / divisor; R0 = remainder
+
+  __ LoadAndTestP_ExtendSrc(result_reg, r0);  // Copy remainder to result reg
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ bne(&done, Label::kNear);
+    __ Cmp32(left_reg, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ bind(&done);
+}
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ Cmp32(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ Cmp32(dividend, Operand(0x80000000));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+
+  int32_t shift = WhichPowerOf2Abs(divisor);
+
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
+    __ TestBitRange(dividend, shift - 1, 0, r0);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ LoadComplementRR(result, dividend);
+    return;
+  }
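+  // Signed division by 2^shift, rounding toward zero: bias a negative
+  // dividend by (2^shift - 1) before the arithmetic shift, e.g.
+  // -7 / 4 -> (-7 + 3) >> 2 = -1.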
+  if (shift == 0) {
+    __ LoadRR(result, dividend);
+  } else {
+    if (shift == 1) {
+      __ ShiftRight(result, dividend, Operand(31));
+    } else {
+      __ ShiftRightArith(result, dividend, Operand(31));
+      __ ShiftRight(result, result, Operand(32 - shift));
+    }
+    __ AddP(result, dividend, result);
+    __ ShiftRightArith(result, result, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+    __ lgfr(result, result);
+#endif
+  }
+  if (divisor < 0) __ LoadComplementRR(result, result);
+}
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ Cmp32(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ LoadComplementRR(result, result);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    Register scratch = scratch0();
+    __ mov(ip, Operand(divisor));
+    __ Mul(scratch, result, ip);
+    __ Cmp32(scratch, dividend);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  const Register dividend = ToRegister(instr->dividend());
+  const Register divisor = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+
+  DCHECK(!dividend.is(result));
+  DCHECK(!divisor.is(result));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ Cmp32(divisor, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ Cmp32(dividend, Operand::Zero());
+    __ bne(&dividend_not_zero, Label::kNear);
+    __ Cmp32(divisor, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ Cmp32(dividend, Operand(kMinInt));
+    __ bne(&dividend_not_min_int, Label::kNear);
+    __ Cmp32(divisor, Operand(-1));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  __ LoadRR(r0, dividend);
+  __ srda(r0, Operand(32));
+  __ dr(r0, divisor);  // R0:R1 = R1 / divisor; R0 = remainder, R1 = quotient
+
+  __ LoadAndTestP_ExtendSrc(result, r1);  // Move quotient to result register
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    __ Cmp32(r0, Operand::Zero());
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register result = ToRegister(instr->result());
+  int32_t divisor = instr->divisor();
+  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
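+  // (An arithmetic right shift already floors, e.g.
+  // -7 >> 1 == -4 == floor(-7 / 2).)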
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 0) {
+    if (shift || !result.is(dividend)) {
+      __ ShiftRightArith(result, dividend, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+      __ lgfr(result, result);
+#endif
+    }
+    return;
+  }
+
+// If the divisor is negative, we have to negate and handle edge cases.
+#if V8_TARGET_ARCH_S390X
+  if (divisor == -1 && can_overflow) {
+    __ Cmp32(dividend, Operand(0x80000000));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+#endif
+
+  __ LoadComplementRR(result, dividend);
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+  }
+
+// If the negation could not overflow, simply shifting is OK.
+#if !V8_TARGET_ARCH_S390X
+  if (!can_overflow) {
+#endif
+    if (shift) {
+      __ ShiftRightArithP(result, result, Operand(shift));
+    }
+    return;
+#if !V8_TARGET_ARCH_S390X
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+    return;
+  }
+
+  Label overflow_label, done;
+  __ b(overflow, &overflow_label, Label::kNear);
+  __ ShiftRightArith(result, result, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+  __ lgfr(result, result);
+#endif
+  __ b(&done, Label::kNear);
+  __ bind(&overflow_label);
+  __ mov(result, Operand(kMinInt / divisor));
+  __ bind(&done);
+#endif
+}
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ Cmp32(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ LoadComplementRR(result, result);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
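+  // e.g. floor(-7 / 2) == -4 while trunc(-7 / 2) == -3; biasing the dividend
+  // first gives trunc((-7 + 1) / 2) - 1 == -3 - 1 == -4.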
+  Register temp = ToRegister(instr->temp());
+  DCHECK(!temp.is(dividend) && !temp.is(result));
+  Label needs_adjustment, done;
+  __ Cmp32(dividend, Operand::Zero());
+  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ LoadComplementRR(result, result);
+  __ b(&done, Label::kNear);
+  __ bind(&needs_adjustment);
+  __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ LoadComplementRR(result, result);
+  __ SubP(result, result, Operand(1));
+  __ bind(&done);
+}
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  const Register dividend = ToRegister(instr->dividend());
+  const Register divisor = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+
+  DCHECK(!dividend.is(result));
+  DCHECK(!divisor.is(result));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ Cmp32(divisor, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ Cmp32(dividend, Operand::Zero());
+    __ bne(&dividend_not_zero, Label::kNear);
+    __ Cmp32(divisor, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ Cmp32(dividend, Operand(kMinInt));
+    __ bne(&no_overflow_possible, Label::kNear);
+    __ Cmp32(divisor, Operand(-1));
+    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+    } else {
+      __ bne(&no_overflow_possible, Label::kNear);
+      __ LoadRR(result, dividend);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  __ LoadRR(r0, dividend);
+  __ srda(r0, Operand(32));
+  __ dr(r0, divisor);  // R0:R1 = R1 / divisor; R0 = remainder, R1 = quotient
+
+  __ lr(result, r1);  // Move quotient to result register
+
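+  // The truncating quotient is now in result; turn it into a flooring
+  // quotient: if the operands' signs differ and the division was inexact
+  // (e.g. -7 / 2 -> trunc -3, floor -4), subtract one below.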
+  Label done;
+  Register scratch = scratch0();
+  // If both operands have the same sign then we are done.
+  __ Xor(scratch, dividend, divisor);
+  __ ltr(scratch, scratch);  // Use the 32-bit LoadAndTestRR even on 64-bit
+  __ bge(&done, Label::kNear);
+
+  // If there is no remainder then we are done.
+  __ lr(scratch, result);
+  __ msr(scratch, divisor);
+  __ Cmp32(dividend, scratch);
+  __ beq(&done, Label::kNear);
+
+  // We performed a truncating division. Correct the result.
+  __ Sub32(result, result, Operand(1));
+  __ bind(&done);
+}
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+  DoubleRegister addend = ToDoubleRegister(instr->addend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  // We cannot use madbr because the intermediate value is not rounded
+  // to the proper precision.
+  __ ldr(result, multiplier);
+  __ mdbr(result, multiplicand);
+  __ adbr(result, addend);
+}
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  // We cannot use msdbr because the intermediate value is not rounded
+  // to the proper precision.
+  __ ldr(result, multiplier);
+  __ mdbr(result, multiplicand);
+  __ sdbr(result, minuend);
+}
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should be -0.
+      __ CmpP(left, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+
+    switch (constant) {
+      case -1:
+        if (can_overflow) {
+#if V8_TARGET_ARCH_S390X
+          if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+            __ LoadComplementRR(result, left);
+            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+#if V8_TARGET_ARCH_S390X
+          } else {
+            __ LoadComplementRR(result, left);
+            __ TestIfInt32(result, r0);
+            DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+          }
+#endif
+        } else {
+          __ LoadComplementRR(result, left);
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+// If left is strictly negative and the constant is zero, the
+// result is -0. Deoptimize if required, otherwise return 0.
+#if V8_TARGET_ARCH_S390X
+          if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+            __ Cmp32(left, Operand::Zero());
+#if V8_TARGET_ARCH_S390X
+          } else {
+            __ Cmp32(left, Operand::Zero());
+          }
+#endif
+          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+        }
+        __ LoadImmP(result, Operand::Zero());
+        break;
+      case 1:
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
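+        // Branch-free absolute value: e.g. constant = -6 gives mask = -1 and
+        // constant_abs = (-6 + -1) ^ -1 = 6. The shifted forms below use
+        // x * 8 == x << 3, x * 9 == (x << 3) + x and x * 7 == (x << 3) - x.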
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ ShiftLeftP(result, left, Operand(shift));
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ LoadComplementRR(result, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ ShiftLeftP(scratch, left, Operand(shift));
+          __ AddP(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ LoadComplementRR(result, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ ShiftLeftP(scratch, left, Operand(shift));
+          __ SubP(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ LoadComplementRR(result, result);
+        } else {
+          // Generate standard code.
+          __ Move(result, left);
+          __ MulP(result, Operand(constant));
+        }
+    }
+
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (can_overflow) {
+#if V8_TARGET_ARCH_S390X
+      // result = left * right.
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ SmiUntag(scratch, right);
+        __ msgr(result, scratch);
+      } else {
+        __ LoadRR(result, left);
+        __ msgr(result, right);
+      }
+      __ TestIfInt32(result, r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiTag(result);
+      }
+#else
+      // r0:scratch = scratch * right
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(scratch, left);
+        __ mr_z(r0, right);
+        __ LoadRR(result, scratch);
+      } else {
+        // r0:scratch = scratch * right
+        __ LoadRR(scratch, left);
+        __ mr_z(r0, right);
+        __ LoadRR(result, scratch);
+      }
+      __ TestIfInt32(r0, result, scratch);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+#endif
+    } else {
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ Mul(result, result, right);
+      } else {
+        __ Mul(result, left, right);
+      }
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
+#if V8_TARGET_ARCH_S390X
+      if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+        __ XorP(r0, left, right);
+        __ LoadAndTestRR(r0, r0);
+        __ bge(&done, Label::kNear);
+#if V8_TARGET_ARCH_S390X
+      } else {
+        __ XorP(r0, left, right);
+        __ Cmp32(r0, Operand::Zero());
+        __ bge(&done, Label::kNear);
+      }
+#endif
+      // Bail out if the result is minus zero.
+      __ CmpP(result, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+}
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->left();
+  LOperand* right_op = instr->right();
+  DCHECK(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+
+  if (right_op->IsConstantOperand()) {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ AndP(result, left, Operand(ToOperand(right_op)));
+        break;
+      case Token::BIT_OR:
+        __ OrP(result, left, Operand(ToOperand(right_op)));
+        break;
+      case Token::BIT_XOR:
+        __ XorP(result, left, Operand(ToOperand(right_op)));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else if (right_op->IsStackSlot()) {
+    // Reg-mem forms clobber the destination, so copy src to dst first.
+    if (!left.is(result)) __ LoadRR(result, left);
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ AndP(result, ToMemOperand(right_op));
+        break;
+      case Token::BIT_OR:
+        __ OrP(result, ToMemOperand(right_op));
+        break;
+      case Token::BIT_XOR:
+        __ XorP(result, ToMemOperand(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    DCHECK(right_op->IsRegister());
+
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ AndP(result, left, ToRegister(right_op));
+        break;
+      case Token::BIT_OR:
+        __ OrP(result, left, ToRegister(right_op));
+        break;
+      case Token::BIT_XOR:
+        __ XorP(result, left, ToRegister(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  if (right_op->IsRegister()) {
+    // Mask the right_op operand.
+    __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
+    switch (instr->op()) {
+      case Token::ROR:
+        // rotate_right(a, b) == rotate_left(a, 32 - b)
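+        // e.g. a right rotation by 5 of a 32-bit value equals a left
+        // rotation by 27, so negate the masked shift amount below.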
+        __ LoadComplementRR(scratch, scratch);
+        __ rll(result, left, scratch, Operand(32));
+#if V8_TARGET_ARCH_S390X
+        __ lgfr(result, result);
+#endif
+        break;
+      case Token::SAR:
+        __ ShiftRightArith(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+        __ lgfr(result, result);
+#endif
+        break;
+      case Token::SHR:
+        __ ShiftRight(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+        __ lgfr(result, result);
+#endif
+        if (instr->can_deopt()) {
+#if V8_TARGET_ARCH_S390X
+          __ ltgfr(result, result /*, SetRC*/);
+#else
+          __ ltr(result, result);  // Set the <,==,> condition
+#endif
+          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+        }
+        break;
+      case Token::SHL:
+        __ ShiftLeft(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+        __ lgfr(result, result);
+#endif
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ rll(result, left, Operand(32 - shift_count));
+#if V8_TARGET_ARCH_S390X
+          __ lgfr(result, result);
+#endif
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ ShiftRightArith(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+          __ lgfr(result, result);
+#endif
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ ShiftRight(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+          __ lgfr(result, result);
+#endif
+        } else {
+          if (instr->can_deopt()) {
+            __ Cmp32(left, Operand::Zero());
+            DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+#if V8_TARGET_ARCH_S390X
+          if (instr->hydrogen_value()->representation().IsSmi()) {
+            __ ShiftLeftP(result, left, Operand(shift_count));
+#else
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
+            if (shift_count != 1) {
+              __ ShiftLeft(result, left, Operand(shift_count - 1));
+#if V8_TARGET_ARCH_S390X
+              __ lgfr(result, result);
+#endif
+              __ SmiTagCheckOverflow(result, result, scratch);
+            } else {
+              __ SmiTagCheckOverflow(result, left, scratch);
+            }
+            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+          } else {
+            __ ShiftLeft(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+            __ lgfr(result, result);
+#endif
+          }
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+
+  bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+                     instr->hydrogen()->representation().IsExternal());
+
+#if V8_TARGET_ARCH_S390X
+  // The overflow detection needs to be tested on the lower 32-bits.
+  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
+  // to set the CC overflow bit properly.  The result is then sign-extended.
+  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+  bool checkOverflow = true;
+#endif
+
+  if (right->IsConstantOperand()) {
+    if (!isInteger || !checkOverflow)
+      __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
+    else
+      __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
+  } else if (right->IsRegister()) {
+    if (!isInteger)
+      __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
+    else if (!checkOverflow)
+      __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
+                        ToRegister(right));
+    else
+      __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
+  } else {
+    if (!left->Equals(instr->result()))
+      __ LoadRR(ToRegister(result), ToRegister(left));
+
+    MemOperand mem = ToMemOperand(right);
+    if (!isInteger) {
+      __ SubP(ToRegister(result), mem);
+    } else {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+      // We want to read the 32 bits directly from memory.
+      MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+      MemOperand Upper32Mem = ToMemOperand(right);
+#endif
+      if (checkOverflow) {
+        __ Sub32(ToRegister(result), Upper32Mem);
+      } else {
+        __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
+      }
+    }
+  }
+
+#if V8_TARGET_ARCH_S390X
+  if (isInteger && checkOverflow)
+    __ lgfr(ToRegister(result), ToRegister(result));
+#endif
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+
+  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
+         right->IsConstantOperand());
+
+#if V8_TARGET_ARCH_S390X
+  // The overflow detection needs to be tested on the lower 32-bits.
+  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
+  // to set the CC overflow bit properly.  The result is then sign-extended.
+  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+  bool checkOverflow = true;
+#endif
+
+  Operand right_operand = ToOperand(right);
+  __ mov(r0, right_operand);
+
+  if (!checkOverflow) {
+    __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
+  } else {
+    __ Sub32(ToRegister(result), r0, ToRegister(left));
+  }
+}
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
+}
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  uint64_t bits = instr->bits();
+  __ LoadDoubleLiteral(result, bits, scratch0());
+}
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ Move(ToRegister(instr->result()), object);
+}
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
+                                           String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+  Register scratch = scratch0();
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
+  // TODO(joransiu) : Fold Add into FieldMemOperand
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ AddP(scratch, string, ToRegister(index));
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
+    __ AddP(scratch, string, scratch);
+  }
+  return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    __ AndP(scratch, scratch,
+            Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ CmpP(scratch,
+            Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
+                                                          : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ llc(result, operand);
+  } else {
+    __ llh(result, operand);
+  }
+}
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+
+  if (FLAG_debug_code) {
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+            ? one_byte_seq_type
+            : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ stc(value, operand);
+  } else {
+    __ sth(value, operand);
+  }
+}
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+                     instr->hydrogen()->representation().IsExternal());
+#if V8_TARGET_ARCH_S390X
+  // The overflow detection needs to be tested on the lower 32-bits.
+  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
+  // to set the CC overflow bit properly.  The result is then sign-extended.
+  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+  bool checkOverflow = true;
+#endif
+
+  if (right->IsConstantOperand()) {
+    if (!isInteger || !checkOverflow)
+      __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
+    else
+      __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
+  } else if (right->IsRegister()) {
+    if (!isInteger)
+      __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
+    else if (!checkOverflow)
+      __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
+                        ToRegister(right));
+    else
+      __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
+  } else {
+    if (!left->Equals(instr->result()))
+      __ LoadRR(ToRegister(result), ToRegister(left));
+
+    MemOperand mem = ToMemOperand(right);
+    if (!isInteger) {
+      __ AddP(ToRegister(result), mem);
+    } else {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+      // We want to read the 32 bits directly from memory.
+      MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+      MemOperand Upper32Mem = ToMemOperand(right);
+#endif
+      if (checkOverflow) {
+        __ Add32(ToRegister(result), Upper32Mem);
+      } else {
+        __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
+      }
+    }
+  }
+
+#if V8_TARGET_ARCH_S390X
+  if (isInteger && checkOverflow)
+    __ lgfr(ToRegister(result), ToRegister(result));
+#endif
+  // Deoptimize on overflow.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Register left_reg = ToRegister(left);
+    Register right_reg = EmitLoadRegister(right, ip);
+    Register result_reg = ToRegister(instr->result());
+    Label return_left, done;
+#if V8_TARGET_ARCH_S390X
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+#endif
+      __ CmpP(left_reg, right_reg);
+#if V8_TARGET_ARCH_S390X
+    } else {
+      __ Cmp32(left_reg, right_reg);
+    }
+#endif
+    __ b(cond, &return_left, Label::kNear);
+    __ Move(result_reg, right_reg);
+    __ b(&done, Label::kNear);
+    __ bind(&return_left);
+    __ Move(result_reg, left_reg);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    DoubleRegister left_reg = ToDoubleRegister(left);
+    DoubleRegister right_reg = ToDoubleRegister(right);
+    DoubleRegister result_reg = ToDoubleRegister(instr->result());
+    Label check_nan_left, check_zero, return_left, return_right, done;
+    __ cdbr(left_reg, right_reg);
+    __ bunordered(&check_nan_left, Label::kNear);
+    __ beq(&check_zero);
+    __ b(cond, &return_left, Label::kNear);
+    __ b(&return_right, Label::kNear);
+
+    __ bind(&check_zero);
+    __ lzdr(kDoubleRegZero);
+    __ cdbr(left_reg, kDoubleRegZero);
+    __ bne(&return_left, Label::kNear);  // left == right != 0.
+
+    // At this point, both left and right are either 0 or -0.
+    // N.B. The following works because +0 + -0 == +0
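+    // e.g. min(+0, -0) = -((-(+0)) + (-(-0))) = -(-0 + +0) = -(+0) = -0,
+    // while max(+0, -0) = (+0) + (-0) = +0.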
+    if (operation == HMathMinMax::kMathMin) {
+      // For min we want logical-or of sign bit: -(-L + -R)
+      __ lcdbr(left_reg, left_reg);
+      __ ldr(result_reg, left_reg);
+      if (left_reg.is(right_reg)) {
+        __ adbr(result_reg, right_reg);
+      } else {
+        __ sdbr(result_reg, right_reg);
+      }
+      __ lcdbr(result_reg, result_reg);
+    } else {
+      // For max we want logical-and of sign bit: (L + R)
+      __ ldr(result_reg, left_reg);
+      __ adbr(result_reg, right_reg);
+    }
+    __ b(&done, Label::kNear);
+
+    __ bind(&check_nan_left);
+    __ cdbr(left_reg, left_reg);
+    __ bunordered(&return_left, Label::kNear);  // left == NaN.
+
+    __ bind(&return_right);
+    if (!right_reg.is(result_reg)) {
+      __ ldr(result_reg, right_reg);
+    }
+    __ b(&done, Label::kNear);
+
+    __ bind(&return_left);
+    if (!left_reg.is(result_reg)) {
+      __ ldr(result_reg, left_reg);
+    }
+    __ bind(&done);
+  }
+}
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  // All operations except MOD are computed in-place.
+  DCHECK(instr->op() == Token::MOD || left.is(result));
+  switch (instr->op()) {
+    case Token::ADD:
+      __ adbr(result, right);
+      break;
+    case Token::SUB:
+      __ sdbr(result, right);
+      break;
+    case Token::MUL:
+      __ mdbr(result, right);
+      break;
+    case Token::DIV:
+      __ ddbr(result, right);
+      break;
+    case Token::MOD: {
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ MovToFloatParameters(left, right);
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(result);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r3));
+  DCHECK(ToRegister(instr->right()).is(r2));
+  DCHECK(ToRegister(instr->result()).is(r2));
+
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+template <class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
+  if (right_block == left_block || cond == al) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ b(cond, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ b(cond, chunk_->GetAssemblyLabel(left_block));
+    __ b(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
+  int true_block = instr->TrueDestination(chunk_);
+  __ b(cond, chunk_->GetAssemblyLabel(true_block));
+}
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ b(cond, chunk_->GetAssemblyLabel(false_block));
+}
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  DoubleRegister dbl_scratch = double_scratch0();
+
+  if (r.IsInteger32()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ Cmp32(reg, Operand::Zero());
+    EmitBranch(instr, ne);
+  } else if (r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ CmpP(reg, Operand::Zero());
+    EmitBranch(instr, ne);
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    DoubleRegister reg = ToDoubleRegister(instr->value());
+    __ lzdr(kDoubleRegZero);
+    __ cdbr(reg, kDoubleRegZero);
+    // Test the double value. Zero and NaN are false.
+    Condition lt_gt = static_cast<Condition>(lt | gt);
+
+    EmitBranch(instr, lt_gt);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      __ CmpP(reg, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, al);
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      __ lzdr(kDoubleRegZero);
+      __ cdbr(dbl_scratch, kDoubleRegZero);
+      Condition lt_gt = static_cast<Condition>(lt | gt);
+      EmitBranch(instr, lt_gt);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+      __ CmpP(ip, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else {
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+        // undefined -> false.
+        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+        __ beq(instr->TrueLabel(chunk_));
+        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ CompareRoot(reg, Heap::kNullValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanICStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ CmpP(reg, Operand::Zero());
+        __ beq(instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ TestIfSmi(reg);
+        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
+                Operand(1 << Map::kIsUndetectable));
+          __ bne(instr->FalseLabel(chunk_));
+        }
+      }
+
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
+        __ bge(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanICStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+        __ bge(&not_string, Label::kNear);
+        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+        __ CmpP(ip, Operand::Zero());
+        __ bne(instr->TrueLabel(chunk_));
+        __ b(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+        __ beq(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+        __ beq(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+        __ bne(&not_heap_number, Label::kNear);
+        __ LoadDouble(dbl_scratch,
+                      FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ lzdr(kDoubleRegZero);
+        __ cdbr(dbl_scratch, kDoubleRegZero);
+        __ bunordered(instr->FalseLabel(chunk_));  // NaN -> false.
+        __ beq(instr->FalseLabel(chunk_));         // +0, -0 -> false.
+        __ b(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
+
+Condition LCodeGen::TokenToCondition(Token::Value op) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = lt;
+      break;
+    case Token::GT:
+      cond = gt;
+      break;
+    case Token::LTE:
+      cond = le;
+      break;
+    case Token::GTE:
+      cond = ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op());
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right operands as doubles and load the
+      // resulting flags into the normal status register.
+      __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to false block label.
+      __ bunordered(instr->FalseLabel(chunk_));
+    } else {
+      if (right->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          if (is_unsigned) {
+            __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+          } else {
+            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+          }
+        } else {
+          if (is_unsigned) {
+            __ CmpLogical32(ToRegister(left), ToOperand(right));
+          } else {
+            __ Cmp32(ToRegister(left), ToOperand(right));
+          }
+        }
+      } else if (left->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          if (is_unsigned) {
+            __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+          } else {
+            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+          }
+        } else {
+          if (is_unsigned) {
+            __ CmpLogical32(ToRegister(right), ToOperand(left));
+          } else {
+            __ Cmp32(ToRegister(right), ToOperand(left));
+          }
+        }
+        // We commuted the operands, so commute the condition.
+        cond = CommuteCondition(cond);
+      } else if (instr->hydrogen_value()->representation().IsSmi()) {
+        if (is_unsigned) {
+          __ CmpLogicalP(ToRegister(left), ToRegister(right));
+        } else {
+          __ CmpP(ToRegister(left), ToRegister(right));
+        }
+      } else {
+        if (is_unsigned) {
+          __ CmpLogical32(ToRegister(left), ToRegister(right));
+        } else {
+          __ Cmp32(ToRegister(left), ToRegister(right));
+        }
+      }
+    }
+    EmitBranch(instr, cond);
+  }
+}
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  __ CmpP(left, right);
+  EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ CmpP(input_reg, Operand(factory()->the_hole_value()));
+    EmitBranch(instr, eq);
+    return;
+  }
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->object());
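+  // Only a NaN compares unordered against itself, so an ordered result here
+  // rules out the hole NaN immediately.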
+  __ cdbr(input_reg, input_reg);
+  EmitFalseBranch(instr, ordered);
+
+  Register scratch = scratch0();
+  // Convert to GPR and examine the upper 32 bits.
+  __ lgdr(scratch, input_reg);
+  __ srlg(scratch, scratch, Operand(32));
+  __ Cmp32(scratch, Operand(kHoleNanUpper32));
+  EmitBranch(instr, eq);
+}
+
+Condition LCodeGen::EmitIsString(Register input, Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+                              ? OMIT_SMI_CHECK
+                              : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register input_reg = EmitLoadRegister(instr->value(), ip);
+  __ TestIfSmi(input_reg);
+  EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
+        Operand(1 << Map::kIsUndetectable));
+  EmitBranch(instr, ne);
+}
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r3));
+  DCHECK(ToRegister(instr->right()).is(r2));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, eq);
+}
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return ge;
+  if (from == FIRST_TYPE) return le;
+  UNREACHABLE();
+  return eq;
+}
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
+  __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
+  __ AndP(r0, scratch);
+  EmitBranch(instr, eq);
+}
+
+// Branches to a label or falls through with the answer in flags.  Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
+                               Handle<String> class_name, Register input,
+                               Register temp, Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
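+  // All function kinds sit at the top of the instance type range (see the
+  // STATIC_ASSERT below), so a single range check classifies them.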
+  __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ bge(is_true);
+  } else {
+    __ bge(is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  Register instance_type = ip;
+  __ GetMapConstructor(temp, temp, temp2, instance_type);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
+  if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
+    __ bne(is_true);
+  } else {
+    __ bne(is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(temp,
+           FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  __ CmpP(temp, Operand(class_name));
+  // End with the answer in flags.
+}
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temp());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                  class_name, input, temp, temp2);
+
+  EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ mov(temp, Operand(instr->map()));
+  __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(r2));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = scratch0();
+  Register const object_instance_type = ip;
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ TestIfSmi(object);
+    EmitFalseBranch(instr, eq);
+  }
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ LoadlB(object_instance_type,
+            FieldMemOperand(object_map, Map::kBitFieldOffset));
+  __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
+  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+  // Deoptimize for proxies.
+  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+  DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+  __ LoadP(object_prototype,
+           FieldMemOperand(object_map, Map::kPrototypeOffset));
+  __ CmpP(object_prototype, prototype);
+  EmitTrueBranch(instr, eq);
+  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+  EmitFalseBranch(instr, eq);
+  __ LoadP(object_map,
+           FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+  __ b(&loop);
+}
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // This instruction also signals no smi code inlined
+  __ CmpP(r2, Operand::Zero());
+
+  Condition condition = ComputeCompareCondition(op);
+  Label true_value, done;
+
+  __ b(condition, &true_value, Label::kNear);
+
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  __ b(&done, Label::kNear);
+
+  __ bind(&true_value);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+
+  __ bind(&done);
+}
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in r2.  We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it's
+    // safe to write to the context register.
+    __ push(r2);
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+    if (NeedsEagerFrame()) {
+      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+    } else if (sp_delta != 0) {
+      // TODO(joransiu): Clean this up into Macro Assembler
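+      // la only encodes a 12-bit unsigned displacement; use lay otherwise.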
+      if (sp_delta >= 0 && sp_delta < 4096)
+        __ la(sp, MemOperand(sp, sp_delta));
+      else
+        __ lay(sp, MemOperand(sp, sp_delta));
+    }
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    if (NeedsEagerFrame()) {
+      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+    }
+    __ SmiToPtrArrayOffset(r0, reg);
+    __ AddP(sp, sp, r0);
+  }
+
+  __ Ret();
+}
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(r2));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  // No need to allocate this register.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r2));
+
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
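+  // A hole in the slot marks an uninitialized binding: depending on the mode
+  // we either deoptimize or replace it with undefined.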
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    } else {
+      Label skip;
+      __ bne(&skip, Label::kNear);
+      __ mov(result, Operand(factory()->undefined_value()));
+      __ bind(&skip);
+    }
+  }
+}
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadP(scratch, target);
+    __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    } else {
+      __ bne(&skip_assignment);
+    }
+  }
+
+  __ StoreP(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+                                ? OMIT_SMI_CHECK
+                                : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context, target.offset(), value, scratch,
+                              GetLinkRegisterState(), kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = MemOperand(object, offset);
+    __ LoadRepresentation(result, operand, access.representation(), r0);
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DCHECK(access.IsInobject());
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ ld(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+
+  Representation representation = access.representation();
+
+#if V8_TARGET_ARCH_S390X
+  // 64-bit Smi optimization
+  if (representation.IsSmi() &&
+      instr->hydrogen()->representation().IsInteger32()) {
+    // Read int value directly from upper half of the smi.
+    offset = SmiWordOffset(offset);
+    representation = Representation::Integer32();
+  }
+#endif
+
+  __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
+                        r0);
+}
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r2));
+
+  // Name is always in r4.
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ LoadP(result,
+           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+  __ bne(&done, Label::kNear);
+
+  // Get the prototype from the initial map.
+  __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting from length accounts for one of them; add one more.
+  if (instr->length()->IsConstantOperand()) {
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (instr->index()->IsConstantOperand()) {
+      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+      int index = (const_length - const_index) + 1;
+      __ LoadP(result, MemOperand(arguments, index * kPointerSize));
+    } else {
+      Register index = ToRegister(instr->index());
+      __ SubP(result, index, Operand(const_length + 1));
+      __ LoadComplementRR(result, result);
+      __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+      __ LoadP(result, MemOperand(arguments, result));
+    }
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister(instr->length());
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = const_index - 1;
+    if (loc != 0) {
+      __ SubP(result, length, Operand(loc));
+      __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+      __ LoadP(result, MemOperand(arguments, result));
+    } else {
+      __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
+      __ LoadP(result, MemOperand(arguments, result));
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    __ SubP(result, length, index);
+    __ AddP(result, result, Operand(1));
+    __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+    __ LoadP(result, MemOperand(arguments, result));
+  }
+}
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset();
+  bool use_scratch = false;
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      base_offset += constant_key << element_size_shift;
+      if (!is_int20(base_offset)) {
+        __ mov(scratch0(), Operand(base_offset));
+        base_offset = 0;
+        use_scratch = true;
+      }
+    } else {
+      __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi);
+      use_scratch = true;
+    }
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      if (!use_scratch) {
+        __ ldeb(result, MemOperand(external_pointer, base_offset));
+      } else {
+        __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
+      }
+    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
+      if (!use_scratch) {
+        __ ld(result, MemOperand(external_pointer, base_offset));
+      } else {
+        __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
+      }
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand =
+        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+                            constant_key, element_size_shift, base_offset);
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ LoadB(result, mem_operand);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ LoadlB(result, mem_operand);
+        break;
+      case INT16_ELEMENTS:
+        __ LoadHalfWordP(result, mem_operand);
+        break;
+      case UINT16_ELEMENTS:
+        __ LoadLogicalHalfWordP(result, mem_operand);
+        break;
+      case INT32_ELEMENTS:
+        __ LoadW(result, mem_operand, r0);
+        break;
+      case UINT32_ELEMENTS:
+        __ LoadlW(result, mem_operand, r0);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
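+          // A value with the sign bit set does not fit into a signed int32.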
+          __ CmpLogical32(result, Operand(0x80000000));
+          DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+
+  bool use_scratch = false;
+  intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
+  if (!key_is_constant) {
+    use_scratch = true;
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+  }
+
+  // Memory references support up to 20-bit signed displacements in RXY form.
+  // Include Register::kExponentOffset in the check so we are guaranteed not
+  // to overflow the displacement later.
+  if (!is_int20(base_offset + Register::kExponentOffset)) {
+    use_scratch = true;
+    if (key_is_constant) {
+      __ mov(scratch, Operand(base_offset));
+    } else {
+      __ AddP(scratch, Operand(base_offset));
+    }
+    base_offset = 0;
+  }
+
+  if (!use_scratch) {
+    __ ld(result, MemOperand(elements, base_offset));
+  } else {
+    __ ld(result, MemOperand(scratch, elements, base_offset));
+  }
+
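+  // The hole is a NaN whose upper 32 bits equal kHoleNanUpper32, so checking
+  // the exponent word is sufficient to detect it.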
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (!use_scratch) {
+      __ LoadlW(r0,
+                MemOperand(elements, base_offset + Register::kExponentOffset));
+    } else {
+      __ LoadlW(r0, MemOperand(scratch, elements,
+                               base_offset + Register::kExponentOffset));
+    }
+    __ Cmp32(r0, Operand(kHoleNanUpper32));
+    DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+  }
+}
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  HLoadKeyed* hinstr = instr->hydrogen();
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  int offset = instr->base_offset();
+
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+  } else {
+    Register key = ToRegister(instr->key());
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (hinstr->key()->representation().IsSmi()) {
+      __ SmiToPtrArrayOffset(scratch, key);
+    } else {
+      __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
+    }
+  }
+
+  bool requires_hole_check = hinstr->RequiresHoleCheck();
+  Representation representation = hinstr->representation();
+
+#if V8_TARGET_ARCH_S390X
+  // 64-bit Smi optimization
+  if (representation.IsInteger32() &&
+      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+    DCHECK(!requires_hole_check);
+    // Read int value directly from upper half of the smi.
+    offset = SmiWordOffset(offset);
+  }
+#endif
+
+  if (instr->key()->IsConstantOperand()) {
+    __ LoadRepresentation(result, MemOperand(elements, offset), representation,
+                          r1);
+  } else {
+    __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
+                          representation, r1);
+  }
+
+  // Check for the hole value.
+  if (requires_hole_check) {
+    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
+      __ TestIfSmi(result);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+    } else {
+      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ CmpP(result, scratch);
+    __ bne(&done);
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+      __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+    }
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+    __ bind(&done);
+  }
+}
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
+                                         bool key_is_constant, bool key_is_smi,
+                                         int constant_key,
+                                         int element_size_shift,
+                                         int base_offset) {
+  Register scratch = scratch0();
+
+  if (key_is_constant) {
+    int offset = (base_offset + (constant_key << element_size_shift));
+    if (!is_int20(offset)) {
+      __ mov(scratch, Operand(offset));
+      return MemOperand(base, scratch);
+    } else {
+      return MemOperand(base,
+                        (constant_key << element_size_shift) + base_offset);
+    }
+  }
+
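+  // A smi key is already shifted by kSmiTagSize + kSmiShiftSize bits; rescale
+  // only when that does not match the element size shift.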
+  bool needs_shift =
+      (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
+
+  if (needs_shift) {
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+  } else {
+    scratch = key;
+  }
+
+  if (!is_int20(base_offset)) {
+    __ AddP(scratch, Operand(base_offset));
+    base_offset = 0;
+  }
+  return MemOperand(scratch, base, base_offset);
+}
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ lay(result, MemOperand(sp, -2 * kPointerSize));
+  } else if (instr->hydrogen()->arguments_adaptor()) {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ LoadP(
+        result,
+        MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+
+    // Result is the frame pointer for the frame if not adapted and for the real
+    // frame below the adaptor frame if adapted.
+    __ beq(&adapted, Label::kNear);
+    __ LoadRR(result, fp);
+    __ b(&done, Label::kNear);
+
+    __ bind(&adapted);
+    __ LoadRR(result, scratch);
+    __ bind(&done);
+  } else {
+    __ LoadRR(result, fp);
+  }
+}
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ CmpP(fp, elem);
+  __ mov(result, Operand(scope()->num_parameters()));
+  __ beq(&done, Label::kNear);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(result,
+           MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, result_in_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode
+    // functions or builtins.
+    __ LoadP(scratch,
+             FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadlW(scratch, FieldMemOperand(
+                           scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+                                 (1 << SharedFunctionInfo::kNativeBit)));
+    __ bne(&result_in_receiver, Label::kNear);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+  __ beq(&global_object, Label::kNear);
+  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
+  __ beq(&global_object, Label::kNear);
+
+  // Deoptimize if the receiver is not a JS object.
+  __ TestIfSmi(receiver);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+  __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
+  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+
+  __ b(&result_in_receiver, Label::kNear);
+  __ bind(&global_object);
+  __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+  __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
+  if (result.is(receiver)) {
+    __ bind(&result_in_receiver);
+  } else {
+    Label result_ok;
+    __ b(&result_ok, Label::kNear);
+    __ bind(&result_in_receiver);
+    __ LoadRR(result, receiver);
+    __ bind(&result_ok);
+  }
+}
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DCHECK(receiver.is(r2));  // Used for parameter count.
+  DCHECK(function.is(r3));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(r2));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ CmpLogicalP(length, Operand(kArgumentsLimit));
+  DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ LoadRR(receiver, length);
+  // The arguments are at a one pointer size offset from elements.
+  __ AddP(elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ CmpP(length, Operand::Zero());
+  __ beq(&invoke, Label::kNear);
+  __ bind(&loop);
+  __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
+  __ LoadP(scratch, MemOperand(elements, r1));
+  __ push(scratch);
+  __ BranchOnCount(length, &loop);
+
+  __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(r2);
+    // It is safe to use r5, r6 and r7 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) r5 (new.target) will be initialized below.
+    PrepareForTailCall(actual, r5, r6, r7);
+  }
+
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver, which is r2, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
+}
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort(kDoPushArgumentNotImplementedForDoubleType);
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, ip);
+    __ push(argument_reg);
+  }
+}
+
+void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  __ Move(scratch0(), instr->hydrogen()->pairs());
+  __ push(scratch0());
+  __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
+  __ push(scratch0());
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 bool is_tail_call, LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  Register function_reg = r3;
+
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ mov(r2, Operand(arity));
+
+    bool is_self_call = function.is_identical_to(info()->closure());
+
+    // Invoke function.
+    if (is_self_call) {
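+      // A self-call can reuse the code object currently being generated
+      // instead of loading the code entry from the JSFunction.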
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
+    } else {
+      __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+      if (is_tail_call) {
+        __ JumpToJSEntry(ip);
+      } else {
+        __ CallJSEntry(ip);
+      }
+    }
+
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount actual(arity);
+    ParameterCount expected(formal_parameter_count);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
+  }
+}
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ Cmp32(exponent, Operand::Zero());
+  // Move the input to the result if necessary.
+  __ Move(result, input);
+  __ bge(&done);
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(r3) ? r2 : r3;
+    Register tmp2 = input.is(r4) ? r2 : r4;
+    Register tmp3 = input.is(r5) ? r2 : r5;
+    Register tmp4 = input.is(r6) ? r2 : r6;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ b(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+
+    // Clear the sign bit.
+    __ nilf(exponent, Operand(~HeapNumber::kSignMask));
+    __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+void LCodeGen::EmitMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label done;
+  __ CmpP(input, Operand::Zero());
+  __ Move(result, input);
+  __ bge(&done, Label::kNear);
+  __ LoadComplementRR(result, result);
+  // Deoptimize on overflow.
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+  __ bind(&done);
+}
+
+#if V8_TARGET_ARCH_S390X
+void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label done;
+  __ Cmp32(input, Operand::Zero());
+  __ Move(result, input);
+  __ bge(&done, Label::kNear);
+
+  // Deoptimize on overflow.
+  __ Cmp32(input, Operand(0x80000000));
+  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+
+  __ LoadComplementRR(result, result);
+  __ bind(&done);
+}
+#endif
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    DoubleRegister input = ToDoubleRegister(instr->value());
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ lpdbr(result, input);
+#if V8_TARGET_ARCH_S390X
+  } else if (r.IsInteger32()) {
+    EmitInteger32MathAbs(instr);
+  } else if (r.IsSmi()) {
+#else
+  } else if (r.IsSmiOrInteger32()) {
+#endif
+    EmitMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register input_high = scratch0();
+  Register scratch = ip;
+  Label done, exact;
+
+  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
+                   &exact);
+  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+  __ bind(&exact);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ CmpP(result, Operand::Zero());
+    __ bne(&done, Label::kNear);
+    __ Cmp32(input_high, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+  }
+  __ bind(&done);
+}
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+  DoubleRegister input_plus_dot_five = double_scratch1;
+  Register scratch1 = scratch0();
+  Register scratch2 = ip;
+  DoubleRegister dot_five = double_scratch0();
+  Label convert, done;
+
+  __ LoadDoubleLiteral(dot_five, 0.5, r0);
+  __ lpdbr(double_scratch1, input);
+  __ cdbr(double_scratch1, dot_five);
+  DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+  // If input is in [-0.5, -0], the result is -0.
+  // If input is in [+0, +0.5[, the result is +0.
+  // If the input is +0.5, the result is 1.
+  __ bgt(&convert, Label::kNear);  // Out of [-0.5, +0.5].
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // [-0.5, -0] (negative) yields minus zero.
+    __ TestDoubleSign(input, scratch1);
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+  }
+  Label return_zero;
+  __ cdbr(input, dot_five);
+  __ bne(&return_zero, Label::kNear);
+  __ LoadImmP(result, Operand(1));  // +0.5.
+  __ b(&done, Label::kNear);
+  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+  // flag kBailoutOnMinusZero.
+  __ bind(&return_zero);
+  __ LoadImmP(result, Operand::Zero());
+  __ b(&done, Label::kNear);
+
+  __ bind(&convert);
+  __ ldr(input_plus_dot_five, input);
+  __ adbr(input_plus_dot_five, dot_five);
+  // Reuse dot_five (double_scratch0) as we no longer need this value.
+  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
+                   double_scratch0(), &done, &done);
+  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+  __ bind(&done);
+}
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  DoubleRegister output_reg = ToDoubleRegister(instr->result());
+
+  // Round double to float
+  __ ledbr(output_reg, input_reg);
+  // Extend from float to double
+  __ ldebr(output_reg, output_reg);
+}
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ sqdbr(result, input);
+}
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = double_scratch0();
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label skip, done;
+
+  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
+  __ cdbr(input, temp);
+  __ bne(&skip, Label::kNear);
+  __ lcdbr(result, temp);
+  __ b(&done, Label::kNear);
+
+  // Add +0 to convert -0 to +0.
+  __ bind(&skip);
+  __ ldr(result, input);
+  __ lzdr(kDoubleRegZero);
+  __ adbr(result, kDoubleRegZero);
+  __ sqdbr(result, result);
+  __ bind(&done);
+}
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(d2));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(d1));
+  DCHECK(ToDoubleRegister(instr->result()).is(d3));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
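+    // A tagged exponent must be a smi or a heap number; deoptimize on
+    // anything else before calling the stub.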
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+    __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DoubleRegister double_scratch2 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
+                                double_scratch2, temp1, temp2, scratch0());
+}
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
+                   1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label done;
+  __ llgfr(result, input);
+  __ flogr(r0, result);
+  __ LoadRR(result, r0);
+  __ CmpP(r0, Operand::Zero());
+  __ beq(&done, Label::kNear);
+  __ SubP(result, Operand(32));
+  __ bind(&done);
+}
+
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(scratch3,
+           MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ LoadRR(fp, scratch2);
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+  Comment(";;; }");
+}
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r3));
+  DCHECK(instr->HasPointerMap());
+
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use r5, r6 and r7 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) r5 (new.target) will be initialized below.
+    PrepareForTailCall(actual, r5, r6, r7);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(r3, no_reg, actual, flag, generator);
+  } else {
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
+  }
+}
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(r2));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ JumpToJSEntry(ip);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ CallJSEntry(ip);
+    }
+    generator.AfterCall();
+  }
+}
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(r3));
+  DCHECK(ToRegister(instr->result()).is(r2));
+
+  __ mov(r2, Operand(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site for the case where we have a length
+    // argument. That case may bail out to the runtime, which will determine
+    // the correct elements kind with the site.
+    __ Move(r4, instr->hydrogen()->site());
+  } else {
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  }
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // The elements kind might need to change to a holey kind;
+      // look at the first argument (the length) to decide.
+      __ LoadP(r7, MemOperand(sp, 0));
+      __ CmpP(r7, Operand::Zero());
+      __ beq(&packed_case, Label::kNear);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ b(&done, Label::kNear);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
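+  // Compute the code entry address (the untagged Code pointer plus the
+  // header size) and store it into the function's code entry field.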
+  __ lay(code_object,
+         MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(code_object,
+            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
+}
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ lay(result, MemOperand(base, ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ lay(result, MemOperand(base, offset));
+  }
+}
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  HStoreNamedField* hinstr = instr->hydrogen();
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  Register scratch = scratch0();
+  HObjectAccess access = hinstr->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register value = ToRegister(instr->value());
+    MemOperand operand = MemOperand(object, offset);
+    __ StoreRepresentation(value, operand, representation, r0);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+#if V8_TARGET_ARCH_S390X
+  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+         IsInteger32(LConstantOperand::cast(instr->value())));
+#else
+  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+#endif
+  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!hinstr->has_transition());
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    DCHECK(offset >= 0);
+    __ std(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  if (hinstr->has_transition()) {
+    Handle<Map> transition = hinstr->transition_map();
+    AddDeprecationDependency(transition);
+    __ mov(scratch, Operand(transition));
+    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
+    if (hinstr->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register record_dest = object;
+  Register record_value = no_reg;
+  Register record_scratch = scratch;
+#if V8_TARGET_ARCH_S390X
+  if (FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ std(value, FieldMemOperand(object, offset));
+    if (hinstr->NeedsWriteBarrier()) {
+      record_value = ToRegister(instr->value());
+    }
+  } else {
+    if (representation.IsSmi() &&
+        hinstr->value()->representation().IsInteger32()) {
+      DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+      // 64-bit Smi optimization
+      // Store int value directly to upper half of the smi.
+      offset = SmiWordOffset(offset);
+      representation = Representation::Integer32();
+    }
+#endif
+    if (access.IsInobject()) {
+      Register value = ToRegister(instr->value());
+      MemOperand operand = FieldMemOperand(object, offset);
+      __ StoreRepresentation(value, operand, representation, r0);
+      record_value = value;
+    } else {
+      Register value = ToRegister(instr->value());
+      __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+      MemOperand operand = FieldMemOperand(scratch, offset);
+      __ StoreRepresentation(value, operand, representation, r0);
+      record_dest = scratch;
+      record_value = value;
+      record_scratch = object;
+    }
+#if V8_TARGET_ARCH_S390X
+  }
+#endif
+
+  if (hinstr->NeedsWriteBarrier()) {
+    __ RecordWriteField(record_dest, offset, record_value, record_scratch,
+                        GetLinkRegisterState(), kSaveFPRegs,
+                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+                        hinstr->PointersToHereCheckForValue());
+  }
+}
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state())
+                        .code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Representation representation = instr->hydrogen()->length()->representation();
+  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+  DCHECK(representation.IsSmiOrInteger32());
+
+  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
+  if (instr->length()->IsConstantOperand()) {
+    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+    Register index = ToRegister(instr->index());
+    if (representation.IsSmi()) {
+      __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
+    } else {
+      __ CmpLogical32(index, Operand(length));
+    }
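+    // The comparison above has its operands reversed (index is compared
+    // against the length), so the condition must be commuted to match.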
+    cc = CommuteCondition(cc);
+  } else if (instr->index()->IsConstantOperand()) {
+    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+    Register length = ToRegister(instr->length());
+    if (representation.IsSmi()) {
+      __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
+    } else {
+      __ CmpLogical32(length, Operand(index));
+    }
+  } else {
+    Register index = ToRegister(instr->index());
+    Register length = ToRegister(instr->length());
+    if (representation.IsSmi()) {
+      __ CmpLogicalP(length, index);
+    } else {
+      __ CmpLogical32(length, index);
+    }
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ b(NegateCondition(cc), &done, Label::kNear);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+  }
+}
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    Register address = scratch0();
+    DoubleRegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      if (constant_key != 0) {
+        base_offset += constant_key << element_size_shift;
+        if (!is_int20(base_offset)) {
+          __ mov(address, Operand(base_offset));
+          __ AddP(address, external_pointer);
+        } else {
+          __ AddP(address, external_pointer, Operand(base_offset));
+        }
+        base_offset = 0;
+      } else {
+        address = external_pointer;
+      }
+    } else {
+      __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi);
+      __ AddP(address, external_pointer);
+    }
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ ledbr(double_scratch0(), value);
+      __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
+    } else {  // Storing doubles, not floats.
+      __ StoreDouble(value, MemOperand(address, base_offset));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand =
+        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+                            constant_key, element_size_shift, base_offset);
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreByte(value, mem_operand, r0);
+        } else {
+          __ StoreByte(value, mem_operand);
+        }
+        break;
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreHalfWord(value, mem_operand, r0);
+        } else {
+          __ StoreHalfWord(value, mem_operand);
+        }
+        break;
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreW(value, mem_operand, r0);
+        } else {
+          __ StoreW(value, mem_operand);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = scratch0();
+  DoubleRegister double_scratch = double_scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+  bool use_scratch = false;
+  intptr_t address_offset = base_offset;
+
+  if (key_is_constant) {
+    // Memory references support up to a 20-bit signed displacement in RXY form.
+    if (!is_int20((address_offset))) {
+      __ mov(scratch, Operand(address_offset));
+      address_offset = 0;
+      use_scratch = true;
+    }
+  } else {
+    use_scratch = true;
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    // Memory references support up to a 20-bit signed displacement in RXY form.
+    if (!is_int20((address_offset))) {
+      __ AddP(scratch, Operand(address_offset));
+      address_offset = 0;
+    }
+  }
+
+  if (instr->NeedsCanonicalization()) {
+    // Turn potential sNaN value into qNaN.
+    __ CanonicalizeNaN(double_scratch, value);
+    DCHECK(address_offset >= 0);
+    if (use_scratch)
+      __ std(double_scratch, MemOperand(scratch, elements, address_offset));
+    else
+      __ std(double_scratch, MemOperand(elements, address_offset));
+  } else {
+    if (use_scratch)
+      __ std(value, MemOperand(scratch, elements, address_offset));
+    else
+      __ std(value, MemOperand(elements, address_offset));
+  }
+}
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  HStoreKeyed* hinstr = instr->hydrogen();
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+  Register scratch = scratch0();
+  int offset = instr->base_offset();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+  } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (hinstr->key()->representation().IsSmi()) {
+      __ SmiToPtrArrayOffset(scratch, key);
+    } else {
+      if (instr->hydrogen()->IsDehoisted() ||
+          !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+#if V8_TARGET_ARCH_S390X
+        // If the array access is dehoisted, the key, being an int32, can
+        // contain a negative value and needs to be sign-extended to 64 bits
+        // for the memory access.
+        __ lgfr(key, key);
+#endif
+        __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
+      } else {
+        // Small optimization to reduce path length.  After the bounds check,
+        // the key is guaranteed to be non-negative.  Leverage RISBG,
+        // which also performs zero-extension.
+        __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
+                 Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
+                 true);
+      }
+    }
+  }
+
+  Representation representation = hinstr->value()->representation();
+
+#if V8_TARGET_ARCH_S390X
+  // 64-bit Smi optimization
+  if (representation.IsInteger32()) {
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+    // Store int value directly to upper half of the smi.
+    offset = SmiWordOffset(offset);
+  }
+#endif
+
+  if (instr->key()->IsConstantOperand()) {
+    __ StoreRepresentation(value, MemOperand(elements, offset), representation,
+                           scratch);
+  } else {
+    __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
+                           representation, r0);
+  }
+
+  if (hinstr->NeedsWriteBarrier()) {
+    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
+                                ? OMIT_SMI_CHECK
+                                : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    if (instr->key()->IsConstantOperand()) {
+      __ lay(key, MemOperand(elements, offset));
+    } else {
+      __ lay(key, MemOperand(scratch, elements, offset));
+    }
+    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+                   EMIT_REMEMBERED_SET, check_needed,
+                   hinstr->PointersToHereCheckForValue());
+  }
+}
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by case: external (typed) array, fast double array, or fast array.
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state())
+                        .code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = r2;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ b(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
+    __ ble(deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ Cmp32(ToRegister(key), Operand(constant_capacity));
+    __ bge(deferred->entry());
+  } else {
+    __ Cmp32(ToRegister(key), ToRegister(current_capacity));
+    __ bge(deferred->entry());
+  }
+
+  if (instr->elements()->IsRegister()) {
+    __ Move(result, ToRegister(instr->elements()));
+  } else {
+    __ LoadP(result, ToMemOperand(instr->elements()));
+  }
+
+  __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = r2;
+  __ LoadImmP(result, Operand::Zero());
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ Move(result, ToRegister(instr->object()));
+    } else {
+      __ LoadP(result, ToMemOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ LoadSmiLiteral(r5, ToSmi(LConstantOperand::cast(key)));
+    } else {
+      __ SmiTag(r5, ToRegister(key));
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ TestIfSmi(result);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+}
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ CmpP(scratch, Operand(from_map));
+  __ bne(&not_applicable);
+
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(new_map_reg, Operand(to_map));
+    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
+                         GetLinkRegisterState(), kDontSaveFPRegs);
+  } else {
+    DCHECK(ToRegister(instr->context()).is(cp));
+    DCHECK(object_reg.is(r2));
+    PushSafepointRegistersScope scope(this);
+    __ Move(r3, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kLazyDeopt);
+  }
+  __ bind(&not_applicable);
+}
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
+  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+  __ bind(&no_memento_found);
+}
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r3));
+  DCHECK(ToRegister(instr->right()).is(r2));
+  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new (zone()) DeferredStringCharCodeAt(this, instr);
+
+  StringCharLoadGenerator::Generate(
+      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
+      ToRegister(instr->result()), deferred->entry());
+  __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ LoadImmP(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(r2);
+  __ SmiUntag(r2);
+  __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new (zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  DCHECK(!char_code.is(result));
+
+  __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
+  __ bgt(deferred->entry());
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
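+  // Look up the cached single-character string: result = cache[char_code].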
+  __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
+  __ AddP(result, r0);
+  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+  __ beq(deferred->entry());
+  __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ LoadImmP(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ LoadP(scratch, ToMemOperand(input));
+    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
+  } else {
+    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
+  }
+}
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
+}
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI final : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), SIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  Register src = ToRegister(instr->value());
+  Register dst = ToRegister(instr->result());
+
+  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
+#if V8_TARGET_ARCH_S390X
+  __ SmiTag(dst, src);
+#else
+  // Add src to itself to detect Smi overflow.
+  __ Add32(dst, src, src);
+  __ b(overflow, deferred->entry());
+#endif
+  __ bind(deferred->exit());
+}
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
+  __ CmpLogicalP(input, Operand(Smi::kMaxValue));
+  __ bgt(deferred->entry());
+  __ SmiTag(result, input);
+  __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+                                     LOperand* temp1, LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register src = ToRegister(value);
+  Register dst = ToRegister(instr->result());
+  Register tmp1 = scratch0();
+  Register tmp2 = ToRegister(temp1);
+  Register tmp3 = ToRegister(temp2);
+  DoubleRegister dbl_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value in there. If that fails, call the runtime system.
+    if (dst.is(src)) {
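+      // dst holds the overflowed, Smi-tagged value; untag it and flip the
+      // sign bit to recover the original integer.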
+      __ SmiUntag(src, dst);
+      __ xilf(src, Operand(HeapNumber::kSignMask));
+    }
+    __ ConvertIntToDouble(src, dbl_scratch);
+  } else {
+    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
+  }
+
+  if (FLAG_inline_new) {
+    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
+    __ b(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ LoadImmP(dst, Operand::Zero());
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(r2, dst);
+  }
+
+  // Done. Store the value held in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+  } else {
+    __ b(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ LoadImmP(reg, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                               Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r2, reg);
+}
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ TestUnsignedSmiCandidate(input, r0);
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+  }
+#if !V8_TARGET_ARCH_S390X
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ SmiTagCheckOverflow(output, input, r0);
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+  } else {
+#endif
+    __ SmiTag(output, input);
+#if !V8_TARGET_ARCH_S390X
+  }
+#endif
+}
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    __ tmll(input, Operand(kHeapObjectTag));
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+    __ SmiUntag(result, input);
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                DoubleRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Register scratch = scratch0();
+  DCHECK(!result_reg.is(double_scratch0()));
+
+  Label convert, load_smi, done;
+
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+    // Heap number map check.
+    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
+
+    if (can_convert_undefined_to_nan) {
+      __ bne(&convert, Label::kNear);
+    } else {
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+    }
+    // load heap number
+    __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      __ TestDoubleIsMinusZero(result_reg, scratch, ip);
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+    __ b(&done, Label::kNear);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ b(&done, Label::kNear);
+    }
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+  // Smi to double register conversion
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ ConvertIntToDouble(scratch, result_reg);
+  __ bind(&done);
+}
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->value());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->temp());
+  DoubleRegister double_scratch = double_scratch0();
+  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // Heap number map check.
+  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label no_heap_number, check_bools, check_false;
+    __ bne(&no_heap_number, Label::kNear);
+    __ LoadRR(scratch2, input_reg);
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ b(&done, Label::kNear);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
+    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+    __ bne(&check_bools);
+    __ LoadImmP(input_reg, Operand::Zero());
+    __ b(&done, Label::kNear);
+
+    __ bind(&check_bools);
+    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
+    __ bne(&check_false, Label::kNear);
+    __ LoadImmP(input_reg, Operand(1));
+    __ b(&done, Label::kNear);
+
+    __ bind(&check_false);
+    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+    __ LoadImmP(input_reg, Operand::Zero());
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+    __ ld(double_scratch2,
+          FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Preserve the heap number pointer in scratch2 for the minus-zero check below.
+      __ LoadRR(scratch2, input_reg);
+    }
+    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
+                             double_scratch);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ CmpP(input_reg, Operand::Zero());
+      __ bne(&done, Label::kNear);
+      __ TestHeapNumberSign(scratch2, scratch1);
+      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    }
+  }
+  __ bind(&done);
+}
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
+
+    // Branch to deferred code if the input is a HeapObject.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
+}
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+                              ? NUMBER_CANDIDATE_IS_SMI
+                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+  DoubleRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+                             double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ CmpP(result_reg, Operand::Zero());
+      __ bne(&done, Label::kNear);
+      __ TestDoubleSign(double_input, scratch1);
+      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+}
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+  DoubleRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+                             double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ CmpP(result_reg, Operand::Zero());
+      __ bne(&done, Label::kNear);
+      __ TestDoubleSign(double_input, scratch1);
+      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
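+  // Tag the integer result as a Smi; on 32-bit targets the tag can overflow.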
+#if V8_TARGET_ARCH_S390X
+  __ SmiTag(result_reg);
+#else
+  __ SmiTagCheckOverflow(result_reg, r0);
+  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+}
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ TestIfSmi(ToRegister(input));
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+}
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ TestIfSmi(ToRegister(input));
+    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+  }
+}
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+  Register scratch = scratch0();
+
+  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+  __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+  __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
+  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
+}
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
+                      Operand(first));
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
+                          Operand(last));
+        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
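+      // With a single-bit mask and a tag of zero or that same bit, a plain
+      // AND suffices: deoptimize when the bit does not match the tag.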
+      __ AndP(scratch, Operand(mask));
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      __ AndP(scratch, Operand(mask));
+      __ CmpP(scratch, Operand(tag));
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    }
+  }
+}
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ mov(ip, Operand(cell));
+    __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
+  } else {
+    __ CmpP(reg, Operand(object));
+  }
+  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+}
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Register temp = ToRegister(instr->temp());
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ LoadImmP(cp, Operand::Zero());
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(r2, temp);
+  }
+  __ TestIfSmi(temp);
+  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+}
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
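+  // Compare against all but the last map; the final comparison decides
+  // whether to deoptimize (or migrate).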
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(reg, map, &success);
+    __ beq(&success);
+  }
+
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(reg, map, &success);
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ bne(deferred->entry());
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  }
+
+  __ bind(&success);
+}
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
+}
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+
+  // Check for heap number
+  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ CmpP(scratch, Operand(factory()->heap_number_map()));
+  __ beq(&heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ CmpP(input_reg, Operand(factory()->undefined_value()));
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+  __ LoadImmP(result_reg, Operand::Zero());
+  __ b(&done, Label::kNear);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
+  __ b(&done, Label::kNear);
+
+  // smi
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, result_reg);
+
+  __ bind(&done);
+}
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
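+  // Move the raw 64-bit double bits into a GPR, then extract the requested
+  // 32-bit half.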
+  __ lgdr(result_reg, value_reg);
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ srlg(result_reg, result_reg, Operand(32));
+  } else {
+    __ llgfr(result_reg, result_reg);
+  }
+}
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Combine hi_reg:lo_reg into a single 64-bit register.
+  __ sllg(scratch, hi_reg, Operand(32));
+  __ lr(scratch, lo_reg);
+
+  // Bitwise convert from GPR to FPR
+  __ ldgr(result_reg, scratch);
+}
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ LoadIntLiteral(scratch, size);
+    } else {
+      scratch = ToRegister(instr->size());
+    }
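+    // Fill the allocated object with one-pointer filler maps, from the last
+    // slot down to offset 0.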
+    __ lay(scratch, MemOperand(scratch, -kPointerSize));
+    Label loop;
+    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    __ bind(&loop);
+    __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
+#if V8_TARGET_ARCH_S390X
+    __ lay(scratch, MemOperand(scratch, -kPointerSize));
+#else
+    // TODO(joransiu): Improve the following sequence.
+    // Need to use AHI instead of LAY as top nibble is not set with LAY, causing
+    // incorrect result with the signed compare
+    __ AddP(scratch, Operand(-kPointerSize));
+#endif
+    __ CmpP(scratch, Operand::Zero());
+    __ bge(&loop);
+  }
+}
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ LoadSmiLiteral(result, Smi::FromInt(0));
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(size);
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+#if !V8_TARGET_ARCH_S390X
+    if (size >= 0 && size <= Smi::kMaxValue) {
+#endif
+      __ Push(Smi::FromInt(size));
+#if !V8_TARGET_ARCH_S390X
+    } else {
+      // We should never get here at runtime => abort
+      __ stop("invalid allocation size");
+      return;
+    }
+#endif
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->value()).is(r5));
+  DCHECK(ToRegister(instr->result()).is(r2));
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
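+  // Fast path: a Smi is always typeof "number".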
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ mov(r2, Operand(isolate()->factory()->number_string()));
+  __ b(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  Condition final_branch_condition =
+      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
+                   instr->type_literal());
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(instr, final_branch_condition);
+  }
+}
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
+                                 Register input, Handle<String> type_name) {
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label);
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
+    final_branch_condition = lt;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ CompareRoot(input, Heap::kTrueValueRootIndex);
+    __ beq(true_label);
+    __ CompareRoot(input, Heap::kFalseValueRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ beq(false_label);
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+    __ CmpP(r0, Operand::Zero());
+    final_branch_condition = ne;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
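+    // typeof reports "function" only for maps that are callable and not
+    // undetectable.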
+    __ JumpIfSmi(input, false_label);
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ AndP(scratch, scratch,
+            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    __ CmpP(scratch, Operand(1 << Map::kIsCallable));
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ beq(true_label);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
+    __ blt(false_label);
+    // Check for callable or undetectable objects => false.
+    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ AndP(r0, scratch,
+            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    __ CmpP(r0, Operand::Zero());
+    final_branch_condition = eq;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
+  } else if (String::Equals(type_name, factory->type##_string())) {  \
+    __ JumpIfSmi(input, false_label);                                \
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
+    final_branch_condition = eq;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    __ b(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % 2);
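+      // S390 nops are 2 bytes each, so the loop emits padding_size / 2 nops.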
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= 2;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+
+  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
+}
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+    __ bge(&done, Label::kNear);
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new (zone()) DeferredStackCheck(this, instr);
+    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+    __ blt(deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  Label use_cache, call_runtime;
+  __ CheckEnumCache(&call_runtime);
+
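+  // Fast path: the enum cache is usable, so enumerate over the receiver's map.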
+  __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ b(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(r2);
+  CallRuntime(Runtime::kForInEnumerate, instr);
+  __ bind(&use_cache);
+}
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
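+  // An EnumLength of zero means there is nothing to enumerate; fall through
+  // and use the empty fixed array.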
+  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+  __ bne(&load_cache, Label::kNear);
+  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ b(&done, Label::kNear);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ CmpP(result, Operand::Zero());
+  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+
+  __ bind(&done);
+}
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ CmpP(map, scratch0());
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+}
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result, Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object, index);
+  __ LoadImmP(cp, Operand::Zero());
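+  // The runtime call returns the double field's value boxed in a fresh
+  // HeapNumber.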
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(instr->pointer_map(), 2,
+                               Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
+                              Register result, Register object, Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {}
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new (zone())
+      DeferredLoadMutableDouble(this, instr, result, object, index);
+
+  Label out_of_object, done;
+
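+  // The low bit of the index value flags a mutable double field; those are
+  // loaded through the deferred runtime path.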
+  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
+  __ bne(deferred->entry());
+  __ ShiftRightArithP(index, index, Operand(1));
+
+  __ CmpP(index, Operand::Zero());
+  __ blt(&out_of_object, Label::kNear);
+
+  __ SmiToPtrArrayOffset(r0, index);
+  __ AddP(scratch, object, r0);
+  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ b(&done, Label::kNear);
+
+  __ bind(&out_of_object);
+  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is equal to the negated out-of-object property index plus 1.
+  __ SmiToPtrArrayOffset(r0, index);
+  __ SubP(scratch, result, r0);
+  __ LoadP(result,
+           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/s390/lithium-codegen-s390.h b/src/crankshaft/s390/lithium-codegen-s390.h
new file mode 100644
index 0000000..6d364cb
--- /dev/null
+++ b/src/crankshaft/s390/lithium-codegen-s390.h
@@ -0,0 +1,359 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+#include "src/crankshaft/s390/lithium-s390.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen : public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  LinkRegisterStatus GetLinkRegisterState() const {
+    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+  }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LConstantOperand must be an Integer32 or Smi
+  void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  intptr_t ToRepresentation(LConstantOperand* op,
+                            const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+                             LOperand* temp1, LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
+                                   Register object, Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  MemOperand PrepareKeyedOperand(Register key, Register base,
+                                 bool key_is_constant, bool key_is_tagged,
+                                 int constant_key, int element_size_shift,
+                                 int base_offset);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  Register scratch0() { return kLithiumScratch; }
+  DoubleRegister double_scratch0() { return kScratchDoubleReg; }
+
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true, Label* if_false,
+                       Handle<String> class_name, Register input,
+                       Register temporary, Register temporary2);
+
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+                       LInstruction* instr, SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function, int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id, int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+                               LInstruction* instr, LOperand* context);
+
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in r4.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         bool is_tail_call, LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
+
+  void AddToTranslation(LEnvironment* environment, Translation* translation,
+                        LOperand* op, bool is_tagged, bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  MemOperand BuildSeqStringOperand(Register string, LOperand* index,
+                                   String::Encoding encoding);
+
+  void EmitMathAbs(LMathAbs* instr);
+#if V8_TARGET_ARCH_S390X
+  void EmitInteger32MathAbs(LMathAbs* instr);
+#endif
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template <class InstrType>
+  void EmitBranch(InstrType instr, Condition condition);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition condition);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition condition);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DoubleRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
+                         Handle<String> type_name);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
+                    int* offset, AllocationSiteMode mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Converts a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
diff --git a/src/crankshaft/s390/lithium-gap-resolver-s390.cc b/src/crankshaft/s390/lithium-gap-resolver-s390.cc
new file mode 100644
index 0000000..cffcede
--- /dev/null
+++ b/src/crankshaft/s390/lithium-gap-resolver-s390.cc
@@ -0,0 +1,280 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = {1};
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // When doing a depth-first traversal of moves, a cycle can only be found
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+  // the stack, to free up the register.
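+  // Example: for the cycle {r3 -> r4, r4 -> r3}, the old value of r4 is first
+  // saved in kSavedValueRegister, the move r3 -> r4 is emitted, and
+  // RestoreValue() finally writes the saved value into r3.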
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index].  After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ LoadRR(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ ldr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ LoadDouble(kScratchDoubleReg, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  // The spilled value is in kSavedValueRegister or kScratchDoubleReg.
+  if (saved_destination_->IsRegister()) {
+    __ LoadRR(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ ldr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ StoreDouble(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ LoadRR(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ StoreP(source_register, cgen_->ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ LoadP(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
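+        // kSavedValueRegister holds the cycle-breaking value, so go through
+        // ip for this memory-to-memory move.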
+        __ LoadP(ip, source_operand);
+        __ StoreP(ip, destination_operand);
+      } else {
+        __ LoadP(kSavedValueRegister, source_operand);
+        __ StoreP(kSavedValueRegister, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsInteger32(constant_source)) {
+        cgen_->EmitLoadIntegerConstant(constant_source, dst);
+      } else {
+        __ Move(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ LoadDoubleLiteral(result, v, ip);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      if (cgen_->IsInteger32(constant_source)) {
+        cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
+      } else {
+        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
+      }
+      __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldr(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ StoreDouble(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ LoadDouble(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+// kScratchDoubleReg was used to break the cycle,
+// but kSavedValueRegister is free.
+#if V8_TARGET_ARCH_S390X
+        __ lg(kSavedValueRegister, source_operand);
+        __ stg(kSavedValueRegister, destination_operand);
+#else
+        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ LoadlW(kSavedValueRegister, source_operand);
+        __ StoreW(kSavedValueRegister, destination_operand);
+        __ LoadlW(kSavedValueRegister, source_high_operand);
+        __ StoreW(kSavedValueRegister, destination_high_operand);
+#endif
+      } else {
+        __ LoadDouble(kScratchDoubleReg, source_operand);
+        __ StoreDouble(kScratchDoubleReg, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/s390/lithium-gap-resolver-s390.h b/src/crankshaft/s390/lithium-gap-resolver-s390.h
new file mode 100644
index 0000000..087224c
--- /dev/null
+++ b/src/crankshaft/s390/lithium-gap-resolver-s390.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
diff --git a/src/crankshaft/s390/lithium-s390.cc b/src/crankshaft/s390/lithium-s390.cc
new file mode 100644
index 0000000..a18f877
--- /dev/null
+++ b/src/crankshaft/s390/lithium-s390.cc
@@ -0,0 +1,2290 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-s390.h"
+
+#include <sstream>
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                           \
+  void L##type::CompileToNative(LCodeGen* generator) { \
+    generator->Do##type(this);                         \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
+  DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD:
+      return "add-d";
+    case Token::SUB:
+      return "sub-d";
+    case Token::MUL:
+      return "mul-d";
+    case Token::DIV:
+      return "div-d";
+    case Token::MOD:
+      return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD:
+      return "add-t";
+    case Token::SUB:
+      return "sub-t";
+    case Token::MUL:
+      return "mul-t";
+    case Token::MOD:
+      return "mod-t";
+    case Token::DIV:
+      return "div-t";
+    case Token::BIT_AND:
+      return "bit-and-t";
+    case Token::BIT_OR:
+      return "bit-or-t";
+    case Token::BIT_XOR:
+      return "bit-xor-t";
+    case Token::ROR:
+      return "ror-t";
+    case Token::SHL:
+      return "shl-t";
+    case Token::SAR:
+      return "sar-t";
+    case Token::SHR:
+      return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
+              true_block_id(), false_block_id());
+}
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(), true_block_id(),
+              false_block_id());
+}
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot when allocating a double-width slot.
+  if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+  return current_frame_slots_++;
+}
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new (zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value,
+             new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                              LUnallocated::USED_AT_START));
+}
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
+}
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::NONE,
+                                              LUnallocated::USED_AT_START));
+}
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : Use(value);
+}
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseAtStart(value);
+}
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseRegister(value);
+}
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseRegisterAtStart(value);
+}
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
+}
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
+}
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+                           !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new (zone()) LPointerMap(zone()));
+  return instr;
+}
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new (zone()) LLabel(instr->block());
+}
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
+}
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new (zone()) LDeoptimize);
+}
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
+    }
+
+    // In addition, a logical right shift by a constant 0 deoptimizes if
+    // the result cannot be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
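+
+// Editor's note: illustrative sketch, not part of the upstream change. The
+// constant shift count is masked with 0x1f above because JavaScript shift
+// counts only use their low five bits. A logical right shift by 0 is special
+// because the unshifted value may not fit a signed 32-bit result; in
+// JavaScript, (0x80000000 >>> 0) === 2147483648, which is outside int32
+// range. A standalone model, assuming only <cstdint>:
+//
+//   int32_t MaskShiftCount(int32_t count) { return count & 0x1f; }
+//   bool ShrByZeroFitsInt32(uint32_t value) { return value <= 0x7fffffffu; }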
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), d1);
+    LOperand* right = UseFixedDouble(instr->right(), d2);
+    LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    return MarkAsCall(DefineFixedDouble(result, d1), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+    LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+    return DefineSameAsFirst(result);
+  }
+}
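+
+// Editor's note: illustrative sketch, not part of the upstream change. The
+// Token::MOD branch above routes double modulo through a C call; conceptually
+// the callee computes the same result as fmod from <cmath>:
+//
+//   #include <cmath>
+//   double DoubleMod(double left, double right) {
+//     return std::fmod(left, right);
+//   }
+//
+// Because the call uses fixed double registers (d1 and d2 in, d1 out), both
+// inputs and the result are pinned instead of being left to the allocator.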
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, r3);
+  LOperand* right_operand = UseFixed(right, r2);
+  LArithmeticT* result =
+      new (zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is later used again.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new (zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new (zone())
+                                   LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new (zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
+}
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new (zone()) LGoto(instr->FirstSuccessor());
+}
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+                   type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new (zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new (zone()) LDebugBreak();
+}
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+  LOperand* temp = TempRegister();
+  return new (zone()) LCmpMapAndBranch(value, temp);
+}
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* value = UseRegister(instr->value());
+  return DefineAsRegister(new (zone()) LArgumentsLength(value));
+}
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new (zone()) LArgumentsElements);
+}
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+  return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), r3);
+  LOperand* receiver = UseFixed(instr->receiver(), r2);
+  LOperand* length = UseFixed(instr->length(), r4);
+  LOperand* elements = UseFixed(instr->elements(), r5);
+  LApplyArguments* result =
+      new (zone()) LApplyArguments(function, receiver, length, elements);
+  return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = Use(instr->argument(i));
+    AddInstruction(new (zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new (zone()) LStoreCodeEntry(function, code_object);
+}
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(new (zone())
+                              LInnerAllocatedObject(base_object, offset));
+}
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL
+                            : DefineAsRegister(new (zone()) LThisFunction);
+}
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new (zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new (zone()) LContext);
+}
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
+}
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result =
+      new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
+  return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), r3);
+  LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
+  return MarkAsCall(DefineFixed(result, r2), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFloor* result = new (zone()) LMathFloor(input);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempDoubleRegister();
+  LMathRound* result = new (zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+                          ? NULL
+                          : UseFixed(instr->context(), cp);
+  LOperand* input = UseRegister(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d1);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new (zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* double_temp = TempDoubleRegister();
+  LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathSqrt* result = new (zone()) LMathSqrt(input);
+  return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathPowHalf* result = new (zone()) LMathPowHalf(input);
+  return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), r3);
+  LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineAsRegister(new (zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
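+
+// Editor's note: illustrative sketch, not part of the upstream change. The
+// three deopt conditions above map to concrete values: 0 / -2 is -0 in
+// JavaScript (minus-zero bailout with a negative divisor), INT32_MIN / -1
+// does not fit in int32 (overflow with divisor -1), and an inexact quotient
+// such as 7 / 2 must deopt unless every use truncates to int32. Assuming
+// <cstdint> and a positive power-of-two divisor:
+//
+//   bool DivisionIsExact(int32_t dividend, int32_t divisor) {
+//     return (dividend & (divisor - 1)) == 0;  // 8 / 2 exact, 7 / 2 inexact
+//   }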
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
+          ? NULL
+          : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
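+
+// Editor's note: illustrative sketch, not part of the upstream change. The
+// minus-zero bailout above exists because a negative dividend with a zero
+// remainder yields -0 in JavaScript (e.g. -8 % 8 is -0), and an int32 result
+// cannot carry the sign of zero. Assuming <cstdint> and a positive
+// power-of-two divisor:
+//
+//   bool ModProducesMinusZero(int32_t dividend, int32_t divisor) {
+//     return dividend < 0 && (dividend & (divisor - 1)) == 0;
+//   }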
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LModI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    int32_t constant_value = 0;
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      constant_value = constant->Integer32Value();
+      // Constants -1, 0 and 1 can stay constant operands even if the result
+      // can overflow. Other constants can only stay constant operands when
+      // overflow is impossible.
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
+    } else {
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
+    }
+    LMulI* mul = new (zone()) LMulI(left_op, right_op);
+    if (right_op->IsConstantOperand()
+            ? ((can_overflow && constant_value == -1) ||
+               (bailout_on_minus_zero && constant_value <= 0))
+            : (can_overflow || bailout_on_minus_zero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
+
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
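+
+// Editor's note: illustrative sketch, not part of the upstream change. With a
+// constant right operand the environment is only needed in two cases:
+// INT32_MIN * -1 overflows int32, and a constant <= 0 can produce -0 (e.g.
+// -4 * 0 is -0 in JavaScript, as is 0 * -3), whereas a positive constant
+// never can. The constant branch of the condition above is equivalent to:
+//
+//   bool ConstantMulNeedsDeopt(bool can_overflow, bool bailout_on_minus_zero,
+//                              int32_t constant) {
+//     return (can_overflow && constant == -1) ||
+//            (bailout_on_minus_zero && constant <= 0);
+//   }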
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+    if (instr->left()->IsConstant() &&
+        !instr->CheckFlag(HValue::kCanOverflow)) {
+      // If lhs is constant, do reverse subtraction instead.
+      return DoRSub(instr);
+    }
+
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new (zone()) LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+
+  // Note: The lhs of the subtraction becomes the rhs of the
+  // reverse-subtraction.
+  LOperand* left = UseRegisterAtStart(instr->right());
+  LOperand* right = UseOrConstantAtStart(instr->left());
+  LRSubI* rsb = new (zone()) LRSubI(left, right);
+  LInstruction* result = DefineAsRegister(rsb);
+  return result;
+}
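+
+// Editor's note: illustrative sketch, not part of the upstream change. DoRSub
+// handles c - x where the left-hand side c is a constant: the operands are
+// swapped so that x is used in a register and c as a constant operand. In
+// plain C++ terms the emitted operation computes:
+//
+//   int32_t ReverseSubtract(int32_t x, int32_t c) { return c - x; }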
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegister(mul->left());
+  LOperand* multiplicand_op = UseRegister(mul->right());
+  LOperand* addend_op = UseRegister(addend);
+  return DefineAsRegister(
+      new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
+}
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+  LOperand* minuend_op = UseRegister(minuend);
+  LOperand* multiplier_op = UseRegister(mul->left());
+  LOperand* multiplicand_op = UseRegister(mul->right());
+
+  return DefineAsRegister(
+      new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
+}
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    LAddI* add = new (zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LAddI* add = new (zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegister(instr->left());
+    right = UseRegister(instr->right());
+  }
+  return DefineAsRegister(new (zone()) LMathMinMax(left, right));
+}
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), d1);
+  LOperand* right = exponent_type.IsDouble()
+                        ? UseFixedDouble(instr->right(), d2)
+                        : UseFixed(instr->right(), r4);
+  LPower* result = new (zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, d3), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r3);
+  LOperand* right = UseFixed(instr->right(), r2);
+  LCmpT* result = new (zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new (zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new (zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new (zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LCmpHoleAndBranch(value);
+}
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new (zone()) LIsStringAndBranch(value, temp);
+}
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new (zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r3);
+  LOperand* right = UseFixed(instr->right(), r2);
+  LStringCompareAndBranch* result =
+      new (zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LHasInstanceTypeAndBranch(value);
+}
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
+}
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new (zone())
+      LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+  return new (zone()) LClassOfTestAndBranch(value, TempRegister());
+}
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
+}
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+                        ? UseRegisterAtStart(instr->index())
+                        : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  return new (zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+                         ? UseRegisterOrConstantAtStart(instr->length())
+                         : UseRegisterAtStart(instr->length());
+  LInstruction* result = new (zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new (zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result =
+          DefineAsRegister(new (zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new (zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(
+          DefineSameAsFirst(new (zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new (zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempDoubleRegister();
+        LInstruction* result =
+            DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new (zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new (zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
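+
+// Editor's note: illustrative sketch, not part of the upstream change. The
+// conversions above only pick up an environment when information can be
+// lost: tagged -> double or int32 deopts unless the value is known to be a
+// smi, double -> int32 deopts unless the change may truncate, and
+// int32 -> smi deopts only when the value can overflow. Assuming a 31-bit
+// smi payload (as on 32-bit targets), the overflow test amounts to:
+//
+//   bool FitsInSmi31(int32_t value) {
+//     return value >= -(1 << 30) && value <= (1 << 30) - 1;
+//   }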
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new (zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new (zone()) LCheckSmi(value));
+}
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view);
+  return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new (zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new (zone()) LCheckValue(value));
+}
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      AssignEnvironment(new (zone()) LCheckMaps(value, temp));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new (zone()) LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new (zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LClampTToUint8* result =
+        new (zone()) LClampTToUint8(reg, TempDoubleRegister());
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
+}
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
+}
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new (zone())
+      LReturn(UseFixed(instr->value(), r2), context, parameter_count);
+}
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new (zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new (zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new (zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new (zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new (zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadGlobalGeneric* result =
+      new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new (zone()) LStoreContextSlot(context, value);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new (zone()) LLoadNamedField(obj));
+}
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r2);
+  return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new (zone()) LLoadRoot);
+}
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = NULL;
+    if (instr->representation().IsDouble()) {
+      obj = UseRegister(instr->elements());
+    } else {
+      obj = UseRegisterAtStart(instr->elements());
+    }
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK((instr->representation().IsInteger32() &&
+            !IsDoubleOrFloatElementsKind(elements_kind)) ||
+           (instr->representation().IsDouble() &&
+            IsDoubleOrFloatElementsKind(elements_kind)));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // see LCodeGen::DoLoadKeyedExternalArray
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result = DefineFixed(
+      new (zone()) LLoadKeyedGeneric(context, object, key, vector), r2);
+  return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* key = NULL;
+    LOperand* val = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      val = UseRegister(instr->value());
+      key = UseRegisterOrConstantAtStart(instr->key());
+    } else {
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new (zone()) LStoreKeyed(object, key, val, nullptr);
+  }
+
+  DCHECK((instr->value()->representation().IsInteger32() &&
+          !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+         (instr->value()->representation().IsDouble() &&
+          IsDoubleOrFloatElementsKind(instr->elements_kind())));
+  DCHECK(instr->elements()->representation().IsExternal());
+  LOperand* val = UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result =
+      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), r2);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new (zone()) LTransitionElementsKind(object, context, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LTrapAllocationMemento* result =
+      new (zone()) LTrapAllocationMemento(object, temp1, temp2);
+  return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, r2);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map =
+      instr->has_transition() && instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object ? UseRegister(instr->object())
+                       : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map ? UseRegister(instr->object())
+                                      : UseRegisterAtStart(instr->object());
+  }
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We need a temporary register for the write barrier of the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new (zone()) LStoreNamedField(obj, val, temp);
+}
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r3);
+  LOperand* right = UseFixed(instr->right(), r2);
+  return MarkAsCall(
+      DefineFixed(new (zone()) LStringAdd(context, left, right), r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new (zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new (zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new (zone()) LOsrEntry);
+}
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new (zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
+  }
+  return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
+}
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
+}
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* value = UseFixed(instr->value(), r5);
+  LTypeof* result = new (zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new (zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new (zone()) LStackCheck(context)));
+  }
+}
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
+  // Only replay binding of arguments object if it wasn't removed from graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new (zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer =
+      current_block_->last_environment()->DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->enumerable(), r2);
+  LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(
+      DefineAsRegister(new (zone()) LForInCacheArray(map)));
+}
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
+}
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/s390/lithium-s390.h b/src/crankshaft/s390/lithium-s390.h
new file mode 100644
index 0000000..b6a1614
--- /dev/null
+++ b/src/crankshaft/s390/lithium-s390.h
@@ -0,0 +1,2414 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_S390_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallWithDescriptor)                      \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckNonSmi)                             \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(MultiplySubD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(RSubI)                                   \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+  H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {}
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+// Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+// Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator support.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits : public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+// R = number of result operands (0 or 1).
+template <int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template <int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block) : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  bool IsGap() const override { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new (zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+template <int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+class LModI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LModI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+class LDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = addend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* addend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+// Instruction for computing minuend - multiplier * multiplicand.
+class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = minuend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* minuend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const { return hydrogen()->representation().IsDouble(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = double_temp;
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+class LRSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LRSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+
+  uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+ private:
+  Token::Value op_;
+};
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) {}
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+              LOperand* backing_store_owner) {
+    inputs_[0] = object;
+    inputs_[1] = key;
+    inputs_[2] = value;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
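+  // Values produced by double Add/Sub/Mul/Div are assumed to already hold
+  // canonical NaNs, so only other stored values may need canonicalization
+  // (rationale inferred from the check below, not stated in this change).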
+  bool NeedsCanonicalization() {
+    if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+        hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+      return false;
+    }
+    return hydrogen()->NeedsCanonicalization();
+  }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LTransitionElementsKind(LOperand* object, LOperand* context,
+                          LOperand* new_map_temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
+ public:
+  LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = object;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+  LOperand* view() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+class LAllocate final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
+};
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+  LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+  LInstruction* DoRSub(HSub* instr);
+
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction, so the register allocator will not reuse its register
+  // for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // the start of the instruction; the register allocator is free to assign the
+  // same register to some other operand used inside the instruction (i.e. a
+  // temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
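+  // Illustrative sketch (instruction and operands are assumptions, not taken
+  // from this change): a builder method would typically write
+  //   LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+  //   LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+  //   return DefineAsRegister(new (zone()) LAddI(left, right));
+  // so the allocator may give an input and the result the same register.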
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, a stack slot, or a constant operand.
+  // It will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to this
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr, HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
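+  // Illustrative sketch (result register is an assumption, not taken from
+  // this change): call-like instructions are typically wrapped as
+  //   return MarkAsCall(DefineFixed(result, r2), instr);
+  // which lets the chunk builder emit a lazy bailout after the call.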
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_S390_LITHIUM_S390_H_
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
index 849b4b3..fbda59b 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -125,7 +125,7 @@
     DCHECK(!frame_is_built_);
     frame_is_built_ = true;
     if (info()->IsStub()) {
-      __ StubPrologue();
+      __ StubPrologue(StackFrame::STUB);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue());
     }
@@ -306,34 +306,27 @@
   if (needs_frame.is_linked()) {
     __ bind(&needs_frame);
     /* stack layout
-       4: return address  <-- rsp
-       3: garbage
+       3: return address  <-- rsp
        2: garbage
        1: garbage
        0: garbage
     */
-    // Reserve space for context and stub marker.
-    __ subp(rsp, Immediate(2 * kPointerSize));
-    __ Push(MemOperand(rsp, 2 * kPointerSize));  // Copy return address.
-    __ Push(kScratchRegister);  // Save entry address for ret(0)
+    // Reserve space for stub marker.
+    __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize));
+    __ Push(MemOperand(
+        rsp, TypedFrameConstants::kFrameTypeSize));  // Copy return address.
+    __ Push(kScratchRegister);
 
     /* stack layout
-       4: return address
-       3: garbage
+       3: return address
        2: garbage
        1: return address
        0: entry address  <-- rsp
     */
 
-    // Remember context pointer.
-    __ movp(kScratchRegister,
-            MemOperand(rbp, StandardFrameConstants::kContextOffset));
-    // Save context pointer into the stack frame.
-    __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
-
     // Create a stack frame.
-    __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
-    __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
+    __ movp(MemOperand(rsp, 3 * kPointerSize), rbp);
+    __ leap(rbp, MemOperand(rsp, 3 * kPointerSize));
 
     // This variant of deopt can only be used with stubs. Since we don't
     // have a function pointer to install in the stack frame that we're
@@ -342,8 +335,7 @@
     __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
 
     /* stack layout
-       4: old rbp
-       3: context pointer
+       3: old rbp
        2: stub marker
        1: return address
        0: entry address  <-- rsp
@@ -379,9 +371,8 @@
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ pushq(rbp);  // Caller's frame pointer.
-        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
         __ Push(Smi::FromInt(StackFrame::STUB));
-        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
+        __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
         Comment(";;; Deferred code");
       }
       code->Generate();
@@ -2012,16 +2003,17 @@
       __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
       // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // true -> true.
         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
         __ j(equal, instr->TrueLabel(chunk_));
@@ -2029,13 +2021,13 @@
         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ CompareRoot(reg, Heap::kNullValueRootIndex);
         __ j(equal, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ Cmp(reg, Smi::FromInt(0));
         __ j(equal, instr->FalseLabel(chunk_));
@@ -2058,13 +2050,13 @@
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
         __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2075,19 +2067,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         __ CmpInstanceType(map, SYMBOL_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2317,11 +2309,10 @@
   DCHECK(ToRegister(instr->left()).is(rdx));
   DCHECK(ToRegister(instr->right()).is(rax));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-  __ testp(rax, rax);
-
-  EmitBranch(instr, TokenToCondition(instr->op(), false));
+  __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2393,11 +2384,12 @@
 
   __ JumpIfSmi(input, is_false);
 
-  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
+  __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    __ j(equal, is_false);
+    __ j(above_equal, is_false);
   }
 
   // Check if the constructor in the map is a function.
@@ -3010,11 +3002,11 @@
 
   if (instr->hydrogen()->from_inlined()) {
     __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     // Check for arguments adapter frame.
     Label done, adapted;
     __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+    __ Cmp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
            Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
     __ j(equal, &adapted, Label::kNear);
 
@@ -3029,6 +3021,8 @@
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
     __ bind(&done);
+  } else {
+    __ movp(result, rbp);
   }
 }
 
@@ -3140,13 +3134,24 @@
 
   // Invoke the function.
   __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(rax);
+    // It is safe to use rbx, rcx and r8 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) rbx (expected number of arguments) will be initialized below.
+    PrepareForTailCall(actual, rbx, rcx, r8);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   ParameterCount actual(rax);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -3185,10 +3190,9 @@
   CallRuntime(Runtime::kDeclareGlobals, instr);
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -3205,23 +3209,36 @@
     __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
     __ Set(rax, arity);
 
+    bool is_self_call = function.is_identical_to(info()->closure());
+
     // Invoke function.
-    if (function.is_identical_to(info()->closure())) {
-      __ CallSelf();
+    if (is_self_call) {
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
     } else {
-      __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+      Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+      if (is_tail_call) {
+        __ Jump(target);
+      } else {
+        __ Call(target);
+      }
     }
 
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+    }
   } else {
     // We need to adapt arguments.
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, no_reg, expected, count, CALL_FUNCTION,
-                      generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator);
   }
 }
 
@@ -3263,39 +3280,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(rdi));
-  DCHECK(ToRegister(instr->result()).is(rax));
-
-  // Change context.
-  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
-  __ Set(rax, instr->arity());
-
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-  bool is_self_call = false;
-  if (instr->hydrogen()->function()->IsConstant()) {
-    Handle<JSFunction> jsfun = Handle<JSFunction>::null();
-    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
-    jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
-    is_self_call = jsfun.is_identical_to(info()->closure());
-  }
-
-  if (is_self_call) {
-    __ CallSelf();
-  } else {
-    Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
-    generator.BeforeCall(__ CallSize(target));
-    __ Call(target);
-  }
-  generator.AfterCall();
-}
-
-
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   Register input_reg = ToRegister(instr->value());
   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3401,8 +3385,14 @@
   }
 }
 
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  CpuFeatureScope scope(masm(), SSE4_1);
+  __ Roundsd(output_reg, input_reg, kRoundDown);
+}
 
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   XMMRegister xmm_scratch = double_scratch0();
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3460,8 +3450,23 @@
   }
 }
 
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+  XMMRegister xmm_scratch = double_scratch0();
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  CpuFeatureScope scope(masm(), SSE4_1);
+  Label done;
+  __ Roundsd(output_reg, input_reg, kRoundUp);
+  __ Move(xmm_scratch, -0.5);
+  __ Addsd(xmm_scratch, output_reg);
+  __ Ucomisd(xmm_scratch, input_reg);
+  __ j(below_equal, &done, Label::kNear);
+  __ Move(xmm_scratch, 1.0);
+  __ Subsd(output_reg, xmm_scratch);
+  __ bind(&done);
+}
 
-void LCodeGen::DoMathRound(LMathRound* instr) {
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
   const XMMRegister xmm_scratch = double_scratch0();
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3654,54 +3659,77 @@
   __ Lzcntl(result, input);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#if DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ movp(rbp, scratch2);
+  __ SmiToInteger32(
+      caller_args_count_reg,
+      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count.
+  __ movp(caller_args_count_reg,
+          Immediate(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+                        ReturnAddressState::kNotOnStack);
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(rsi));
   DCHECK(ToRegister(instr->function()).is(rdi));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use rbx, rcx and r8 as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) rbx (expected number of arguments) will be initialized below.
+    PrepareForTailCall(actual, rbx, rcx, r8);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(rdi, no_reg, count, CALL_FUNCTION, generator);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(rdi, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
-  }
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->function()).is(rdi));
-  DCHECK(ToRegister(instr->result()).is(rax));
-
-  int arity = instr->arity();
-
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(rdx));
-    DCHECK(vector_register.is(rbx));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ Move(vector_register, vector);
-    __ Move(slot_register, Smi::FromInt(index));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ Set(rax, arity);
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
 }
 
@@ -5207,13 +5235,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(rax));
-  __ Push(rax);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTypeof(LTypeof* instr) {
   DCHECK(ToRegister(instr->context()).is(rsi));
   DCHECK(ToRegister(instr->value()).is(rbx));
@@ -5573,13 +5594,6 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
-}
-
-
 #undef __
 
 }  // namespace internal
diff --git a/src/crankshaft/x64/lithium-codegen-x64.h b/src/crankshaft/x64/lithium-codegen-x64.h
index 873a3dd..139645e 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/src/crankshaft/x64/lithium-codegen-x64.h
@@ -192,11 +192,14 @@
 
   void LoadContextFromDeferred(LOperand* context);
 
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode,
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
index 6be4093..e86b90c 100644
--- a/src/crankshaft/x64/lithium-x64.cc
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -261,27 +261,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -602,11 +581,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(
-      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
@@ -921,17 +896,7 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
@@ -941,7 +906,11 @@
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -954,14 +923,14 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LInstruction* branch = new(zone()) LBranch(UseRegister(value));
   if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
     branch = AssignEnvironment(branch);
   }
@@ -1081,16 +1050,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), rdi);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1114,6 +1073,9 @@
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1122,6 +1084,9 @@
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* function = UseFixed(instr->function(), rdi);
   LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1152,22 +1117,33 @@
   }
 }
 
-
 LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LMathFloor* result = new(zone()) LMathFloor(input);
-  return AssignEnvironment(DefineAsRegister(result));
+  if (instr->representation().IsInteger32()) {
+    LMathFloorI* result = new (zone()) LMathFloorI(input);
+    return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    LMathFloorD* result = new (zone()) LMathFloorD(input);
+    return DefineAsRegister(result);
+  }
 }
 
-
 LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegister(instr->value());
-  LOperand* temp = FixedTemp(xmm4);
-  LMathRound* result = new(zone()) LMathRound(input, temp);
-  return AssignEnvironment(DefineAsRegister(result));
+  if (instr->representation().IsInteger32()) {
+    LOperand* temp = FixedTemp(xmm4);
+    LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+    return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    LMathRoundD* result = new (zone()) LMathRoundD(input);
+    return DefineAsRegister(result);
+  }
 }
 
-
 LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
   LOperand* input = UseRegister(instr->value());
   LMathFround* result = new (zone()) LMathFround(input);
@@ -1234,21 +1210,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* function = UseFixed(instr->function(), rdi);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(rdx);
-    vector = FixedTemp(rbx);
-  }
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
   LCallRuntime* result = new(zone()) LCallRuntime(context);
@@ -1813,13 +1774,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
   // The control instruction marking the end of a block that completed
   // abruptly (e.g., threw an exception).  There is nothing specific to do.
@@ -2540,13 +2494,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), rax);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* value = UseFixed(instr->value(), rbx);
@@ -2584,11 +2531,9 @@
   HEnvironment* outer = current_block_->last_environment();
   outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2650,13 +2595,6 @@
   return AssignPointerMap(result);
 }
 
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h
index 406159b..1feba4b 100644
--- a/src/crankshaft/x64/lithium-x64.h
+++ b/src/crankshaft/x64/lithium-x64.h
@@ -29,9 +29,7 @@
   V(BitI)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallJSFunction)                          \
   V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CheckArrayBufferNotNeutered)             \
@@ -101,12 +99,14 @@
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
-  V(MathFloor)                               \
+  V(MathFloorD)                              \
+  V(MathFloorI)                              \
   V(MathFround)                              \
   V(MathLog)                                 \
   V(MathMinMax)                              \
   V(MathPowHalf)                             \
-  V(MathRound)                               \
+  V(MathRoundD)                              \
+  V(MathRoundI)                              \
   V(MathSqrt)                                \
   V(MaybeGrowElements)                       \
   V(ModByConstI)                             \
@@ -131,7 +131,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyed)                              \
   V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
@@ -143,7 +142,6 @@
   V(SubI)                                    \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
@@ -152,7 +150,6 @@
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
   Opcode opcode() const final { return LInstruction::k##type; } \
   void CompileToNative(LCodeGen* generator) final;              \
@@ -224,6 +221,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -262,6 +266,8 @@
   virtual LOperand* TempAt(int i) = 0;
 
   class IsCallBits: public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -544,6 +550,7 @@
   LOperand* elements() { return inputs_[3]; }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 };
 
 
@@ -805,23 +812,43 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LMathFloor(LOperand* value) {
-    inputs_[0] = value;
-  }
+  explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
 
-  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
-
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathRound(LOperand* value, LOperand* temp) {
+  explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRoundI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
@@ -829,7 +856,7 @@
   LOperand* value() { return inputs_[0]; }
   LOperand* temp() { return temps_[0]; }
 
-  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
@@ -1715,23 +1742,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
  public:
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1790,29 +1800,6 @@
 };
 
 
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-  int arity() const { return hydrogen()->argument_count() - 1; }
-
-  void PrintDataTo(StringStream* stream) override;
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2399,19 +2386,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
@@ -2528,18 +2502,6 @@
 };
 
 
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
index a8f22be..1751199 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -57,13 +57,6 @@
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);
 
-  support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
-  dynamic_frame_alignment_ = info()->IsOptimizing() &&
-      ((chunk()->num_double_slots() > 2 &&
-        !chunk()->graph()->is_recursive()) ||
-       !info()->osr_ast_id().IsNone());
-
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -98,31 +91,6 @@
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
-      // Move state of dynamic frame alignment into edx.
-      __ Move(edx, Immediate(kNoAlignmentPadding));
-
-      Label do_not_pad, align_loop;
-      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
-      // Align esp + 4 to a multiple of 2 * kPointerSize.
-      __ test(esp, Immediate(kPointerSize));
-      __ j(not_zero, &do_not_pad, Label::kNear);
-      __ push(Immediate(0));
-      __ mov(ebx, esp);
-      __ mov(edx, Immediate(kAlignmentPaddingPushed));
-      // Copy arguments, receiver, and return address.
-      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
-      __ bind(&align_loop);
-      __ mov(eax, Operand(ebx, 1 * kPointerSize));
-      __ mov(Operand(ebx, 0), eax);
-      __ add(Operand(ebx), Immediate(kPointerSize));
-      __ dec(ecx);
-      __ j(not_zero, &align_loop, Label::kNear);
-      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-      __ bind(&do_not_pad);
-    }
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
@@ -130,61 +98,29 @@
     DCHECK(!frame_is_built_);
     frame_is_built_ = true;
     if (info()->IsStub()) {
-      __ StubPrologue();
+      __ StubPrologue(StackFrame::STUB);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue());
     }
   }
 
-  if (info()->IsOptimizing() &&
-      dynamic_frame_alignment_ &&
-      FLAG_debug_code) {
-    __ test(esp, Immediate(kPointerSize));
-    __ Assert(zero, kFrameIsExpectedToBeAligned);
-  }
-
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
   DCHECK(slots != 0 || !info()->IsOptimizing());
   if (slots > 0) {
-    if (slots == 1) {
-      if (dynamic_frame_alignment_) {
-        __ push(edx);
-      } else {
-        __ push(Immediate(kNoAlignmentPadding));
-      }
-    } else {
-      if (FLAG_debug_code) {
-        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+    __ sub(Operand(esp), Immediate(slots * kPointerSize));
 #ifdef _MSC_VER
-        MakeSureStackPagesMapped(slots * kPointerSize);
+    MakeSureStackPagesMapped(slots * kPointerSize);
 #endif
-        __ push(eax);
-        __ mov(Operand(eax), Immediate(slots));
-        Label loop;
-        __ bind(&loop);
-        __ mov(MemOperand(esp, eax, times_4, 0),
-               Immediate(kSlotsZapValue));
-        __ dec(eax);
-        __ j(not_zero, &loop);
-        __ pop(eax);
-      } else {
-        __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
-        MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
-      }
-
-      if (support_aligned_spilled_doubles_) {
-        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
-        // Store dynamic frame alignment state in the first local.
-        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
-        if (dynamic_frame_alignment_) {
-          __ mov(Operand(ebp, offset), edx);
-        } else {
-          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
-        }
-      }
+    if (FLAG_debug_code) {
+      __ push(eax);
+      __ mov(Operand(eax), Immediate(slots));
+      Label loop;
+      __ bind(&loop);
+      __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
+      __ dec(eax);
+      __ j(not_zero, &loop);
+      __ pop(eax);
     }
   }
 
@@ -265,50 +201,11 @@
 
   osr_pc_offset_ = masm()->pc_offset();
 
-    // Move state of dynamic frame alignment into edx.
-  __ Move(edx, Immediate(kNoAlignmentPadding));
-
-  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
-    Label do_not_pad, align_loop;
-    // Align ebp + 4 to a multiple of 2 * kPointerSize.
-    __ test(ebp, Immediate(kPointerSize));
-    __ j(zero, &do_not_pad, Label::kNear);
-    __ push(Immediate(0));
-    __ mov(ebx, esp);
-    __ mov(edx, Immediate(kAlignmentPaddingPushed));
-
-    // Move all parts of the frame over one word. The frame consists of:
-    // unoptimized frame slots, alignment state, context, frame pointer, return
-    // address, receiver, and the arguments.
-    __ mov(ecx, Immediate(scope()->num_parameters() +
-           5 + graph()->osr()->UnoptimizedFrameSlots()));
-
-    __ bind(&align_loop);
-    __ mov(eax, Operand(ebx, 1 * kPointerSize));
-    __ mov(Operand(ebx, 0), eax);
-    __ add(Operand(ebx), Immediate(kPointerSize));
-    __ dec(ecx);
-    __ j(not_zero, &align_loop, Label::kNear);
-    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
-    __ sub(Operand(ebp), Immediate(kPointerSize));
-    __ bind(&do_not_pad);
-  }
-
-  // Save the first local, which is overwritten by the alignment state.
-  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
-  __ push(alignment_loc);
-
-  // Set the dynamic frame alignment state.
-  __ mov(alignment_loc, edx);
-
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  DCHECK(slots >= 1);
-  __ sub(esp, Immediate((slots - 1) * kPointerSize));
-
-  // Initailize FPU state.
-  __ fninit();
+  DCHECK(slots >= 0);
+  __ sub(esp, Immediate(slots * kPointerSize));
 }
 
 
@@ -376,32 +273,24 @@
   }
   if (needs_frame.is_linked()) {
     __ bind(&needs_frame);
-
     /* stack layout
-       4: entry address
-       3: return address  <-- esp
-       2: garbage
+       3: entry address
+       2: return address  <-- esp
        1: garbage
        0: garbage
     */
-    __ sub(esp, Immediate(kPointerSize));    // Reserve space for stub marker.
-    __ push(MemOperand(esp, kPointerSize));  // Copy return address.
-    __ push(MemOperand(esp, 3 * kPointerSize));  // Copy entry address.
+    __ push(MemOperand(esp, 0));                 // Copy return address.
+    __ push(MemOperand(esp, 2 * kPointerSize));  // Copy entry address.
 
     /* stack layout
        4: entry address
        3: return address
-       2: garbage
        1: return address
        0: entry address  <-- esp
     */
-    __ mov(MemOperand(esp, 4 * kPointerSize), ebp);  // Save ebp.
-
-    // Copy context.
-    __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
-    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);  // Save ebp.
     // Fill ebp with the right stack frame address.
-    __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+    __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
 
     // This variant of deopt can only be used with stubs. Since we don't
     // have a function pointer to install in the stack frame that we're
@@ -411,8 +300,7 @@
            Immediate(Smi::FromInt(StackFrame::STUB)));
 
     /* stack layout
-       4: old ebp
-       3: context pointer
+       3: old ebp
        2: stub marker
        1: return address
        0: entry address  <-- esp
@@ -449,9 +337,8 @@
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ push(ebp);  // Caller's frame pointer.
-        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
         __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-        __ lea(ebp, Operand(esp, 2 * kPointerSize));
+        __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
         Comment(";;; Deferred code");
       }
       code->Generate();
@@ -2240,15 +2127,16 @@
       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
-      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+      ToBooleanICStub::Types expected =
+          instr->hydrogen()->expected_input_types();
+      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
         // undefined -> false.
         __ cmp(reg, factory()->undefined_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
         // true -> true.
         __ cmp(reg, factory()->true_value());
         __ j(equal, instr->TrueLabel(chunk_));
@@ -2256,13 +2144,13 @@
         __ cmp(reg, factory()->false_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
         // 'null' -> false.
         __ cmp(reg, factory()->null_value());
         __ j(equal, instr->FalseLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SMI)) {
+      if (expected.Contains(ToBooleanICStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
         __ test(reg, Operand(reg));
         __ j(equal, instr->FalseLabel(chunk_));
@@ -2282,18 +2170,18 @@
         if (expected.CanBeUndetectable()) {
           // Undetectable -> false.
           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-                    1 << Map::kIsUndetectable);
+                    Immediate(1 << Map::kIsUndetectable));
           __ j(not_zero, instr->FalseLabel(chunk_));
         }
       }
 
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
         // spec object -> true.
         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
         __ j(above_equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::STRING)) {
+      if (expected.Contains(ToBooleanICStub::STRING)) {
         // String value -> false iff empty.
         Label not_string;
         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2304,19 +2192,19 @@
         __ bind(&not_string);
       }
 
-      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
         // Symbol value -> true.
         __ CmpInstanceType(map, SYMBOL_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
         // SIMD value -> true.
         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2524,7 +2412,7 @@
   }
   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
   __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
+            Immediate(1 << Map::kIsUndetectable));
   EmitBranch(instr, not_zero);
 }
 
@@ -2554,11 +2442,10 @@
   DCHECK(ToRegister(instr->left()).is(edx));
   DCHECK(ToRegister(instr->right()).is(eax));
 
-  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
-  __ test(eax, eax);
-
-  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+  __ CompareRoot(eax, Heap::kTrueValueRootIndex);
+  EmitBranch(instr, equal);
 }
 
 
@@ -2629,11 +2516,12 @@
   DCHECK(!temp.is(temp2));
   __ JumpIfSmi(input, is_false);
 
-  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
+  __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    __ j(equal, is_false);
+    __ j(above_equal, is_false);
   }
 
   // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -2716,7 +2604,7 @@
 
   // Deoptimize if the object needs to be access checked.
   __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded);
+            Immediate(1 << Map::kIsAccessCheckNeeded));
   DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
   // Deoptimize for proxies.
   __ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2749,18 +2637,11 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
-  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+void LCodeGen::EmitReturn(LReturn* instr) {
+  int extra_value_count = 1;
 
   if (instr->has_constant_parameter_count()) {
     int parameter_count = ToInteger32(instr->constant_parameter_count());
-    if (dynamic_frame_alignment && FLAG_debug_code) {
-      __ cmp(Operand(esp,
-                     (parameter_count + extra_value_count) * kPointerSize),
-             Immediate(kAlignmentZapValue));
-      __ Assert(equal, kExpectedAlignmentMarker);
-    }
     __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
   } else {
     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
@@ -2768,19 +2649,9 @@
     // The argument count parameter is a smi
     __ SmiUntag(reg);
     Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
-    if (dynamic_frame_alignment && FLAG_debug_code) {
-      DCHECK(extra_value_count == 2);
-      __ cmp(Operand(esp, reg, times_pointer_size,
-                     extra_value_count * kPointerSize),
-             Immediate(kAlignmentZapValue));
-      __ Assert(equal, kExpectedAlignmentMarker);
-    }
 
     // emit code to restore stack based on instr->parameter_count()
     __ pop(return_addr_reg);  // save return address
-    if (dynamic_frame_alignment) {
-      __ inc(reg);  // 1 more for alignment
-    }
     __ shl(reg, kPointerSizeLog2);
     __ add(esp, reg);
     __ jmp(return_addr_reg);
@@ -2798,25 +2669,12 @@
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ CallRuntime(Runtime::kTraceExit);
   }
-  if (dynamic_frame_alignment_) {
-    // Fetch the state of the dynamic frame alignment.
-    __ mov(edx, Operand(ebp,
-      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
-  }
   if (NeedsEagerFrame()) {
     __ mov(esp, ebp);
     __ pop(ebp);
   }
-  if (dynamic_frame_alignment_) {
-    Label no_padding;
-    __ cmp(edx, Immediate(kNoAlignmentPadding));
-    __ j(equal, &no_padding, Label::kNear);
 
-    EmitReturn(instr, true);
-    __ bind(&no_padding);
-  }
-
-  EmitReturn(instr, false);
+  EmitReturn(instr);
 }
 
 
@@ -3217,11 +3075,12 @@
 
   if (instr->hydrogen()->from_inlined()) {
     __ lea(result, Operand(esp, -2 * kPointerSize));
-  } else {
+  } else if (instr->hydrogen()->arguments_adaptor()) {
     // Check for arguments adapter frame.
     Label done, adapted;
     __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+    __ mov(result,
+           Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ cmp(Operand(result),
            Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
     __ j(equal, &adapted, Label::kNear);
@@ -3237,6 +3096,8 @@
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
     __ bind(&done);
+  } else {
+    __ mov(result, Operand(ebp));
   }
 }
 
@@ -3271,6 +3132,7 @@
   // object as a receiver to normal functions. Values have to be
   // passed unchanged to builtins and strict-mode functions.
   Label receiver_ok, global_object;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   Register scratch = ToRegister(instr->temp());
 
   if (!instr->hydrogen()->known_function()) {
@@ -3279,20 +3141,20 @@
     __ mov(scratch,
            FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
     __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
-              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-    __ j(not_equal, &receiver_ok);
+              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ j(not_equal, &receiver_ok, dist);
 
     // Do not transform the receiver to object for builtins.
     __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
-              1 << SharedFunctionInfo::kNativeBitWithinByte);
-    __ j(not_equal, &receiver_ok);
+              Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ j(not_equal, &receiver_ok, dist);
   }
 
   // Normal function. Replace undefined or null with global receiver.
   __ cmp(receiver, factory()->null_value());
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
   __ cmp(receiver, factory()->undefined_value());
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
@@ -3340,13 +3202,25 @@
 
   // Invoke the function.
   __ bind(&invoke);
+
+  InvokeFlag flag = CALL_FUNCTION;
+  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+    DCHECK(!info()->saves_caller_doubles());
+    // TODO(ishell): drop current frame before pushing arguments to the stack.
+    flag = JUMP_FUNCTION;
+    ParameterCount actual(eax);
+    // It is safe to use ebx, ecx and edx as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) ebx (expected arguments count) and edx (new.target) will be
+    //    initialized below.
+    PrepareForTailCall(actual, ebx, ecx, edx);
+  }
+
   DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   ParameterCount actual(eax);
-  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
-                    safepoint_generator);
+  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
 }
 
 
@@ -3390,10 +3264,9 @@
   CallRuntime(Runtime::kDeclareGlobals, instr);
 }
 
-
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int formal_parameter_count, int arity,
-                                 LInstruction* instr) {
+                                 bool is_tail_call, LInstruction* instr) {
   bool dont_adapt_arguments =
       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   bool can_invoke_directly =
@@ -3409,21 +3282,38 @@
     __ mov(edx, factory()->undefined_value());
     __ mov(eax, arity);
 
+    bool is_self_call = function.is_identical_to(info()->closure());
+
     // Invoke function directly.
-    if (function.is_identical_to(info()->closure())) {
-      __ CallSelf();
+    if (is_self_call) {
+      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+      if (is_tail_call) {
+        __ Jump(self, RelocInfo::CODE_TARGET);
+      } else {
+        __ Call(self, RelocInfo::CODE_TARGET);
+      }
     } else {
-      __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+      Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+      if (is_tail_call) {
+        __ jmp(target);
+      } else {
+        __ call(target);
+      }
     }
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+    if (!is_tail_call) {
+      // Set up deoptimization.
+      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    }
   } else {
     // We need to adapt arguments.
     LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(
         this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
+    ParameterCount actual(arity);
     ParameterCount expected(formal_parameter_count);
-    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(function_reg, expected, actual, flag, generator);
   }
 }
 
@@ -3465,35 +3355,6 @@
 }
 
 
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  DCHECK(ToRegister(instr->function()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  // Change context.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-  // Always initialize new target and number of actual arguments.
-  __ mov(edx, factory()->undefined_value());
-  __ mov(eax, instr->arity());
-
-  bool is_self_call = false;
-  if (instr->hydrogen()->function()->IsConstant()) {
-    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
-    Handle<JSFunction> jsfun =
-      Handle<JSFunction>::cast(fun_const->handle(isolate()));
-    is_self_call = jsfun.is_identical_to(info()->closure());
-  }
-
-  if (is_self_call) {
-    __ CallSelf();
-  } else {
-    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
-  }
-
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   Register input_reg = ToRegister(instr->value());
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3620,6 +3481,7 @@
   __ sub(esp, Immediate(kPointerSize));
   __ fist_s(Operand(esp, 0));
   __ pop(output_reg);
+  __ X87SetRC(0x0000);
   __ X87CheckIA();
   DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
   __ fnclex();
@@ -3652,6 +3514,8 @@
   // Clear exception bits.
   __ fnclex();
   __ fistp_s(MemOperand(esp, 0));
+  // Restore round mode.
+  __ X87SetRC(0x0000);
   // Check overflow.
   __ X87CheckIA();
   __ pop(result);
@@ -3686,6 +3550,8 @@
   // Clear exception bits.
   __ fnclex();
   __ fistp_s(MemOperand(esp, 0));
+  // Restore round mode.
+  __ X87SetRC(0x0000);
   // Check overflow.
   __ X87CheckIA();
   __ pop(result);
@@ -3926,54 +3792,78 @@
   X87CommitWrite(result_reg);
 }
 
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+                                  Register scratch1, Register scratch2,
+                                  Register scratch3) {
+#ifdef DEBUG
+  if (actual.is_reg()) {
+    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+  } else {
+    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+  }
+#endif
+  if (FLAG_code_comments) {
+    if (actual.is_reg()) {
+      Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+    } else {
+      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+    }
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(ebp, scratch2);
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count.
+  __ mov(caller_args_count_reg,
+         Immediate(info()->literal()->parameter_count()));
+
+  __ bind(&formal_parameter_count_loaded);
+  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+                        ReturnAddressState::kNotOnStack, 0);
+  Comment(";;; }");
+}
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  HInvokeFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->function()).is(edi));
   DCHECK(instr->HasPointerMap());
 
-  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+  if (is_tail_call) {
+    DCHECK(!info()->saves_caller_doubles());
+    ParameterCount actual(instr->arity());
+    // It is safe to use ebx, ecx and edx as scratch registers here given that
+    // 1) we are not going to return to caller function anyway,
+    // 2) ebx (expected arguments count) and edx (new.target) will be
+    //    initialized below.
+    PrepareForTailCall(actual, ebx, ecx, edx);
+  }
+
+  Handle<JSFunction> known_function = hinstr->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(instr->arity());
-    __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount actual(instr->arity());
+    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+    __ InvokeFunction(edi, no_reg, actual, flag, generator);
   } else {
-    CallKnownFunction(known_function,
-                      instr->hydrogen()->formal_parameter_count(),
-                      instr->arity(), instr);
-  }
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  HCallFunction* hinstr = instr->hydrogen();
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->function()).is(edi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-
-  int arity = instr->arity();
-  ConvertReceiverMode mode = hinstr->convert_mode();
-  if (hinstr->HasVectorAndSlot()) {
-    Register slot_register = ToRegister(instr->temp_slot());
-    Register vector_register = ToRegister(instr->temp_vector());
-    DCHECK(slot_register.is(edx));
-    DCHECK(vector_register.is(ebx));
-
-    AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
-    int index = vector->GetIndex(hinstr->slot());
-
-    __ mov(vector_register, vector);
-    __ mov(slot_register, Immediate(Smi::FromInt(index)));
-
-    Handle<Code> ic =
-        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ Set(eax, arity);
-    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+                      instr->arity(), is_tail_call, instr);
   }
 }
 
@@ -5180,7 +5070,7 @@
 
   __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
   __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
-            1 << JSArrayBuffer::WasNeutered::kShift);
+            Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
   DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
 }
 
@@ -5196,8 +5086,7 @@
     InstanceType last;
     instr->hydrogen()->GetCheckInterval(&first, &last);
 
-    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-            static_cast<int8_t>(first));
+    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
@@ -5206,8 +5095,7 @@
       DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
       // Omit check for the last type.
       if (last != LAST_TYPE) {
-        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-                static_cast<int8_t>(last));
+        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
         DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
       }
     }
@@ -5218,7 +5106,7 @@
 
     if (base::bits::IsPowerOfTwo32(mask)) {
       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
-      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
       DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                    Deoptimizer::kWrongInstanceType);
     } else {
@@ -5588,13 +5476,6 @@
 }
 
 
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  DCHECK(ToRegister(instr->value()).is(eax));
-  __ push(eax);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
 void LCodeGen::DoTypeof(LTypeof* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5661,7 +5542,7 @@
     // Check for undetectable objects => true.
     __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     final_branch_condition = not_zero;
 
   } else if (String::Equals(type_name, factory()->function_string())) {
@@ -5682,7 +5563,7 @@
     __ j(below, false_label, false_distance);
     // Check for callable or undetectable objects => false.
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
-              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     final_branch_condition = zero;
 
 // clang-format off
@@ -5946,13 +5827,6 @@
   __ bind(&done);
 }
 
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
-  Register context = ToRegister(instr->context());
-  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
-}
-
-
 #undef __
 
 }  // namespace internal
diff --git a/src/crankshaft/x87/lithium-codegen-x87.h b/src/crankshaft/x87/lithium-codegen-x87.h
index 0cfbf70..3719236 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/src/crankshaft/x87/lithium-codegen-x87.h
@@ -31,8 +31,6 @@
         jump_table_(4, info->zone()),
         scope_(info->scope()),
         deferred_(8, info->zone()),
-        dynamic_frame_alignment_(false),
-        support_aligned_spilled_doubles_(false),
         frame_is_built_(false),
         x87_stack_(assembler),
         safepoints_(info->zone()),
@@ -221,11 +219,14 @@
 
   void LoadContextFromDeferred(LOperand* context);
 
-  // Generate a direct call to a known function.  Expects the function
+  void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+                          Register scratch2, Register scratch3);
+
+  // Generate a direct call to a known function. Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int formal_parameter_count, int arity,
-                         LInstruction* instr);
+                         bool is_tail_call, LInstruction* instr);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
@@ -329,7 +330,7 @@
   template <class T>
   void EmitVectorStoreICRegisters(T* instr);
 
-  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+  void EmitReturn(LReturn* instr);
 
   // Emits code for pushing either a tagged constant, a (non-double)
   // register, or a stack slot operand.
@@ -354,8 +355,6 @@
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   Scope* const scope_;
   ZoneList<LDeferredCode*> deferred_;
-  bool dynamic_frame_alignment_;
-  bool support_aligned_spilled_doubles_;
   bool frame_is_built_;
 
   class X87Stack : public ZoneObject {
diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc
index f770509..163d2c9 100644
--- a/src/crankshaft/x87/lithium-x87.cc
+++ b/src/crankshaft/x87/lithium-x87.cc
@@ -278,27 +278,6 @@
 }
 
 
-void LCallFunction::PrintDataTo(StringStream* stream) {
-  context()->PrintTo(stream);
-  stream->Add(" ");
-  function()->PrintTo(stream);
-  if (hydrogen()->HasVectorAndSlot()) {
-    stream->Add(" (type-feedback-vector ");
-    temp_vector()->PrintTo(stream);
-    stream->Add(" ");
-    temp_slot()->PrintTo(stream);
-    stream->Add(")");
-  }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  function()->PrintTo(stream);
-  stream->Add("#%d / ", arity());
-}
-
-
 void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < InputCount(); i++) {
     InputAt(i)->PrintTo(stream);
@@ -445,13 +424,6 @@
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
 
-  // Reserve the first spill slot for the state of dynamic alignment.
-  if (info()->IsOptimizing()) {
-    int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    DCHECK_EQ(alignment_state_index, 4);
-    USE(alignment_state_index);
-  }
-
   // If compiling for OSR, reserve space for the unoptimized frame,
   // which will be subsumed into this frame.
   if (graph()->has_osr()) {
@@ -623,12 +595,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
-  return instr;
+  return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
 }
 
 
@@ -952,22 +919,16 @@
   }
   chunk_->AddInstruction(instr, current_block_);
 
-  if (instr->IsCall() || instr->IsPrologue()) {
-    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
-    if (hydrogen_val->HasObservableSideEffects()) {
-      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
-      sim->ReplayEnvironment(current_block_->last_environment());
-      hydrogen_value_for_lazy_bailout = sim;
-    }
-    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
-    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
-    chunk_->AddInstruction(bailout, current_block_);
-  }
+  CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
 }
 
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
-  return new (zone()) LPrologue();
+  LInstruction* result = new (zone()) LPrologue();
+  if (info_->num_heap_slots() > 0) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
 }
 
 
@@ -980,8 +941,8 @@
   HValue* value = instr->value();
   Representation r = value->representation();
   HType type = value->type();
-  ToBooleanStub::Types expected = instr->expected_input_types();
-  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+  ToBooleanICStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
 
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
@@ -990,7 +951,7 @@
       temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
                    : new (zone()) LBranch(UseRegisterAtStart(value), temp);
   if (!easy_case &&
-      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+      ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
     branch = AssignEnvironment(branch);
   }
@@ -1118,16 +1079,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallJSFunction(
-    HCallJSFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), edi);
-
-  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
-  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
   CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1150,6 +1101,9 @@
 
   LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
       descriptor, ops, zone());
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1158,6 +1112,9 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
   LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+    result->MarkAsSyntacticTailCall();
+  }
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1263,22 +1220,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseFixed(instr->function(), edi);
-  LOperand* slot = NULL;
-  LOperand* vector = NULL;
-  if (instr->HasVectorAndSlot()) {
-    slot = FixedTemp(edx);
-    vector = FixedTemp(ebx);
-  }
-
-  LCallFunction* call =
-      new (zone()) LCallFunction(context, function, slot, vector);
-  return MarkAsCall(DefineFixed(call, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
@@ -1840,13 +1781,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
-    HBoundsCheckBaseIndexInformation* instr) {
-  UNREACHABLE();
-  return NULL;
-}
-
-
 LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
   // The control instruction marking the end of a block that completed
   // abruptly (e.g., threw an exception).  There is nothing specific to do.
@@ -2512,11 +2446,6 @@
       Retry(kNotEnoughSpillSlotsForOsr);
       spill_index = 0;
     }
-    if (spill_index == 0) {
-      // The dynamic frame alignment state overwrites the first local.
-      // The first local is saved at the end of the unoptimized frame.
-      spill_index = graph()->osr()->UnoptimizedFrameSlots();
-    }
     spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2556,13 +2485,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), eax);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseFixed(instr->value(), ebx);
@@ -2600,11 +2522,9 @@
   HEnvironment* outer = current_block_->last_environment();
   outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->inlining_kind());
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind(), instr->syntactic_tail_call_mode());
   // Only replay binding of arguments object if it wasn't removed from graph.
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2665,13 +2585,6 @@
   return AssignPointerMap(result);
 }
 
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->context());
-  return new(zone()) LStoreFrameContext(context);
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h
index 0f2813f..d83322a 100644
--- a/src/crankshaft/x87/lithium-x87.h
+++ b/src/crankshaft/x87/lithium-x87.h
@@ -33,9 +33,7 @@
   V(BitI)                                    \
   V(BoundsCheck)                             \
   V(Branch)                                  \
-  V(CallJSFunction)                          \
   V(CallWithDescriptor)                      \
-  V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
   V(CheckArrayBufferNotNeutered)             \
@@ -136,7 +134,6 @@
   V(StackCheck)                              \
   V(StoreCodeEntry)                          \
   V(StoreContextSlot)                        \
-  V(StoreFrameContext)                       \
   V(StoreKeyed)                              \
   V(StoreKeyedGeneric)                       \
   V(StoreNamedField)                         \
@@ -148,7 +145,6 @@
   V(SubI)                                    \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
-  V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
   V(TrapAllocationMemento)                   \
   V(Typeof)                                  \
@@ -229,6 +225,13 @@
   void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
   bool IsCall() const { return IsCallBits::decode(bit_field_); }
 
+  void MarkAsSyntacticTailCall() {
+    bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+  }
+  bool IsSyntacticTailCall() const {
+    return IsSyntacticTailCallBits::decode(bit_field_);
+  }
+
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
@@ -267,6 +270,8 @@
   virtual LOperand* TempAt(int i) = 0;
 
   class IsCallBits: public BitField<bool, 0, 1> {};
+  class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+  };
 
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
@@ -555,6 +560,7 @@
   LOperand* elements() { return inputs_[3]; }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+  DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
 };
 
 
@@ -1735,23 +1741,6 @@
 };
 
 
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallJSFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  LOperand* function() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
  public:
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1810,29 +1799,6 @@
 };
 
 
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
-  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
-                LOperand* vector) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-    temps_[0] = slot;
-    temps_[1] = vector;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  LOperand* temp_slot() { return temps_[0]; }
-  LOperand* temp_vector() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  void PrintDataTo(StringStream* stream) override;
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
 class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2433,19 +2399,6 @@
 };
 
 
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
@@ -2560,18 +2513,6 @@
 };
 
 
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LStoreFrameContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/d8.cc b/src/d8.cc
index 7c9a24f..0688380 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -133,8 +133,8 @@
   }
 
   uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
-                         const char* name, uint64_t id, uint64_t bind_id,
-                         int numArgs, const char** argNames,
+                         const char* name, const char* scope, uint64_t id,
+                         uint64_t bind_id, int numArgs, const char** argNames,
                          const uint8_t* argTypes, const uint64_t* argValues,
                          unsigned int flags) override {
     return 0;
@@ -251,7 +251,7 @@
 base::LazyMutex Shell::context_mutex_;
 const base::TimeTicks Shell::kInitialTicks =
     base::TimeTicks::HighResolutionNow();
-Global<Context> Shell::utility_context_;
+Global<Function> Shell::stringify_function_;
 base::LazyMutex Shell::workers_mutex_;
 bool Shell::allow_new_workers_ = true;
 i::List<Worker*> Shell::workers_;
@@ -412,24 +412,7 @@
       }
 #if !defined(V8_SHARED)
     } else {
-      v8::TryCatch try_catch(isolate);
-      v8::Local<v8::Context> context =
-          v8::Local<v8::Context>::New(isolate, utility_context_);
-      v8::Context::Scope context_scope(context);
-      Local<Object> global = context->Global();
-      Local<Value> fun =
-          global->Get(context, String::NewFromUtf8(isolate, "Stringify",
-                                                   v8::NewStringType::kNormal)
-                                   .ToLocalChecked()).ToLocalChecked();
-      Local<Value> argv[1] = {result};
-      Local<Value> s;
-      if (!Local<Function>::Cast(fun)
-               ->Call(context, global, 1, argv)
-               .ToLocal(&s)) {
-        return true;
-      }
-      DCHECK(!try_catch.HasCaught());
-      v8::String::Utf8Value str(s);
+      v8::String::Utf8Value str(Stringify(isolate, result));
       fwrite(*str, sizeof(**str), str.length(), stdout);
       printf("\n");
     }
@@ -906,11 +889,11 @@
 void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
   HandleScope handle_scope(isolate);
 #ifndef V8_SHARED
-  Local<Context> utility_context;
+  Local<Context> context;
   bool enter_context = !isolate->InContext();
   if (enter_context) {
-    utility_context = Local<Context>::New(isolate, utility_context_);
-    utility_context->Enter();
+    context = Local<Context>::New(isolate, evaluation_context_);
+    context->Enter();
   }
 #endif  // !V8_SHARED
   v8::String::Utf8Value exception(try_catch->Exception());
@@ -954,7 +937,7 @@
   }
   printf("\n");
 #ifndef V8_SHARED
-  if (enter_context) utility_context->Exit();
+  if (enter_context) context->Exit();
 #endif  // !V8_SHARED
 }
 
@@ -1057,60 +1040,37 @@
   counter->AddSample(sample);
 }
 
-
-class NoUseStrongForUtilityScriptScope {
- public:
-  NoUseStrongForUtilityScriptScope() : flag_(i::FLAG_use_strong) {
-    i::FLAG_use_strong = false;
-  }
-  ~NoUseStrongForUtilityScriptScope() { i::FLAG_use_strong = flag_; }
-
- private:
-  bool flag_;
-};
-
-
-void Shell::InstallUtilityScript(Isolate* isolate) {
-  NoUseStrongForUtilityScriptScope no_use_strong;
-  HandleScope scope(isolate);
-  // If we use the utility context, we have to set the security tokens so that
-  // utility, evaluation and debug context can all access each other.
-  Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
-  utility_context_.Reset(isolate, Context::New(isolate, NULL, global_template));
-  v8::Local<v8::Context> utility_context =
-      v8::Local<v8::Context>::New(isolate, utility_context_);
-  v8::Local<v8::Context> evaluation_context =
+// Turn a value into a human-readable string.
+Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
+  v8::Local<v8::Context> context =
       v8::Local<v8::Context>::New(isolate, evaluation_context_);
-  utility_context->SetSecurityToken(Undefined(isolate));
-  evaluation_context->SetSecurityToken(Undefined(isolate));
-  v8::Context::Scope context_scope(utility_context);
-
-  // Run the d8 shell utility script in the utility context
-  int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
-  i::Vector<const char> shell_source =
-      i::NativesCollection<i::D8>::GetScriptSource(source_index);
-  i::Vector<const char> shell_source_name =
-      i::NativesCollection<i::D8>::GetScriptName(source_index);
-  Local<String> source =
-      String::NewFromUtf8(isolate, shell_source.start(), NewStringType::kNormal,
-                          shell_source.length()).ToLocalChecked();
-  Local<String> name =
-      String::NewFromUtf8(isolate, shell_source_name.start(),
-                          NewStringType::kNormal,
-                          shell_source_name.length()).ToLocalChecked();
-  ScriptOrigin origin(name);
-  Local<Script> script =
-      Script::Compile(utility_context, source, &origin).ToLocalChecked();
-  script->Run(utility_context).ToLocalChecked();
-  // Mark the d8 shell script as native to avoid it showing up as normal source
-  // in the debugger.
-  i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
-  i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
-      ? i::Handle<i::Script>(i::Script::cast(
-          i::JSFunction::cast(*compiled_script)->shared()->script()))
-      : i::Handle<i::Script>(i::Script::cast(
-          i::SharedFunctionInfo::cast(*compiled_script)->script()));
-  script_object->set_type(i::Script::TYPE_EXTENSION);
+  if (stringify_function_.IsEmpty()) {
+    int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
+    i::Vector<const char> source_string =
+        i::NativesCollection<i::D8>::GetScriptSource(source_index);
+    i::Vector<const char> source_name =
+        i::NativesCollection<i::D8>::GetScriptName(source_index);
+    Local<String> source =
+        String::NewFromUtf8(isolate, source_string.start(),
+                            NewStringType::kNormal, source_string.length())
+            .ToLocalChecked();
+    Local<String> name =
+        String::NewFromUtf8(isolate, source_name.start(),
+                            NewStringType::kNormal, source_name.length())
+            .ToLocalChecked();
+    ScriptOrigin origin(name);
+    Local<Script> script =
+        Script::Compile(context, source, &origin).ToLocalChecked();
+    stringify_function_.Reset(
+        isolate, script->Run(context).ToLocalChecked().As<Function>());
+  }
+  Local<Function> fun = Local<Function>::New(isolate, stringify_function_);
+  Local<Value> argv[1] = {value};
+  v8::TryCatch try_catch(isolate);
+  MaybeLocal<Value> result =
+      fun->Call(context, Undefined(isolate), 1, argv);
+  if (result.IsEmpty()) return String::Empty(isolate);
+  return result.ToLocalChecked().As<String>();
 }
 #endif  // !V8_SHARED
 
@@ -1320,7 +1280,6 @@
 
 void Shell::OnExit(v8::Isolate* isolate) {
 #ifndef V8_SHARED
-  reinterpret_cast<i::Isolate*>(isolate)->DumpAndResetCompilationStats();
   if (i::FLAG_dump_counters) {
     int number_of_counters = 0;
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1997,8 +1956,6 @@
 
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
 
-  bool enable_harmony_modules = false;
-
   // Set up isolated source groups.
   options.isolate_sources = new SourceGroup[options.num_isolates];
   SourceGroup* current = options.isolate_sources;
@@ -2011,7 +1968,6 @@
       current->Begin(argv, i + 1);
     } else if (strcmp(str, "--module") == 0) {
       // Pass on to SourceGroup, which understands this option.
-      enable_harmony_modules = true;
     } else if (strncmp(argv[i], "--", 2) == 0) {
       printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
     } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@@ -2027,10 +1983,6 @@
     SetFlagsFromString("--nologfile_per_isolate");
   }
 
-  if (enable_harmony_modules) {
-    SetFlagsFromString("--harmony-modules");
-  }
-
   return true;
 }
 
@@ -2241,8 +2193,6 @@
                                           int* offset) {
   DCHECK(offset);
   EscapableHandleScope scope(isolate);
-  // This function should not use utility_context_ because it is running on a
-  // different thread.
   Local<Value> result;
   SerializationTag tag = data.ReadTag(offset);
 
@@ -2523,16 +2473,13 @@
     // Run interactive shell if explicitly requested or if no script has been
     // executed, but never on --test
     if (options.use_interactive_shell()) {
-#ifndef V8_SHARED
-      InstallUtilityScript(isolate);
-#endif  // !V8_SHARED
       RunShell(isolate);
     }
 
     // Shut down contexts and collect garbage.
     evaluation_context_.Reset();
 #ifndef V8_SHARED
-    utility_context_.Reset();
+    stringify_function_.Reset();
 #endif  // !V8_SHARED
     CollectGarbage(isolate);
   }
diff --git a/src/d8.h b/src/d8.h
index 16f612c..321d9c1 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -446,7 +446,7 @@
   static Global<Context> evaluation_context_;
   static base::OnceType quit_once_;
 #ifndef V8_SHARED
-  static Global<Context> utility_context_;
+  static Global<Function> stringify_function_;
   static CounterMap* counter_map_;
   // We statically allocate a set of local counters to be used if we
   // don't want to store the stats in a memory-mapped file
@@ -462,7 +462,7 @@
   static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;
 
   static Counter* GetCounter(const char* name, bool is_histogram);
-  static void InstallUtilityScript(Isolate* isolate);
+  static Local<String> Stringify(Isolate* isolate, Local<Value> value);
 #endif  // !V8_SHARED
   static void Initialize(Isolate* isolate);
   static void RunShell(Isolate* isolate);
diff --git a/src/d8.js b/src/d8.js
index 27a0bc3..e49c6b7 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+(function() {
 "use strict";
 
 // A more universal stringify that supports more types than JSON.
@@ -89,3 +90,6 @@
   }
   return '[' + proxy_type + ' Proxy ' + Stringify(info_object, depth-1) + ']';
 }
+
+return Stringify;
+})();
diff --git a/src/debug/arm/debug-arm.cc b/src/debug/arm/debug-arm.cc
index 5fdda4f..fa3540e 100644
--- a/src/debug/arm/debug-arm.cc
+++ b/src/debug/arm/debug-arm.cc
@@ -81,9 +81,15 @@
     __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
     __ push(ip);
 
-    if (mode == SAVE_RESULT_REGISTER) __ push(r0);
-
-    __ mov(r0, Operand::Zero());  // no arguments
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ push(r0);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
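+    // r0 holds the argument count for the Runtime::kDebugBreak call; the
+    // single argument is the value pushed above.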
+    __ mov(r0, Operand(1));
     __ mov(r1,
            Operand(ExternalReference(
                Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -94,12 +100,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; i++) {
         Register reg = {JSCallerSavedCode(i)};
-        __ mov(reg, Operand(kDebugZapValue));
+        // Do not clobber r0 if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function.
+        if (!(reg.is(r0) && (mode == SAVE_RESULT_REGISTER))) {
+          __ mov(reg, Operand(kDebugZapValue));
+        }
       }
     }
 
-    if (mode == SAVE_RESULT_REGISTER) __ pop(r0);
-
     // Don't bother removing padding bytes pushed on the stack
     // as the frame is going to be restored right away.
 
@@ -119,8 +127,7 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // Load the function pointer off of our current stack frame.
-  __ ldr(r1, MemOperand(fp,
-         StandardFrameConstants::kConstantPoolOffset - kPointerSize));
+  __ ldr(r1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
 
   // Pop return address, frame and constant pool pointer (if
   // FLAG_enable_embedded_constant_pool).
diff --git a/src/debug/arm64/debug-arm64.cc b/src/debug/arm64/debug-arm64.cc
index 3e4b67c..cd01721 100644
--- a/src/debug/arm64/debug-arm64.cc
+++ b/src/debug/arm64/debug-arm64.cc
@@ -92,9 +92,15 @@
     __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
     __ Push(scratch);
 
-    if (mode == SAVE_RESULT_REGISTER) __ Push(x0);
-
-    __ Mov(x0, 0);  // No arguments.
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ Push(x0);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
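+    // x0 holds the argument count for the Runtime::kDebugBreak call; the
+    // single argument is the value pushed above.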
+    __ Mov(x0, 1);
     __ Mov(x1, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
                                  masm->isolate()));
 
@@ -104,13 +110,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; i++) {
         Register reg = Register::XRegFromCode(JSCallerSavedCode(i));
-        __ Mov(reg, Operand(kDebugZapValue));
+        // Do not clobber x0 if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function.
+        if (!(reg.is(x0) && (mode == SAVE_RESULT_REGISTER))) {
+          __ Mov(reg, Operand(kDebugZapValue));
+        }
       }
     }
 
-    // Restore the register values from the expression stack.
-    if (mode == SAVE_RESULT_REGISTER) __ Pop(x0);
-
     // Don't bother removing padding bytes pushed on the stack
     // as the frame is going to be restored right away.
 
@@ -130,10 +137,12 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // We do not know our frame height, but set sp based on fp.
-  __ Sub(masm->StackPointer(), fp, kPointerSize);
+  __ Add(masm->StackPointer(), fp, FrameDropperFrameConstants::kFunctionOffset);
   __ AssertStackConsistency();
 
-  __ Pop(x1, fp, lr);  // Function, Frame, Return address.
+  __ Pop(x1);  // Function
+  __ Mov(masm->StackPointer(), Operand(fp));
+  __ Pop(fp, lr);  // Frame, Return address.
 
   ParameterCount dummy(0);
   __ FloodFunctionIfStepping(x1, no_reg, dummy, dummy);
diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc
index 8114c21..1729408 100644
--- a/src/debug/debug-evaluate.cc
+++ b/src/debug/debug-evaluate.cc
@@ -73,14 +73,12 @@
   ContextBuilder context_builder(isolate, frame, inlined_jsframe_index);
   if (isolate->has_pending_exception()) return MaybeHandle<Object>();
 
-  Handle<Context> context = context_builder.native_context();
+  Handle<Context> context = context_builder.evaluation_context();
   Handle<JSObject> receiver(context->global_proxy());
-  MaybeHandle<Object> maybe_result = Evaluate(
-      isolate, context_builder.outer_info(),
-      context_builder.innermost_context(), context_extension, receiver, source);
-  if (!maybe_result.is_null() && !FLAG_debug_eval_readonly_locals) {
-    context_builder.UpdateValues();
-  }
+  MaybeHandle<Object> maybe_result =
+      Evaluate(isolate, context_builder.outer_info(), context,
+               context_extension, receiver, source);
+  if (!maybe_result.is_null()) context_builder.UpdateValues();
   return maybe_result;
 }
 
@@ -130,113 +128,81 @@
   Handle<JSFunction> local_function =
       Handle<JSFunction>::cast(frame_inspector.GetFunction());
   Handle<Context> outer_context(local_function->context());
-  native_context_ = Handle<Context>(outer_context->native_context());
-  Handle<JSFunction> global_function(native_context_->closure());
-  outer_info_ = handle(global_function->shared());
-  Handle<Context> inner_context;
+  evaluation_context_ = outer_context;
+  outer_info_ = handle(local_function->shared());
+  Factory* factory = isolate->factory();
 
-  bool stop = false;
-
-  // Iterate the original context chain to create a context chain that reflects
-  // our needs. The original context chain may look like this:
-  // <native context> <outer contexts> <function context> <inner contexts>
-  // In the resulting context chain, we want to materialize the receiver,
-  // the parameters of the current function, the stack locals. We only
-  // materialize context variables that the function already references,
-  // because only for those variables we can be sure that they will be resolved
-  // correctly. Variables that are not referenced by the function may be
-  // context-allocated and thus accessible, but may be shadowed by stack-
-  // allocated variables and the resolution would be incorrect.
-  // The result will look like this:
-  // <native context> <receiver context>
-  //     <materialized stack and accessible context vars> <inner contexts>
-  // All contexts use the closure of the native context, since there is no
-  // function context in the chain. Variables that cannot be resolved are
-  // bound to toplevel (script contexts or global object).
-  // Once debug-evaluate has been executed, the changes to the materialized
-  // objects are written back to the original context chain. Any changes to
-  // the original context chain will therefore be overwritten.
+  // To evaluate as if we were running eval at the point of the debug break,
+  // we reconstruct the context chain as follows:
+  //  - To make stack-allocated variables visible, we materialize them and
+  //    use a debug-evaluate context to wrap both the materialized object and
+  //    the original context.
+  //  - We use the original context chain from the function context to the
+  //    native context.
+  //  - Between the function scope and the native context, we only resolve
+  //    variable names that the current function already uses. Only for these
+  //    names can we be sure that they will be correctly resolved. For the
+  //    rest, we only resolve to with, script, and native contexts. We use a
+  //    whitelist to implement that.
+  // Context::Lookup has special handling for debug-evaluate contexts:
+  //  - Look up in the materialized stack variables.
+  //  - Look up in the original context.
+  //  - Check the whitelist to find out whether to skip contexts during lookup.
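+  // For example, when paused inside a function, a stack-allocated local 'x'
+  // becomes a property of the materialized object and is found there first,
+  // while a free variable the function already references is on the whitelist
+  // and resolves through the wrapped original context.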
   const ScopeIterator::Option option = ScopeIterator::COLLECT_NON_LOCALS;
   for (ScopeIterator it(isolate, &frame_inspector, option);
-       !it.Failed() && !it.Done() && !stop; it.Next()) {
+       !it.Failed() && !it.Done(); it.Next()) {
     ScopeIterator::ScopeType scope_type = it.Type();
     if (scope_type == ScopeIterator::ScopeTypeLocal) {
       DCHECK_EQ(FUNCTION_SCOPE, it.CurrentScopeInfo()->scope_type());
-      it.GetNonLocals(&non_locals_);
+      Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
       Handle<Context> local_context =
           it.HasContext() ? it.CurrentContext() : outer_context;
-
-      // The "this" binding, if any, can't be bound via "with".  If we need
-      // to, add another node onto the outer context to bind "this".
-      Handle<Context> receiver_context =
-          MaterializeReceiver(native_context_, local_context, local_function,
-                              global_function, it.ThisIsNonLocal());
-
-      Handle<JSObject> materialized_function = NewJSObjectWithNullProto();
-      frame_inspector.MaterializeStackLocals(materialized_function,
-                                             local_function);
-      MaterializeArgumentsObject(materialized_function, local_function);
-      MaterializeContextChain(materialized_function, local_context);
-
-      Handle<Context> with_context = isolate->factory()->NewWithContext(
-          global_function, receiver_context, materialized_function);
-
+      Handle<StringSet> non_locals = it.GetNonLocals();
+      MaterializeReceiver(materialized, local_context, local_function,
+                          non_locals);
+      frame_inspector.MaterializeStackLocals(materialized, local_function);
+      MaterializeArgumentsObject(materialized, local_function);
       ContextChainElement context_chain_element;
-      context_chain_element.original_context = local_context;
-      context_chain_element.materialized_object = materialized_function;
       context_chain_element.scope_info = it.CurrentScopeInfo();
+      context_chain_element.materialized_object = materialized;
+      // Non-locals that are already being referenced by the current function
+      // are guaranteed to be correctly resolved.
+      context_chain_element.whitelist = non_locals;
+      if (it.HasContext()) {
+        context_chain_element.wrapped_context = it.CurrentContext();
+      }
       context_chain_.Add(context_chain_element);
-
-      stop = true;
-      RecordContextsInChain(&inner_context, receiver_context, with_context);
+      evaluation_context_ = outer_context;
+      break;
     } else if (scope_type == ScopeIterator::ScopeTypeCatch ||
                scope_type == ScopeIterator::ScopeTypeWith) {
-      Handle<Context> cloned_context = Handle<Context>::cast(
-          isolate->factory()->CopyFixedArray(it.CurrentContext()));
-
       ContextChainElement context_chain_element;
-      context_chain_element.original_context = it.CurrentContext();
-      context_chain_element.cloned_context = cloned_context;
-      context_chain_.Add(context_chain_element);
-
-      RecordContextsInChain(&inner_context, cloned_context, cloned_context);
-    } else if (scope_type == ScopeIterator::ScopeTypeBlock) {
-      Handle<JSObject> materialized_object = NewJSObjectWithNullProto();
-      frame_inspector.MaterializeStackLocals(materialized_object,
-                                             it.CurrentScopeInfo());
-      if (it.HasContext()) {
-        Handle<Context> cloned_context = Handle<Context>::cast(
-            isolate->factory()->CopyFixedArray(it.CurrentContext()));
-        Handle<Context> with_context = isolate->factory()->NewWithContext(
-            global_function, cloned_context, materialized_object);
-
-        ContextChainElement context_chain_element;
-        context_chain_element.original_context = it.CurrentContext();
-        context_chain_element.cloned_context = cloned_context;
-        context_chain_element.materialized_object = materialized_object;
-        context_chain_element.scope_info = it.CurrentScopeInfo();
-        context_chain_.Add(context_chain_element);
-
-        RecordContextsInChain(&inner_context, cloned_context, with_context);
-      } else {
-        Handle<Context> with_context = isolate->factory()->NewWithContext(
-            global_function, outer_context, materialized_object);
-
-        ContextChainElement context_chain_element;
-        context_chain_element.materialized_object = materialized_object;
-        context_chain_element.scope_info = it.CurrentScopeInfo();
-        context_chain_.Add(context_chain_element);
-
-        RecordContextsInChain(&inner_context, with_context, with_context);
+      Handle<Context> current_context = it.CurrentContext();
+      if (!current_context->IsDebugEvaluateContext()) {
+        context_chain_element.wrapped_context = current_context;
       }
+      context_chain_.Add(context_chain_element);
+    } else if (scope_type == ScopeIterator::ScopeTypeBlock) {
+      Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
+      frame_inspector.MaterializeStackLocals(materialized,
+                                             it.CurrentScopeInfo());
+      ContextChainElement context_chain_element;
+      context_chain_element.scope_info = it.CurrentScopeInfo();
+      context_chain_element.materialized_object = materialized;
+      if (it.HasContext()) {
+        context_chain_element.wrapped_context = it.CurrentContext();
+      }
+      context_chain_.Add(context_chain_element);
     } else {
-      stop = true;
+      break;
     }
   }
-  if (innermost_context_.is_null()) {
-    innermost_context_ = outer_context;
+
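+  // Wrap the collected scopes from the outermost element inwards, so that the
+  // resulting evaluation context corresponds to the innermost scope at the
+  // break position.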
+  for (int i = context_chain_.length() - 1; i >= 0; i--) {
+    evaluation_context_ = factory->NewDebugEvaluateContext(
+        evaluation_context_, context_chain_[i].materialized_object,
+        context_chain_[i].wrapped_context, context_chain_[i].whitelist);
   }
-  DCHECK(!innermost_context_.is_null());
 }
 
 
@@ -244,53 +210,16 @@
   // TODO(yangguo): remove updating values.
   for (int i = 0; i < context_chain_.length(); i++) {
     ContextChainElement element = context_chain_[i];
-    if (!element.original_context.is_null() &&
-        !element.cloned_context.is_null()) {
-      Handle<Context> cloned_context = element.cloned_context;
-      cloned_context->CopyTo(
-          Context::MIN_CONTEXT_SLOTS, *element.original_context,
-          Context::MIN_CONTEXT_SLOTS,
-          cloned_context->length() - Context::MIN_CONTEXT_SLOTS);
-    }
     if (!element.materialized_object.is_null()) {
-      // Write back potential changes to materialized stack locals to the
-      // stack.
+      // Write back potential changes to materialized stack locals to the stack.
       FrameInspector(frame_, inlined_jsframe_index_, isolate_)
           .UpdateStackLocalsFromMaterializedObject(element.materialized_object,
                                                    element.scope_info);
-      if (element.scope_info->scope_type() == FUNCTION_SCOPE) {
-        DCHECK_EQ(context_chain_.length() - 1, i);
-        UpdateContextChainFromMaterializedObject(element.materialized_object,
-                                                 element.original_context);
-      }
     }
   }
 }
 
 
-Handle<JSObject> DebugEvaluate::ContextBuilder::NewJSObjectWithNullProto() {
-  Handle<JSObject> result =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
-  Handle<Map> new_map =
-      Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
-  Map::SetPrototype(new_map, isolate_->factory()->null_value());
-  JSObject::MigrateToMap(result, new_map);
-  return result;
-}
-
-
-void DebugEvaluate::ContextBuilder::RecordContextsInChain(
-    Handle<Context>* inner_context, Handle<Context> first,
-    Handle<Context> last) {
-  if (!inner_context->is_null()) {
-    (*inner_context)->set_previous(*last);
-  } else {
-    innermost_context_ = last;
-  }
-  *inner_context = first;
-}
-
-
 void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
     Handle<JSObject> target, Handle<JSFunction> function) {
   // Do not materialize the arguments object for eval or top-level code.
@@ -309,98 +238,19 @@
       .Check();
 }
 
-
-MaybeHandle<Object> DebugEvaluate::ContextBuilder::LoadFromContext(
-    Handle<Context> context, Handle<String> name, bool* global) {
-  static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
-  int index;
-  PropertyAttributes attributes;
-  BindingFlags binding;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes, &binding);
-  if (holder.is_null()) return MaybeHandle<Object>();
-  Handle<Object> value;
-  if (index != Context::kNotFound) {  // Found on context.
-    Handle<Context> context = Handle<Context>::cast(holder);
-    // Do not shadow variables on the script context.
-    *global = context->IsScriptContext();
-    return Handle<Object>(context->get(index), isolate_);
-  } else {  // Found on object.
-    Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
-    // Do not shadow properties on the global object.
-    *global = object->IsJSGlobalObject();
-    return JSReceiver::GetDataProperty(object, name);
-  }
-}
-
-
-void DebugEvaluate::ContextBuilder::MaterializeContextChain(
-    Handle<JSObject> target, Handle<Context> context) {
-  for (const Handle<String>& name : non_locals_) {
-    HandleScope scope(isolate_);
-    Handle<Object> value;
-    bool global;
-    if (!LoadFromContext(context, name, &global).ToHandle(&value) || global) {
-      // If resolving the variable fails, skip it. If it resolves to a global
-      // variable, skip it as well since it's not read-only and can be resolved
-      // within debug-evaluate.
-      continue;
-    }
-    if (value->IsTheHole()) continue;  // Value is not initialized yet (in TDZ).
-    JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
-  }
-}
-
-
-void DebugEvaluate::ContextBuilder::StoreToContext(Handle<Context> context,
-                                                   Handle<String> name,
-                                                   Handle<Object> value) {
-  static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
-  int index;
-  PropertyAttributes attributes;
-  BindingFlags binding;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes, &binding);
-  if (holder.is_null()) return;
-  if (attributes & READ_ONLY) return;
-  if (index != Context::kNotFound) {  // Found on context.
-    Handle<Context> context = Handle<Context>::cast(holder);
-    context->set(index, *value);
-  } else {  // Found on object.
-    Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
-    LookupIterator lookup(object, name);
-    if (lookup.state() != LookupIterator::DATA) return;
-    CHECK(JSReceiver::SetDataProperty(&lookup, value).FromJust());
-  }
-}
-
-
-void DebugEvaluate::ContextBuilder::UpdateContextChainFromMaterializedObject(
-    Handle<JSObject> source, Handle<Context> context) {
-  // TODO(yangguo): check whether overwriting context fields is actually safe
-  //                wrt fields we consider constant.
-  for (const Handle<String>& name : non_locals_) {
-    HandleScope scope(isolate_);
-    Handle<Object> value = JSReceiver::GetDataProperty(source, name);
-    StoreToContext(context, name, value);
-  }
-}
-
-
-Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
-    Handle<Context> parent_context, Handle<Context> lookup_context,
-    Handle<JSFunction> local_function, Handle<JSFunction> global_function,
-    bool this_is_non_local) {
-  Handle<Object> receiver = isolate_->factory()->undefined_value();
-  Handle<String> this_string = isolate_->factory()->this_string();
-  if (this_is_non_local) {
-    bool global;
-    LoadFromContext(lookup_context, this_string, &global).ToHandle(&receiver);
+void DebugEvaluate::ContextBuilder::MaterializeReceiver(
+    Handle<JSObject> target, Handle<Context> local_context,
+    Handle<JSFunction> local_function, Handle<StringSet> non_locals) {
+  Handle<Object> recv = isolate_->factory()->undefined_value();
+  Handle<String> name = isolate_->factory()->this_string();
+  if (non_locals->Has(name)) {
+    // 'this' is allocated in an outer context and is already being
+    // referenced by the current function, so it can be correctly resolved.
+    return;
   } else if (local_function->shared()->scope_info()->HasReceiver()) {
-    receiver = handle(frame_->receiver(), isolate_);
+    recv = handle(frame_->receiver(), isolate_);
   }
-  return isolate_->factory()->NewCatchContext(global_function, parent_context,
-                                              this_string, receiver);
+  JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
 }
 
 }  // namespace internal
diff --git a/src/debug/debug-evaluate.h b/src/debug/debug-evaluate.h
index c0b1f02..26f4e41 100644
--- a/src/debug/debug-evaluate.h
+++ b/src/debug/debug-evaluate.h
@@ -53,51 +53,30 @@
 
     void UpdateValues();
 
-    Handle<Context> innermost_context() const { return innermost_context_; }
-    Handle<Context> native_context() const { return native_context_; }
+    Handle<Context> evaluation_context() const { return evaluation_context_; }
     Handle<SharedFunctionInfo> outer_info() const { return outer_info_; }
 
    private:
     struct ContextChainElement {
-      Handle<Context> original_context;
-      Handle<Context> cloned_context;
-      Handle<JSObject> materialized_object;
       Handle<ScopeInfo> scope_info;
+      Handle<Context> wrapped_context;
+      Handle<JSObject> materialized_object;
+      Handle<StringSet> whitelist;
     };
 
-    void RecordContextsInChain(Handle<Context>* inner_context,
-                               Handle<Context> first, Handle<Context> last);
-
-    Handle<JSObject> NewJSObjectWithNullProto();
-
     // Helper function to find or create the arguments object for
     // Runtime_DebugEvaluate.
     void MaterializeArgumentsObject(Handle<JSObject> target,
                                     Handle<JSFunction> function);
 
-    void MaterializeContextChain(Handle<JSObject> target,
-                                 Handle<Context> context);
-
-    void UpdateContextChainFromMaterializedObject(Handle<JSObject> source,
-                                                  Handle<Context> context);
-
-    Handle<Context> MaterializeReceiver(Handle<Context> parent_context,
-                                        Handle<Context> lookup_context,
-                                        Handle<JSFunction> local_function,
-                                        Handle<JSFunction> global_function,
-                                        bool this_is_non_local);
-
-    MaybeHandle<Object> LoadFromContext(Handle<Context> context,
-                                        Handle<String> name, bool* global);
-
-    void StoreToContext(Handle<Context> context, Handle<String> name,
-                        Handle<Object> value);
+    void MaterializeReceiver(Handle<JSObject> target,
+                             Handle<Context> local_context,
+                             Handle<JSFunction> local_function,
+                             Handle<StringSet> non_locals);
 
     Handle<SharedFunctionInfo> outer_info_;
-    Handle<Context> innermost_context_;
-    Handle<Context> native_context_;
+    Handle<Context> evaluation_context_;
     List<ContextChainElement> context_chain_;
-    List<Handle<String> > non_locals_;
     Isolate* isolate_;
     JavaScriptFrame* frame_;
     int inlined_jsframe_index_;
diff --git a/src/debug/debug-frames.cc b/src/debug/debug-frames.cc
index 25634be..a7956ff 100644
--- a/src/debug/debug-frames.cc
+++ b/src/debug/debug-frames.cc
@@ -133,8 +133,10 @@
     if (scope_info->LocalIsSynthetic(i)) continue;
     Handle<String> name(scope_info->StackLocalName(i));
     Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
+    // TODO(yangguo): We convert optimized out values to {undefined} when they
+    // are passed to the debugger. Eventually we should handle them somehow.
     if (value->IsTheHole()) value = isolate_->factory()->undefined_value();
-
+    if (value->IsOptimizedOut()) value = isolate_->factory()->undefined_value();
     JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
   }
 }
diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc
index e785384..d9c615b 100644
--- a/src/debug/debug-scopes.cc
+++ b/src/debug/debug-scopes.cc
@@ -19,7 +19,6 @@
     : isolate_(isolate),
       frame_inspector_(frame_inspector),
       nested_scope_chain_(4),
-      non_locals_(nullptr),
       seen_script_scope_(false),
       failed_(false) {
   if (!frame_inspector->GetContext()->IsContext() ||
@@ -73,7 +72,9 @@
       }
     }
     if (scope_info->scope_type() == FUNCTION_SCOPE) {
-      nested_scope_chain_.Add(scope_info);
+      nested_scope_chain_.Add(ExtendedScopeInfo(scope_info,
+                                                shared_info->start_position(),
+                                                shared_info->end_position()));
     }
     if (!collect_non_locals) return;
   }
@@ -81,7 +82,7 @@
   // Reparse the code and analyze the scopes.
   Scope* scope = NULL;
   // Check whether we are in global, eval or function code.
-  Zone zone;
+  Zone zone(isolate->allocator());
   if (scope_info->scope_type() != FUNCTION_SCOPE) {
     // Global or eval code.
     Handle<Script> script(Script::cast(shared_info->script()));
@@ -107,6 +108,7 @@
     if (!ignore_nested_scopes) RetrieveScopeChain(scope);
     if (collect_non_locals) CollectNonLocals(scope);
   }
+  UnwrapEvaluationContext();
 }
 
 
@@ -114,10 +116,26 @@
     : isolate_(isolate),
       frame_inspector_(NULL),
       context_(function->context()),
-      non_locals_(nullptr),
       seen_script_scope_(false),
       failed_(false) {
   if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
+  UnwrapEvaluationContext();
+}
+
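+// Skip any debug-evaluate contexts at the head of the context chain and
+// continue with the wrapped context (or the previous context) instead, so
+// that scope iteration only sees real scopes.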
+void ScopeIterator::UnwrapEvaluationContext() {
+  while (true) {
+    if (context_.is_null()) return;
+    if (!context_->IsDebugEvaluateContext()) return;
+    // An existing debug-evaluate context can only be outside the local scope.
+    DCHECK(nested_scope_chain_.is_empty());
+    Handle<Object> wrapped(context_->get(Context::WRAPPED_CONTEXT_INDEX),
+                           isolate_);
+    if (wrapped->IsContext()) {
+      context_ = Handle<Context>::cast(wrapped);
+    } else {
+      context_ = Handle<Context>(context_->previous(), isolate_);
+    }
+  }
 }
 
 
@@ -130,11 +148,32 @@
   Handle<JSObject> scope_object;
   ASSIGN_RETURN_ON_EXCEPTION(isolate_, scope_object, ScopeObject(), JSObject);
   details->set(kScopeDetailsObjectIndex, *scope_object);
-  if (HasContext() && CurrentContext()->closure() != NULL) {
-    Handle<String> closure_name = JSFunction::GetDebugName(
-        Handle<JSFunction>(CurrentContext()->closure()));
-    if (!closure_name.is_null() && (closure_name->length() != 0))
+  Handle<JSFunction> js_function = HasContext()
+                                       ? handle(CurrentContext()->closure())
+                                       : Handle<JSFunction>::null();
+  if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript) {
+    return isolate_->factory()->NewJSArrayWithElements(details);
+  }
+
+  int start_position = 0;
+  int end_position = 0;
+  if (!nested_scope_chain_.is_empty()) {
+    js_function = GetFunction();
+    start_position = nested_scope_chain_.last().start_position;
+    end_position = nested_scope_chain_.last().end_position;
+  } else if (!js_function.is_null()) {
+    start_position = js_function->shared()->start_position();
+    end_position = js_function->shared()->end_position();
+  }
+
+  if (!js_function.is_null()) {
+    Handle<String> closure_name = JSFunction::GetDebugName(js_function);
+    if (!closure_name.is_null() && closure_name->length() != 0) {
       details->set(kScopeDetailsNameIndex, *closure_name);
+    }
+    details->set(kScopeDetailsStartPositionIndex, Smi::FromInt(start_position));
+    details->set(kScopeDetailsEndPositionIndex, Smi::FromInt(end_position));
+    details->set(kScopeDetailsFunctionIndex, *js_function);
   }
   return isolate_->factory()->NewJSArrayWithElements(details);
 }
@@ -147,30 +186,28 @@
     // The global scope is always the last in the chain.
     DCHECK(context_->IsNativeContext());
     context_ = Handle<Context>();
-    return;
-  }
-  if (scope_type == ScopeTypeScript) {
+  } else if (scope_type == ScopeTypeScript) {
     seen_script_scope_ = true;
     if (context_->IsScriptContext()) {
       context_ = Handle<Context>(context_->previous(), isolate_);
     }
     if (!nested_scope_chain_.is_empty()) {
-      DCHECK_EQ(nested_scope_chain_.last()->scope_type(), SCRIPT_SCOPE);
+      DCHECK_EQ(nested_scope_chain_.last().scope_info->scope_type(),
+                SCRIPT_SCOPE);
       nested_scope_chain_.RemoveLast();
       DCHECK(nested_scope_chain_.is_empty());
     }
     CHECK(context_->IsNativeContext());
-    return;
-  }
-  if (nested_scope_chain_.is_empty()) {
+  } else if (nested_scope_chain_.is_empty()) {
     context_ = Handle<Context>(context_->previous(), isolate_);
   } else {
-    if (nested_scope_chain_.last()->HasContext()) {
+    if (nested_scope_chain_.last().scope_info->HasContext()) {
       DCHECK(context_->previous() != NULL);
       context_ = Handle<Context>(context_->previous(), isolate_);
     }
     nested_scope_chain_.RemoveLast();
   }
+  UnwrapEvaluationContext();
 }
 
 
@@ -178,7 +215,7 @@
 ScopeIterator::ScopeType ScopeIterator::Type() {
   DCHECK(!failed_);
   if (!nested_scope_chain_.is_empty()) {
-    Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+    Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
     switch (scope_info->scope_type()) {
       case FUNCTION_SCOPE:
         DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
@@ -190,7 +227,7 @@
         DCHECK(context_->IsScriptContext() || context_->IsNativeContext());
         return ScopeTypeScript;
       case WITH_SCOPE:
-        DCHECK(context_->IsWithContext());
+        DCHECK(context_->IsWithContext() || context_->IsDebugEvaluateContext());
         return ScopeTypeWith;
       case CATCH_SCOPE:
         DCHECK(context_->IsCatchContext());
@@ -223,7 +260,7 @@
   if (context_->IsScriptContext()) {
     return ScopeTypeScript;
   }
-  DCHECK(context_->IsWithContext());
+  DCHECK(context_->IsWithContext() || context_->IsDebugEvaluateContext());
   return ScopeTypeWith;
 }
 
@@ -240,9 +277,7 @@
       DCHECK(nested_scope_chain_.length() == 1);
       return MaterializeLocalScope();
     case ScopeIterator::ScopeTypeWith:
-      // Return the with object.
-      // TODO(neis): This breaks for proxies.
-      return handle(JSObject::cast(CurrentContext()->extension_receiver()));
+      return WithContextExtension();
     case ScopeIterator::ScopeTypeCatch:
       return MaterializeCatchScope();
     case ScopeIterator::ScopeTypeClosure:
@@ -262,7 +297,7 @@
   ScopeType type = Type();
   if (type == ScopeTypeBlock || type == ScopeTypeLocal) {
     if (!nested_scope_chain_.is_empty()) {
-      return nested_scope_chain_.last()->HasContext();
+      return nested_scope_chain_.last().scope_info->HasContext();
     }
   }
   return true;
@@ -298,7 +333,7 @@
 Handle<ScopeInfo> ScopeIterator::CurrentScopeInfo() {
   DCHECK(!failed_);
   if (!nested_scope_chain_.is_empty()) {
-    return nested_scope_chain_.last();
+    return nested_scope_chain_.last().scope_info;
   } else if (context_->IsBlockContext()) {
     return Handle<ScopeInfo>(context_->scope_info());
   } else if (context_->IsFunctionContext()) {
@@ -313,33 +348,14 @@
   if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
       nested_scope_chain_.is_empty()) {
     return context_;
-  } else if (nested_scope_chain_.last()->HasContext()) {
+  } else if (nested_scope_chain_.last().scope_info->HasContext()) {
     return context_;
   } else {
     return Handle<Context>();
   }
 }
 
-
-void ScopeIterator::GetNonLocals(List<Handle<String> >* list_out) {
-  Handle<String> this_string = isolate_->factory()->this_string();
-  for (HashMap::Entry* entry = non_locals_->Start(); entry != nullptr;
-       entry = non_locals_->Next(entry)) {
-    Handle<String> name(reinterpret_cast<String**>(entry->key));
-    // We need to treat "this" differently.
-    if (name.is_identical_to(this_string)) continue;
-    list_out->Add(Handle<String>(reinterpret_cast<String**>(entry->key)));
-  }
-}
-
-
-bool ScopeIterator::ThisIsNonLocal() {
-  Handle<String> this_string = isolate_->factory()->this_string();
-  void* key = reinterpret_cast<void*>(this_string.location());
-  HashMap::Entry* entry = non_locals_->Lookup(key, this_string->Hash());
-  return entry != nullptr;
-}
-
+Handle<StringSet> ScopeIterator::GetNonLocals() { return non_locals_; }
 
 #ifdef DEBUG
 // Debug print of the content of the current scope.
@@ -409,7 +425,7 @@
 void ScopeIterator::RetrieveScopeChain(Scope* scope) {
   if (scope != NULL) {
     int source_position = frame_inspector_->GetSourcePosition();
-    scope->GetNestedScopeChain(isolate_, &nested_scope_chain_, source_position);
+    GetNestedScopeChain(isolate_, scope, source_position);
   } else {
     // A failed reparse indicates that the preparser has diverged from the
     // parser or that the preparse data given to the initial parse has been
@@ -425,9 +441,8 @@
 
 void ScopeIterator::CollectNonLocals(Scope* scope) {
   if (scope != NULL) {
-    DCHECK_NULL(non_locals_);
-    non_locals_ = new HashMap(InternalizedStringMatch);
-    scope->CollectNonLocals(non_locals_);
+    DCHECK(non_locals_.is_null());
+    non_locals_ = scope->CollectNonLocals(StringSet::New(isolate_));
   }
 }
 
@@ -532,6 +547,16 @@
   return catch_scope;
 }
 
+// Retrieve the with-context extension object. If the extension object is
+// a proxy, return an empty object.
+Handle<JSObject> ScopeIterator::WithContextExtension() {
+  Handle<Context> context = CurrentContext();
+  DCHECK(context->IsWithContext());
+  if (context->extension_receiver()->IsJSProxy()) {
+    return isolate_->factory()->NewJSObjectWithNullProto();
+  }
+  return handle(JSObject::cast(context->extension_receiver()));
+}
 
 // Create a plain JSObject which materializes the block scope for the specified
 // block context.
@@ -541,7 +566,7 @@
 
   Handle<Context> context = Handle<Context>::null();
   if (!nested_scope_chain_.is_empty()) {
-    Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+    Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
     frame_inspector_->MaterializeStackLocals(block_scope, scope_info);
     if (scope_info->HasContext()) context = CurrentContext();
   } else {
@@ -815,5 +840,24 @@
   return true;
 }
 
+void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
+                                        int position) {
+  if (!scope->is_eval_scope()) {
+    nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate),
+                                              scope->start_position(),
+                                              scope->end_position()));
+  }
+  for (int i = 0; i < scope->inner_scopes()->length(); i++) {
+    Scope* inner_scope = scope->inner_scopes()->at(i);
+    int beg_pos = inner_scope->start_position();
+    int end_pos = inner_scope->end_position();
+    DCHECK(beg_pos >= 0 && end_pos >= 0);
+    if (beg_pos <= position && position < end_pos) {
+      GetNestedScopeChain(isolate, inner_scope, position);
+      return;
+    }
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/debug/debug-scopes.h b/src/debug/debug-scopes.h
index fbdf632..4e95fc4 100644
--- a/src/debug/debug-scopes.h
+++ b/src/debug/debug-scopes.h
@@ -31,7 +31,10 @@
   static const int kScopeDetailsTypeIndex = 0;
   static const int kScopeDetailsObjectIndex = 1;
   static const int kScopeDetailsNameIndex = 2;
-  static const int kScopeDetailsSize = 3;
+  static const int kScopeDetailsStartPositionIndex = 3;
+  static const int kScopeDetailsEndPositionIndex = 4;
+  static const int kScopeDetailsFunctionIndex = 5;
+  static const int kScopeDetailsSize = 6;
 
   enum Option { DEFAULT, IGNORE_NESTED_SCOPES, COLLECT_NON_LOCALS };
 
@@ -40,8 +43,6 @@
 
   ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
 
-  ~ScopeIterator() { delete non_locals_; }
-
   MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
 
   // More scopes?
@@ -72,10 +73,8 @@
   // be an actual context.
   Handle<Context> CurrentContext();
 
-  // Populate the list with collected non-local variable names.
-  void GetNonLocals(List<Handle<String> >* list_out);
-
-  bool ThisIsNonLocal();
+  // Populate the set with collected non-local variable names.
+  Handle<StringSet> GetNonLocals();
 
 #ifdef DEBUG
   // Debug print of the content of the current scope.
@@ -83,11 +82,19 @@
 #endif
 
  private:
+  struct ExtendedScopeInfo {
+    ExtendedScopeInfo(Handle<ScopeInfo> info, int start, int end)
+        : scope_info(info), start_position(start), end_position(end) {}
+    Handle<ScopeInfo> scope_info;
+    int start_position;
+    int end_position;
+  };
+
   Isolate* isolate_;
   FrameInspector* const frame_inspector_;
   Handle<Context> context_;
-  List<Handle<ScopeInfo> > nested_scope_chain_;
-  HashMap* non_locals_;
+  List<ExtendedScopeInfo> nested_scope_chain_;
+  Handle<StringSet> non_locals_;
   bool seen_script_scope_;
   bool failed_;
 
@@ -99,24 +106,19 @@
     return Handle<JSFunction>::cast(frame_inspector_->GetFunction());
   }
 
-  static bool InternalizedStringMatch(void* key1, void* key2) {
-    Handle<String> s1(reinterpret_cast<String**>(key1));
-    Handle<String> s2(reinterpret_cast<String**>(key2));
-    DCHECK(s1->IsInternalizedString());
-    DCHECK(s2->IsInternalizedString());
-    return s1.is_identical_to(s2);
-  }
-
   void RetrieveScopeChain(Scope* scope);
 
   void CollectNonLocals(Scope* scope);
 
+  void UnwrapEvaluationContext();
+
   MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
   MUST_USE_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
   MUST_USE_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
   Handle<JSObject> MaterializeClosure();
   Handle<JSObject> MaterializeCatchScope();
   Handle<JSObject> MaterializeBlockScope();
+  Handle<JSObject> WithContextExtension();
 
   bool SetLocalVariableValue(Handle<String> variable_name,
                              Handle<Object> new_value);
@@ -140,6 +142,13 @@
                                          Handle<JSObject> scope_object,
                                          KeyCollectionType type);
 
+  // Get the chain of nested scopes within this scope for the source statement
+  // position. The scopes will be added to the list from the outermost scope to
+  // the innermost scope. Only nested block, catch or with scopes are tracked
+  // and will be returned, but no inner function scopes.
+  void GetNestedScopeChain(Isolate* isolate, Scope* scope,
+                           int statement_position);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
 
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index 93c914c..6e94012 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -16,7 +16,6 @@
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/global-handles.h"
-#include "src/interpreter/bytecodes.h"
 #include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
 #include "src/list.h"
@@ -84,12 +83,23 @@
       position_(1),
       statement_position_(1) {}
 
+int BreakLocation::Iterator::ReturnPosition() {
+  if (debug_info_->shared()->HasSourceCode()) {
+    return debug_info_->shared()->end_position() -
+           debug_info_->shared()->start_position() - 1;
+  } else {
+    return 0;
+  }
+}
+
 BreakLocation::CodeIterator::CodeIterator(Handle<DebugInfo> debug_info,
                                           BreakLocatorType type)
     : Iterator(debug_info),
       reloc_iterator_(debug_info->abstract_code()->GetCode(),
                       GetModeMask(type)) {
-  if (!Done()) Next();
+  // There is at least one break location.
+  DCHECK(!Done());
+  Next();
 }
 
 int BreakLocation::CodeIterator::GetModeMask(BreakLocatorType type) {
@@ -98,6 +108,9 @@
   mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
   mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
   mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
+  if (isolate()->is_tail_call_elimination_enabled()) {
+    mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL);
+  }
   if (type == ALL_BREAK_LOCATIONS) {
     mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
     mask |= RelocInfo::ModeMask(RelocInfo::DEBUGGER_STATEMENT);
@@ -137,13 +150,7 @@
 
     if (RelocInfo::IsDebugBreakSlotAtReturn(rmode())) {
       // Set the positions to the end of the function.
-      if (debug_info_->shared()->HasSourceCode()) {
-        position_ = debug_info_->shared()->end_position() -
-                    debug_info_->shared()->start_position() - 1;
-      } else {
-        position_ = 0;
-      }
-      statement_position_ = position_;
+      statement_position_ = position_ = ReturnPosition();
     }
 
     break;
@@ -157,6 +164,10 @@
     type = DEBUG_BREAK_SLOT_AT_RETURN;
   } else if (RelocInfo::IsDebugBreakSlotAtCall(rmode())) {
     type = DEBUG_BREAK_SLOT_AT_CALL;
+  } else if (RelocInfo::IsDebugBreakSlotAtTailCall(rmode())) {
+    type = isolate()->is_tail_call_elimination_enabled()
+               ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
+               : DEBUG_BREAK_SLOT_AT_CALL;
   } else if (RelocInfo::IsDebuggerStatement(rmode())) {
     type = DEBUGGER_STATEMENT;
   } else if (RelocInfo::IsDebugBreakSlot(rmode())) {
@@ -171,11 +182,14 @@
 BreakLocation::BytecodeArrayIterator::BytecodeArrayIterator(
     Handle<DebugInfo> debug_info, BreakLocatorType type)
     : Iterator(debug_info),
-      source_position_iterator_(
-          debug_info->abstract_code()->GetBytecodeArray()),
+      source_position_iterator_(debug_info->abstract_code()
+                                    ->GetBytecodeArray()
+                                    ->source_position_table()),
       break_locator_type_(type),
       start_position_(debug_info->shared()->start_position()) {
-  if (!Done()) Next();
+  // There is at least one break location.
+  DCHECK(!Done());
+  Next();
 }
 
 void BreakLocation::BytecodeArrayIterator::Next() {
@@ -192,7 +206,6 @@
     }
     DCHECK(position_ >= 0);
     DCHECK(statement_position_ >= 0);
-    break_index_++;
 
     enum DebugBreakType type = GetDebugBreakType();
     if (type == NOT_DEBUG_BREAK) continue;
@@ -200,11 +213,14 @@
     if (break_locator_type_ == ALL_BREAK_LOCATIONS) break;
 
     DCHECK_EQ(CALLS_AND_RETURNS, break_locator_type_);
-    if (type == DEBUG_BREAK_SLOT_AT_CALL ||
-        type == DEBUG_BREAK_SLOT_AT_RETURN) {
+    if (type == DEBUG_BREAK_SLOT_AT_CALL) break;
+    if (type == DEBUG_BREAK_SLOT_AT_RETURN) {
+      DCHECK_EQ(ReturnPosition(), position_);
+      DCHECK_EQ(ReturnPosition(), statement_position_);
       break;
     }
   }
+  break_index_++;
 }
 
 BreakLocation::DebugBreakType
@@ -217,6 +233,10 @@
     return DEBUGGER_STATEMENT;
   } else if (bytecode == interpreter::Bytecode::kReturn) {
     return DEBUG_BREAK_SLOT_AT_RETURN;
+  } else if (bytecode == interpreter::Bytecode::kTailCall) {
+    return isolate()->is_tail_call_elimination_enabled()
+               ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
+               : DEBUG_BREAK_SLOT_AT_CALL;
   } else if (interpreter::Bytecodes::IsCallOrNew(bytecode)) {
     return DEBUG_BREAK_SLOT_AT_CALL;
   } else if (source_position_iterator_.is_statement()) {
@@ -261,21 +281,6 @@
   return FromCodeOffset(debug_info, call_offset);
 }
 
-// Find the break point at the supplied address, or the closest one before
-// the address.
-void BreakLocation::FromCodeOffsetSameStatement(
-    Handle<DebugInfo> debug_info, int offset, List<BreakLocation>* result_out) {
-  int break_index = BreakIndexFromCodeOffset(debug_info, offset);
-  base::SmartPointer<Iterator> it(GetIterator(debug_info));
-  it->SkipTo(break_index);
-  int statement_position = it->statement_position();
-  while (!it->Done() && it->statement_position() == statement_position) {
-    result_out->Add(it->GetBreakLocation());
-    it->Next();
-  }
-}
-
-
 void BreakLocation::AllForStatementPosition(Handle<DebugInfo> debug_info,
                                             int statement_position,
                                             List<BreakLocation>* result_out) {
@@ -479,6 +484,7 @@
   thread_local_.last_fp_ = 0;
   thread_local_.target_fp_ = 0;
   thread_local_.step_in_enabled_ = false;
+  thread_local_.return_value_ = Handle<Object>();
   // TODO(isolates): frames_are_dropped_?
   base::NoBarrier_Store(&thread_local_.current_debug_scope_,
                         static_cast<base::AtomicWord>(0));
@@ -565,10 +571,8 @@
   debug_context_ = Handle<Context>();
 }
 
-
-void Debug::Break(Arguments args, JavaScriptFrame* frame) {
+void Debug::Break(JavaScriptFrame* frame) {
   HandleScope scope(isolate_);
-  DCHECK(args.length() == 0);
 
   // Initialize LiveEdit.
   LiveEdit::InitializeThreadLocal(this);
@@ -611,22 +615,26 @@
   Address target_fp = thread_local_.target_fp_;
   Address last_fp = thread_local_.last_fp_;
 
-  bool step_break = true;
+  bool step_break = false;
   switch (step_action) {
     case StepNone:
       return;
     case StepOut:
       // Step out has not reached the target frame yet.
       if (current_fp < target_fp) return;
+      step_break = true;
       break;
     case StepNext:
       // Step next should not break in a deeper frame.
       if (current_fp < target_fp) return;
+      // For step-next, a tail call is like a return and should break.
+      step_break = location.IsTailCall();
     // Fall through.
     case StepIn: {
       FrameSummary summary = GetFirstFrameSummary(frame);
       int offset = summary.code_offset();
-      step_break = location.IsReturn() || (current_fp != last_fp) ||
+      step_break = step_break || location.IsReturn() ||
+                   (current_fp != last_fp) ||
                    (thread_local_.last_statement_position_ !=
                     location.abstract_code()->SourceStatementPosition(offset));
       break;
@@ -722,9 +730,10 @@
                                         Handle<Object> args[]) {
   PostponeInterruptsScope no_interrupts(isolate_);
   AssertDebugContext();
-  Handle<Object> holder = isolate_->natives_utils_object();
+  Handle<JSReceiver> holder =
+      Handle<JSReceiver>::cast(isolate_->natives_utils_object());
   Handle<JSFunction> fun = Handle<JSFunction>::cast(
-      Object::GetProperty(isolate_, holder, name).ToHandleChecked());
+      JSReceiver::GetProperty(isolate_, holder, name).ToHandleChecked());
   Handle<Object> undefined = isolate_->factory()->undefined_value();
   return Execution::TryCall(isolate_, fun, undefined, argc, args);
 }
@@ -1021,8 +1030,10 @@
   BreakLocation location =
       BreakLocation::FromCodeOffset(debug_info, call_offset);
 
-  // At a return statement we will step out either way.
+  // Any step at a return is a step-out.
   if (location.IsReturn()) step_action = StepOut;
+  // A step-next at a tail call is a step-out.
+  if (location.IsTailCall() && step_action == StepNext) step_action = StepOut;
 
   thread_local_.last_statement_position_ =
       debug_info->abstract_code()->SourceStatementPosition(
@@ -1309,6 +1320,7 @@
   // Make sure we abort incremental marking.
   isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                                       "prepare for break points");
+
   bool is_interpreted = shared->HasBytecodeArray();
 
   {
@@ -1514,7 +1526,7 @@
 
   if (function.is_null()) {
     DCHECK(shared->HasDebugCode());
-  } else if (!Compiler::Compile(function, CLEAR_EXCEPTION)) {
+  } else if (!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
     return false;
   }
 
@@ -1572,24 +1584,11 @@
   UNREACHABLE();
 }
 
-Object* Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
-  if (frame->is_interpreted()) {
-    // Find the handler from the original bytecode array.
-    InterpretedFrame* interpreted_frame =
-        reinterpret_cast<InterpretedFrame*>(frame);
-    SharedFunctionInfo* shared = interpreted_frame->function()->shared();
-    BytecodeArray* bytecode_array = shared->bytecode_array();
-    int bytecode_offset = interpreted_frame->GetBytecodeOffset();
-    interpreter::Bytecode bytecode =
-        interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
-    return isolate_->interpreter()->GetBytecodeHandler(bytecode);
-  } else {
-    after_break_target_ = NULL;
-    if (!LiveEdit::SetAfterBreakTarget(this)) {
-      // Continue just after the slot.
-      after_break_target_ = frame->pc();
-    }
-    return isolate_->heap()->undefined_value();
+void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
+  after_break_target_ = NULL;
+  if (!LiveEdit::SetAfterBreakTarget(this)) {
+    // Continue just after the slot.
+    after_break_target_ = frame->pc();
   }
 }
 
@@ -1610,7 +1609,7 @@
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
   BreakLocation location =
       BreakLocation::FromCodeOffset(debug_info, summary.code_offset());
-  return location.IsReturn();
+  return location.IsReturn() || location.IsTailCall();
 }
 
 
@@ -1657,45 +1656,6 @@
 }
 
 
-void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
-                               List<int>* results_out) {
-  FrameSummary summary = GetFirstFrameSummary(frame);
-
-  Handle<JSFunction> fun = Handle<JSFunction>(summary.function());
-  Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(fun->shared());
-
-  if (!EnsureDebugInfo(shared, fun)) return;
-
-  Handle<DebugInfo> debug_info(shared->GetDebugInfo());
-  // Refresh frame summary if the code has been recompiled for debugging.
-  if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
-    summary = GetFirstFrameSummary(frame);
-  }
-
-  int call_offset =
-      CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
-  List<BreakLocation> locations;
-  BreakLocation::FromCodeOffsetSameStatement(debug_info, call_offset,
-                                             &locations);
-
-  for (BreakLocation location : locations) {
-    if (location.code_offset() <= summary.code_offset()) {
-      // The break point is near our pc. Could be a step-in possibility,
-      // that is currently taken by active debugger call.
-      if (break_frame_id() == StackFrame::NO_ID) {
-        continue;  // We are not stepping.
-      } else {
-        JavaScriptFrameIterator frame_it(isolate_, break_frame_id());
-        // If our frame is a top frame and we are stepping, we can do step-in
-        // at this place.
-        if (frame_it.frame()->id() != frame_id) continue;
-      }
-    }
-    if (location.IsCall()) results_out->Add(location.position());
-  }
-}
-
-
 void Debug::RecordEvalCaller(Handle<Script> script) {
   script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
   // For eval scripts add information on the function from which eval was
@@ -1748,13 +1708,6 @@
 }
 
 
-MaybeHandle<Object> Debug::MakePromiseEvent(Handle<JSObject> event_data) {
-  // Create the promise event object.
-  Handle<Object> argv[] = { event_data };
-  return CallFunction("MakePromiseEvent", arraysize(argv), argv);
-}
-
-
 MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
   // Create the async task event object.
   Handle<Object> argv[] = { task_event };
@@ -1852,6 +1805,10 @@
   // Bail out if there is no listener for this event
   if (ignore_events()) return;
 
+#ifdef DEBUG
+  PrintBreakLocation();
+#endif  // DEBUG
+
   HandleScope scope(isolate_);
   // Create the event data object.
   Handle<Object> event_data;
@@ -1881,25 +1838,6 @@
 }
 
 
-void Debug::OnPromiseEvent(Handle<JSObject> data) {
-  if (in_debug_scope() || ignore_events()) return;
-
-  HandleScope scope(isolate_);
-  DebugScope debug_scope(this);
-  if (debug_scope.failed()) return;
-
-  // Create the script collected state object.
-  Handle<Object> event_data;
-  // Bail out and don't call debugger if exception.
-  if (!MakePromiseEvent(data).ToHandle(&event_data)) return;
-
-  // Process debug event.
-  ProcessDebugEvent(v8::PromiseEvent,
-                    Handle<JSObject>::cast(event_data),
-                    true);
-}
-
-
 void Debug::OnAsyncTaskEvent(Handle<JSObject> data) {
   if (in_debug_scope() || ignore_events()) return;
 
@@ -2049,7 +1987,6 @@
     case v8::NewFunction:
     case v8::BeforeCompile:
     case v8::CompileError:
-    case v8::PromiseEvent:
     case v8::AsyncTaskEvent:
       break;
     case v8::Exception:
@@ -2084,16 +2021,19 @@
   // DebugCommandProcessor goes here.
   bool running = auto_continue;
 
-  Handle<Object> cmd_processor_ctor = Object::GetProperty(
-      isolate_, exec_state, "debugCommandProcessor").ToHandleChecked();
+  Handle<Object> cmd_processor_ctor =
+      JSReceiver::GetProperty(isolate_, exec_state, "debugCommandProcessor")
+          .ToHandleChecked();
   Handle<Object> ctor_args[] = { isolate_->factory()->ToBoolean(running) };
-  Handle<Object> cmd_processor = Execution::Call(
-      isolate_, cmd_processor_ctor, exec_state, 1, ctor_args).ToHandleChecked();
+  Handle<JSReceiver> cmd_processor = Handle<JSReceiver>::cast(
+      Execution::Call(isolate_, cmd_processor_ctor, exec_state, 1, ctor_args)
+          .ToHandleChecked());
   Handle<JSFunction> process_debug_request = Handle<JSFunction>::cast(
-      Object::GetProperty(
-          isolate_, cmd_processor, "processDebugRequest").ToHandleChecked());
-  Handle<Object> is_running = Object::GetProperty(
-      isolate_, cmd_processor, "isRunning").ToHandleChecked();
+      JSReceiver::GetProperty(isolate_, cmd_processor, "processDebugRequest")
+          .ToHandleChecked());
+  Handle<Object> is_running =
+      JSReceiver::GetProperty(isolate_, cmd_processor, "isRunning")
+          .ToHandleChecked();
 
   // Process requests from the debugger.
   do {
@@ -2313,6 +2253,44 @@
   OnDebugBreak(isolate_->factory()->undefined_value(), debug_command_only);
 }
 
+#ifdef DEBUG
+void Debug::PrintBreakLocation() {
+  if (!FLAG_print_break_location) return;
+  HandleScope scope(isolate_);
+  JavaScriptFrameIterator iterator(isolate_);
+  if (iterator.done()) return;
+  JavaScriptFrame* frame = iterator.frame();
+  FrameSummary summary = GetFirstFrameSummary(frame);
+  int source_position =
+      summary.abstract_code()->SourcePosition(summary.code_offset());
+  Handle<Object> script_obj(summary.function()->shared()->script(), isolate_);
+  PrintF("[debug] break in function '");
+  summary.function()->PrintName();
+  PrintF("'.\n");
+  if (script_obj->IsScript()) {
+    Handle<Script> script = Handle<Script>::cast(script_obj);
+    Handle<String> source(String::cast(script->source()));
+    Script::InitLineEnds(script);
+    int line = Script::GetLineNumber(script, source_position);
+    int column = Script::GetColumnNumber(script, source_position);
+    Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+    int line_start =
+        line == 0 ? 0 : Smi::cast(line_ends->get(line - 1))->value() + 1;
+    int line_end = Smi::cast(line_ends->get(line))->value();
+    DisallowHeapAllocation no_gc;
+    String::FlatContent content = source->GetFlatContent();
+    if (content.IsOneByte()) {
+      PrintF("[debug] %.*s\n", line_end - line_start,
+             content.ToOneByteVector().start() + line_start);
+      PrintF("[debug] ");
+      for (int i = 0; i < column; i++) PrintF(" ");
+      PrintF("^\n");
+    } else {
+      PrintF("[debug] at line %d column %d\n", line, column);
+    }
+  }
+}
+#endif  // DEBUG
 
 DebugScope::DebugScope(Debug* debug)
     : debug_(debug),
@@ -2324,9 +2302,10 @@
   base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
                         reinterpret_cast<base::AtomicWord>(this));
 
-  // Store the previous break id and frame id.
+  // Store the previous break id, frame id and return value.
   break_id_ = debug_->break_id();
   break_frame_id_ = debug_->break_frame_id();
+  return_value_ = debug_->return_value();
 
   // Create the new break info. If there is no JavaScript frames there is no
   // break frame id.
@@ -2364,6 +2343,7 @@
   // Restore to the previous break state.
   debug_->thread_local_.break_frame_id_ = break_frame_id_;
   debug_->thread_local_.break_id_ = break_id_;
+  debug_->thread_local_.return_value_ = return_value_;
 
   debug_->UpdateState();
 }
@@ -2448,8 +2428,9 @@
 
   if (IsEvent()) {
     // Call toJSONProtocol on the debug event object.
-    Handle<Object> fun = Object::GetProperty(
-        isolate, event_data_, "toJSONProtocol").ToHandleChecked();
+    Handle<Object> fun =
+        JSReceiver::GetProperty(isolate, event_data_, "toJSONProtocol")
+            .ToHandleChecked();
     if (!fun->IsJSFunction()) {
       return v8::Local<v8::String>();
     }
diff --git a/src/debug/debug.h b/src/debug/debug.h
index 81db9e5..501de63 100644
--- a/src/debug/debug.h
+++ b/src/debug/debug.h
@@ -70,10 +70,6 @@
   static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
                                  JavaScriptFrame* frame);
 
-  static void FromCodeOffsetSameStatement(Handle<DebugInfo> debug_info,
-                                          int offset,
-                                          List<BreakLocation>* result_out);
-
   static void AllForStatementPosition(Handle<DebugInfo> debug_info,
                                       int statement_position,
                                       List<BreakLocation>* result_out);
@@ -85,6 +81,9 @@
 
   inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
   inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
+  inline bool IsTailCall() const {
+    return type_ == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
+  }
   inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
   inline bool IsDebuggerStatement() const {
     return type_ == DEBUGGER_STATEMENT;
@@ -117,7 +116,8 @@
     DEBUGGER_STATEMENT,
     DEBUG_BREAK_SLOT,
     DEBUG_BREAK_SLOT_AT_CALL,
-    DEBUG_BREAK_SLOT_AT_RETURN
+    DEBUG_BREAK_SLOT_AT_RETURN,
+    DEBUG_BREAK_SLOT_AT_TAIL_CALL,
   };
 
   BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
@@ -142,6 +142,9 @@
 
    protected:
     explicit Iterator(Handle<DebugInfo> debug_info);
+    int ReturnPosition();
+
+    Isolate* isolate() { return debug_info_->GetIsolate(); }
 
     Handle<DebugInfo> debug_info_;
     int break_index_;
@@ -169,7 +172,7 @@
     }
 
    private:
-    static int GetModeMask(BreakLocatorType type);
+    int GetModeMask(BreakLocatorType type);
     RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
     RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
 
@@ -414,7 +417,6 @@
   void OnCompileError(Handle<Script> script);
   void OnBeforeCompile(Handle<Script> script);
   void OnAfterCompile(Handle<Script> script);
-  void OnPromiseEvent(Handle<JSObject> data);
   void OnAsyncTaskEvent(Handle<JSObject> data);
 
   // API facing.
@@ -430,8 +432,8 @@
 
   // Internal logic
   bool Load();
-  void Break(Arguments args, JavaScriptFrame*);
-  Object* SetAfterBreakTarget(JavaScriptFrame* frame);
+  void Break(JavaScriptFrame* frame);
+  void SetAfterBreakTarget(JavaScriptFrame* frame);
 
   // Scripts handling.
   Handle<FixedArray> GetLoadedScripts();
@@ -459,9 +461,6 @@
   void ClearStepOut();
   void EnableStepIn();
 
-  void GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
-                          List<int>* results_out);
-
   bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
 
   // Returns whether the operation succeeded. Compilation can only be triggered
@@ -530,6 +529,11 @@
   StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
   int break_id() { return thread_local_.break_id_; }
 
+  Handle<Object> return_value() { return thread_local_.return_value_; }
+  void set_return_value(Handle<Object> value) {
+    thread_local_.return_value_ = value;
+  }
+
   // Support for embedding into generated code.
   Address is_active_address() {
     return reinterpret_cast<Address>(&is_active_);
@@ -575,8 +579,6 @@
       Handle<Object> promise);
   MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
       Handle<Script> script, v8::DebugEvent type);
-  MUST_USE_RESULT MaybeHandle<Object> MakePromiseEvent(
-      Handle<JSObject> promise_event);
   MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
       Handle<JSObject> task_event);
 
@@ -617,6 +619,8 @@
 
   void ThreadInit();
 
+  void PrintBreakLocation();
+
   // Global handles.
   Handle<Context> debug_context_;
   Handle<Object> event_listener_;
@@ -682,6 +686,10 @@
     // Stores the way how LiveEdit has patched the stack. It is used when
     // debugger returns control back to user script.
     LiveEdit::FrameDropMode frame_drop_mode_;
+
+    // Value of accumulator in interpreter frames. In non-interpreter frames
+    // this value will be the hole.
+    Handle<Object> return_value_;
   };
 
   // Storage location for registers when handling debug break calls
@@ -723,6 +731,7 @@
   DebugScope* prev_;               // Previous scope if entered recursively.
   StackFrame::Id break_frame_id_;  // Previous break frame id.
   int break_id_;                   // Previous break id.
+  Handle<Object> return_value_;    // Previous result.
   bool failed_;                    // Did the debug context fail to load?
   SaveContext save_;               // Saves previous context.
   PostponeInterruptsScope no_termination_exceptons_;
diff --git a/src/debug/debug.js b/src/debug/debug.js
index 6849bf5..7f06ca1 100644
--- a/src/debug/debug.js
+++ b/src/debug/debug.js
@@ -51,8 +51,7 @@
                      BeforeCompile: 4,
                      AfterCompile: 5,
                      CompileError: 6,
-                     PromiseEvent: 7,
-                     AsyncTaskEvent: 8 };
+                     AsyncTaskEvent: 7 };
 
 // Types of exceptions that can be broken upon.
 Debug.ExceptionBreak = { Caught : 0,
@@ -1141,39 +1140,6 @@
 }
 
 
-function MakePromiseEvent(event_data) {
-  return new PromiseEvent(event_data);
-}
-
-
-function PromiseEvent(event_data) {
-  this.promise_ = event_data.promise;
-  this.parentPromise_ = event_data.parentPromise;
-  this.status_ = event_data.status;
-  this.value_ = event_data.value;
-}
-
-
-PromiseEvent.prototype.promise = function() {
-  return MakeMirror(this.promise_);
-}
-
-
-PromiseEvent.prototype.parentPromise = function() {
-  return MakeMirror(this.parentPromise_);
-}
-
-
-PromiseEvent.prototype.status = function() {
-  return this.status_;
-}
-
-
-PromiseEvent.prototype.value = function() {
-  return MakeMirror(this.value_);
-}
-
-
 function MakeAsyncTaskEvent(event_data) {
   return new AsyncTaskEvent(event_data);
 }
@@ -2517,7 +2483,6 @@
   "MakeExceptionEvent", MakeExceptionEvent,
   "MakeBreakEvent", MakeBreakEvent,
   "MakeCompileEvent", MakeCompileEvent,
-  "MakePromiseEvent", MakePromiseEvent,
   "MakeAsyncTaskEvent", MakeAsyncTaskEvent,
   "IsBreakPointTriggered", IsBreakPointTriggered,
   "UpdateScriptBreakPoints", UpdateScriptBreakPoints,
diff --git a/src/debug/ia32/debug-ia32.cc b/src/debug/ia32/debug-ia32.cc
index 95f2bc6..056407f 100644
--- a/src/debug/ia32/debug-ia32.cc
+++ b/src/debug/ia32/debug-ia32.cc
@@ -68,9 +68,15 @@
     }
     __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
 
-    if (mode == SAVE_RESULT_REGISTER) __ push(eax);
-
-    __ Move(eax, Immediate(0));  // No arguments.
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ push(eax);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
+    __ Move(eax, Immediate(1));
     __ mov(ebx,
            Immediate(ExternalReference(
                Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -81,12 +87,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; ++i) {
         Register reg = {JSCallerSavedCode(i)};
-        __ Move(reg, Immediate(kDebugZapValue));
+        // Do not clobber eax if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function.
+        if (!(reg.is(eax) && (mode == SAVE_RESULT_REGISTER))) {
+          __ Move(reg, Immediate(kDebugZapValue));
+        }
       }
     }
 
-    if (mode == SAVE_RESULT_REGISTER) __ pop(eax);
-
     __ pop(ebx);
     // We divide stored value by 2 (untagging) and multiply it by word's size.
     STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
@@ -110,9 +118,12 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // We do not know our frame height, but set esp based on ebp.
-  __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
+  __ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
   __ pop(edi);  // Function.
+  __ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset));  // INTERNAL
+                                                                     // frame
+                                                                     // marker
+                                                                     // and code
   __ pop(ebp);
 
   ParameterCount dummy(0);
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index 91c990d..78ed6f1 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -710,20 +710,18 @@
 
   void FunctionDone() {
     HandleScope scope(isolate());
-    FunctionInfoWrapper info =
-        FunctionInfoWrapper::cast(
-            *Object::GetElement(
-                isolate(), result_, current_parent_index_).ToHandleChecked());
+    FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+        *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
+             .ToHandleChecked());
     current_parent_index_ = info.GetParentIndex();
   }
 
   // Saves only function code, because for a script function we
   // may never create a SharedFunctionInfo object.
   void FunctionCode(Handle<Code> function_code) {
-    FunctionInfoWrapper info =
-        FunctionInfoWrapper::cast(
-            *Object::GetElement(
-                isolate(), result_, current_parent_index_).ToHandleChecked());
+    FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+        *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
+             .ToHandleChecked());
     info.SetFunctionCode(function_code,
                          Handle<HeapObject>(isolate()->heap()->null_value()));
   }
@@ -735,10 +733,9 @@
     if (!shared->IsSharedFunctionInfo()) {
       return;
     }
-    FunctionInfoWrapper info =
-        FunctionInfoWrapper::cast(
-            *Object::GetElement(
-                isolate(), result_, current_parent_index_).ToHandleChecked());
+    FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+        *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
+             .ToHandleChecked());
     info.SetFunctionCode(Handle<Code>(shared->code()),
                          Handle<HeapObject>(shared->scope_info()));
     info.SetSharedFunctionInfo(shared);
@@ -1185,21 +1182,22 @@
   // TODO(635): binary search may be used here
   for (int i = 0; i < array_len; i += 3) {
     HandleScope scope(isolate);
-    Handle<Object> element = Object::GetElement(
-        isolate, position_change_array, i).ToHandleChecked();
+    Handle<Object> element =
+        JSReceiver::GetElement(isolate, position_change_array, i)
+            .ToHandleChecked();
     CHECK(element->IsSmi());
     int chunk_start = Handle<Smi>::cast(element)->value();
     if (original_position < chunk_start) {
       break;
     }
-    element = Object::GetElement(
-        isolate, position_change_array, i + 1).ToHandleChecked();
+    element = JSReceiver::GetElement(isolate, position_change_array, i + 1)
+                  .ToHandleChecked();
     CHECK(element->IsSmi());
     int chunk_end = Handle<Smi>::cast(element)->value();
     // Position mustn't be inside a chunk.
     DCHECK(original_position >= chunk_end);
-    element = Object::GetElement(
-        isolate, position_change_array, i + 2).ToHandleChecked();
+    element = JSReceiver::GetElement(isolate, position_change_array, i + 2)
+                  .ToHandleChecked();
     CHECK(element->IsSmi());
     int chunk_changed_end = Handle<Smi>::cast(element)->value();
     position_diff = chunk_changed_end - chunk_end;
@@ -1448,7 +1446,7 @@
   for (int i = 0; i < len; i++) {
     HandleScope scope(isolate);
     Handle<Object> element =
-        Object::GetElement(isolate, shared_info_array, i).ToHandleChecked();
+        JSReceiver::GetElement(isolate, shared_info_array, i).ToHandleChecked();
     Handle<JSValue> jsvalue = Handle<JSValue>::cast(element);
     Handle<SharedFunctionInfo> shared =
         UnwrapSharedFunctionInfoFromJSValue(jsvalue);
@@ -1485,26 +1483,22 @@
 
 // Initializes an artificial stack frame. The data it contains is used for:
 //  a. successful work of frame dropper code which eventually gets control,
-//  b. being compatible with regular stack structure for various stack
+//  b. being compatible with a typed frame structure for various stack
 //     iterators.
-// Frame structure (conforms InternalFrame structure):
+// Frame structure (conforms to InternalFrame structure):
+//   -- function
 //   -- code
-//   -- SMI maker
-//   -- function (slot is called "context")
+//   -- SMI marker
 //   -- frame base
 static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
                                    Handle<Code> code) {
   DCHECK(bottom_js_frame->is_java_script());
-
   Address fp = bottom_js_frame->fp();
-
-  // Move function pointer into "context" slot.
-  Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
-      Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
-
-  Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
-  Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
+  Memory::Object_at(fp + FrameDropperFrameConstants::kFunctionOffset) =
+      Memory::Object_at(fp + StandardFrameConstants::kFunctionOffset);
+  Memory::Object_at(fp + FrameDropperFrameConstants::kFrameTypeOffset) =
       Smi::FromInt(StackFrame::INTERNAL);
+  Memory::Object_at(fp + FrameDropperFrameConstants::kCodeOffset) = *code;
 }
 
 
@@ -1566,9 +1560,9 @@
   }
 
   Address unused_stack_top = top_frame->sp();
-  int new_frame_size = LiveEdit::kFrameDropperFrameSize * kPointerSize;
-  Address unused_stack_bottom = bottom_js_frame->fp()
-      - new_frame_size + kPointerSize;  // Bigger address end is exclusive.
+  Address unused_stack_bottom =
+      bottom_js_frame->fp() - FrameDropperFrameConstants::kFixedFrameSize +
+      2 * kPointerSize;  // Bigger address end is exclusive.
 
   Address* top_frame_pc_address = top_frame->pc_address();
 
@@ -1580,8 +1574,9 @@
       int shortage_bytes =
           static_cast<int>(unused_stack_top - unused_stack_bottom);
 
-      Address padding_start = pre_top_frame->fp() -
-          LiveEdit::kFrameDropperFrameSize * kPointerSize;
+      Address padding_start =
+          pre_top_frame->fp() -
+          (FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
 
       Address padding_pointer = padding_start;
       Smi* padding_object = Smi::FromInt(LiveEdit::kFramePaddingValue);
@@ -1601,7 +1596,7 @@
 
       MemMove(padding_start + kPointerSize - shortage_bytes,
               padding_start + kPointerSize,
-              LiveEdit::kFrameDropperFrameSize * kPointerSize);
+              FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
 
       pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
       pre_pre_frame->SetCallerFp(pre_top_frame->fp());
@@ -1664,14 +1659,16 @@
     for (int i = 0; i < len; i++) {
       HandleScope scope(isolate);
       Handle<Object> old_element =
-          Object::GetElement(isolate, old_shared_array_, i).ToHandleChecked();
+          JSReceiver::GetElement(isolate, old_shared_array_, i)
+              .ToHandleChecked();
       if (!old_shared.is_identical_to(UnwrapSharedFunctionInfoFromJSValue(
               Handle<JSValue>::cast(old_element)))) {
         continue;
       }
 
       Handle<Object> new_element =
-          Object::GetElement(isolate, new_shared_array_, i).ToHandleChecked();
+          JSReceiver::GetElement(isolate, new_shared_array_, i)
+              .ToHandleChecked();
       if (new_element->IsUndefined()) return false;
       Handle<SharedFunctionInfo> new_shared =
           UnwrapSharedFunctionInfoFromJSValue(
@@ -1703,7 +1700,7 @@
                                                      TARGET& target,  // NOLINT
                                                      bool do_drop) {
   Debug* debug = isolate->debug();
-  Zone zone;
+  Zone zone(isolate->allocator());
   Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
 
 
@@ -1824,7 +1821,7 @@
   // Replace "blocked on active" with "replaced on active" status.
   for (int i = 0; i < array_len; i++) {
     Handle<Object> obj =
-        Object::GetElement(isolate, result, i).ToHandleChecked();
+        JSReceiver::GetElement(isolate, result, i).ToHandleChecked();
     if (*obj == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
       Handle<Object> replaced(
           Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
@@ -1909,8 +1906,9 @@
       FixedArray::cast(old_shared_array->elements()));
 
   Handle<JSArray> result = isolate->factory()->NewJSArray(len);
+  JSObject::EnsureWritableFastElements(result);
   Handle<FixedArray> result_elements =
-      JSObject::EnsureWritableFastElements(result);
+      handle(FixedArray::cast(result->elements()), isolate);
 
   // Fill the default values.
   for (int i = 0; i < len; i++) {
diff --git a/src/debug/liveedit.h b/src/debug/liveedit.h
index f3d6c54..67be70e 100644
--- a/src/debug/liveedit.h
+++ b/src/debug/liveedit.h
@@ -170,10 +170,6 @@
    *   ...
    *   --- Bottom
    */
-  // A size of frame base including fp. Padding words starts right above
-  // the base.
-  static const int kFrameDropperFrameSize =
-      4 + StandardFrameConstants::kCPSlotCount;
   // A number of words that should be reserved on stack for the LiveEdit use.
   // Stored on stack in form of Smi.
   static const int kFramePaddingInitialSize = 1;
@@ -256,8 +252,8 @@
   }
 
   Handle<Object> GetField(int field_position) {
-    return Object::GetElement(
-        isolate(), array_, field_position).ToHandleChecked();
+    return JSReceiver::GetElement(isolate(), array_, field_position)
+        .ToHandleChecked();
   }
 
   int GetSmiValueField(int field_position) {
@@ -337,9 +333,8 @@
   static bool IsInstance(Handle<JSArray> array) {
     if (array->length() != Smi::FromInt(kSize_)) return false;
     Handle<Object> element(
-        Object::GetElement(array->GetIsolate(),
-                           array,
-                           kSharedInfoOffset_).ToHandleChecked());
+        JSReceiver::GetElement(array->GetIsolate(), array, kSharedInfoOffset_)
+            .ToHandleChecked());
     if (!element->IsJSValue()) return false;
     return Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo();
   }
diff --git a/src/debug/mips/debug-mips.cc b/src/debug/mips/debug-mips.cc
index 1d9f7d6..8e00d61 100644
--- a/src/debug/mips/debug-mips.cc
+++ b/src/debug/mips/debug-mips.cc
@@ -77,9 +77,15 @@
     __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
     __ push(at);
 
-    if (mode == SAVE_RESULT_REGISTER) __ push(v0);
-
-    __ PrepareCEntryArgs(0);  // No arguments.
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ push(v0);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
+    __ PrepareCEntryArgs(1);
     __ PrepareCEntryFunction(ExternalReference(
         Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate()));
 
@@ -89,12 +95,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; i++) {
         Register reg = {JSCallerSavedCode(i)};
-        __ li(reg, kDebugZapValue);
+        // Do not clobber v0 if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function returned by DebugBreak.
+        if (!(reg.is(v0) && (mode == SAVE_RESULT_REGISTER))) {
+          __ li(reg, kDebugZapValue);
+        }
       }
     }
 
-    if (mode == SAVE_RESULT_REGISTER) __ pop(v0);
-
     // Don't bother removing padding bytes pushed on the stack
     // as the frame is going to be restored right away.
 
@@ -114,9 +122,10 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // We do not know our frame height, but set sp based on fp.
-  __ Subu(sp, fp, Operand(kPointerSize));
+  __ lw(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
 
-  __ Pop(ra, fp, a1);  // Return address, Frame, Function.
+  // Pop return address and frame.
+  __ LeaveFrame(StackFrame::INTERNAL);
 
   ParameterCount dummy(0);
   __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
diff --git a/src/debug/mips64/debug-mips64.cc b/src/debug/mips64/debug-mips64.cc
index 0646a24..aad095b 100644
--- a/src/debug/mips64/debug-mips64.cc
+++ b/src/debug/mips64/debug-mips64.cc
@@ -79,9 +79,15 @@
     __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
     __ push(at);
 
-    if (mode == SAVE_RESULT_REGISTER) __ push(v0);
-
-    __ PrepareCEntryArgs(0);  // No arguments.
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ push(v0);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
+    __ PrepareCEntryArgs(1);
     __ PrepareCEntryFunction(ExternalReference(
         Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate()));
 
@@ -91,12 +97,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; i++) {
         Register reg = {JSCallerSavedCode(i)};
-        __ li(reg, kDebugZapValue);
+        // Do not clobber v0 if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function returned by DebugBreak.
+        if (!(reg.is(v0) && (mode == SAVE_RESULT_REGISTER))) {
+          __ li(reg, kDebugZapValue);
+        }
       }
     }
 
-    if (mode == SAVE_RESULT_REGISTER) __ pop(v0);
-
     // Don't bother removing padding bytes pushed on the stack
     // as the frame is going to be restored right away.
 
@@ -116,9 +124,10 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // We do not know our frame height, but set sp based on fp.
-  __ Dsubu(sp, fp, Operand(kPointerSize));
+  __ ld(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
 
-  __ Pop(ra, fp, a1);  // Return address, Frame, Function.
+  // Pop return address and frame.
+  __ LeaveFrame(StackFrame::INTERNAL);
 
   ParameterCount dummy(0);
   __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
diff --git a/src/debug/mirrors.js b/src/debug/mirrors.js
index 8b9dd02..881f303 100644
--- a/src/debug/mirrors.js
+++ b/src/debug/mirrors.js
@@ -1037,6 +1037,15 @@
 };
 
 
+FunctionMirror.prototype.context = function() {
+  if (this.resolved()) {
+    if (!this._context)
+      this._context = new ContextMirror(%FunctionGetContextData(this.value_));
+    return this._context;
+  }
+};
+
+
 /**
  * Mirror object for unresolved functions.
  * @param {string} value The name for the unresolved function reflected by this
@@ -1434,14 +1443,6 @@
 };
 
 
-GeneratorMirror.prototype.context = function() {
-  if (!this.context_) {
-    this.context_ = new ContextMirror(%GeneratorGetContext(this.value_));
-  }
-  return this.context_;
-};
-
-
 GeneratorMirror.prototype.receiver = function() {
   if (!this.receiver_) {
     this.receiver_ = MakeMirror(%GeneratorGetReceiver(this.value_));
@@ -1801,11 +1802,6 @@
 };
 
 
-FrameDetails.prototype.stepInPositionsImpl = function() {
-  return %GetStepInPositions(this.break_id_, this.frameId());
-};
-
-
 /**
  * Mirror object for stack frames.
  * @param {number} break_id The break id in the VM for which this frame is
@@ -1985,29 +1981,6 @@
 };
 
 
-FrameMirror.prototype.stepInPositions = function() {
-  var script = this.func().script();
-  var funcOffset = this.func().sourcePosition_();
-
-  var stepInRaw = this.details_.stepInPositionsImpl();
-  var result = [];
-  if (stepInRaw) {
-    for (var i = 0; i < stepInRaw.length; i++) {
-      var posStruct = {};
-      var offset = script.locationFromPosition(funcOffset + stepInRaw[i],
-                                               true);
-      serializeLocationFields(offset, posStruct);
-      var item = {
-        position: posStruct
-      };
-      result.push(item);
-    }
-  }
-
-  return result;
-};
-
-
 FrameMirror.prototype.evaluate = function(source, disable_break,
                                           opt_context_object) {
   return MakeMirror(%DebugEvaluate(this.break_id_,
@@ -2177,6 +2150,9 @@
 var kScopeDetailsTypeIndex = 0;
 var kScopeDetailsObjectIndex = 1;
 var kScopeDetailsNameIndex = 2;
+var kScopeDetailsStartPositionIndex = 3;
+var kScopeDetailsEndPositionIndex = 4;
+var kScopeDetailsFunctionIndex = 5;
 
 function ScopeDetails(frame, fun, index, opt_details) {
   if (frame) {
@@ -2221,6 +2197,29 @@
 };
 
 
+ScopeDetails.prototype.startPosition = function() {
+  if (!IS_UNDEFINED(this.break_id_)) {
+    %CheckExecutionState(this.break_id_);
+  }
+  return this.details_[kScopeDetailsStartPositionIndex];
+}
+
+
+ScopeDetails.prototype.endPosition = function() {
+  if (!IS_UNDEFINED(this.break_id_)) {
+    %CheckExecutionState(this.break_id_);
+  }
+  return this.details_[kScopeDetailsEndPositionIndex];
+}
+
+ScopeDetails.prototype.func = function() {
+  if (!IS_UNDEFINED(this.break_id_)) {
+    %CheckExecutionState(this.break_id_);
+  }
+  return this.details_[kScopeDetailsFunctionIndex];
+}
+
+
 ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
   var raw_res;
   if (!IS_UNDEFINED(this.break_id_)) {
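The ScopeDetails accessors added above expose slots 3-5 of the runtime-provided details array (start position, end position, function). A minimal sketch of reading them from a break-event listener, assuming a ScopeMirror whose details_ field is the ScopeDetails wrapper shown in this patch; the listener wiring is illustrative only:

  function listener(event, exec_state, event_data, data) {
    if (event != Debug.DebugEvent.Break) return;
    var scope = exec_state.frame(0).scope(0);  // ScopeMirror of the top frame
    var details = scope.details_;              // ScopeDetails shown above
    print("scope source range: [" + details.startPosition() + ", " +
          details.endPosition() + ")");
  }
  Debug.setListener(listener);
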
diff --git a/src/debug/ppc/debug-ppc.cc b/src/debug/ppc/debug-ppc.cc
index aab5399..a160bc2 100644
--- a/src/debug/ppc/debug-ppc.cc
+++ b/src/debug/ppc/debug-ppc.cc
@@ -83,9 +83,15 @@
     __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
     __ push(ip);
 
-    if (mode == SAVE_RESULT_REGISTER) __ push(r3);
-
-    __ mov(r3, Operand::Zero());  // no arguments
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ push(r3);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
+    __ mov(r3, Operand(1));
     __ mov(r4,
            Operand(ExternalReference(
                Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -96,12 +102,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; i++) {
         Register reg = {JSCallerSavedCode(i)};
-        __ mov(reg, Operand(kDebugZapValue));
+        // Do not clobber r3 if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function.
+        if (!(reg.is(r3) && (mode == SAVE_RESULT_REGISTER))) {
+          __ mov(reg, Operand(kDebugZapValue));
+        }
       }
     }
 
-    if (mode == SAVE_RESULT_REGISTER) __ pop(r3);
-
     // Don't bother removing padding bytes pushed on the stack
     // as the frame is going to be restored right away.
 
@@ -121,8 +129,7 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // Load the function pointer off of our current stack frame.
-  __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
-                                  kPointerSize));
+  __ LoadP(r4, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
 
   // Pop return address and frame
   __ LeaveFrame(StackFrame::INTERNAL);
diff --git a/src/debug/s390/OWNERS b/src/debug/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/debug/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/debug/s390/debug-s390.cc b/src/debug/s390/debug-s390.cc
new file mode 100644
index 0000000..c6764c2
--- /dev/null
+++ b/src/debug/s390/debug-s390.cc
@@ -0,0 +1,165 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+  Label check_size;
+  __ bind(&check_size);
+  //   oill r3, 0
+  //   oill r3, 0
+  __ nop(Assembler::DEBUG_BREAK_NOP);
+  __ nop(Assembler::DEBUG_BREAK_NOP);
+
+  //   lr r0, r0    64-bit only
+  //   lr r0, r0    64-bit only
+  //   lr r0, r0    64-bit only
+  for (int i = 8; i < Assembler::kDebugBreakSlotLength; i += 2) {
+    __ nop();
+  }
+  DCHECK_EQ(Assembler::kDebugBreakSlotLength,
+            masm->SizeOfCodeGeneratedSince(&check_size));
+}
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
+  // Generate enough nop's to make space for a call instruction.
+  masm->RecordDebugBreakSlot(mode);
+  EmitDebugBreakSlot(masm);
+}
+
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+  CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
+  EmitDebugBreakSlot(patcher.masm());
+}
+
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+                                       Handle<Code> code) {
+  DCHECK_EQ(Code::BUILTIN, code->kind());
+  CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
+  // Patch the code changing the debug break slot code from
+  //
+  //   oill r3, 0
+  //   oill r3, 0
+  //   oill r3, 0   64-bit only
+  //   lr r0, r0    64-bit only
+  //
+  // to a call to the debug break code, using a FIXED_SEQUENCE.
+  //
+  //   iilf r14, <address>   6-bytes
+  //   basr r14, r14         2-bytes
+  //
+  // The 64bit sequence has an extra iihf.
+  //
+  //   iihf r14, <high 32-bits address>    6-bytes
+  //   iilf r14, <lower 32-bits address>   6-bytes
+  //   basr r14, r14                       2-bytes
+  patcher.masm()->mov(v8::internal::r14,
+                      Operand(reinterpret_cast<intptr_t>(code->entry())));
+  patcher.masm()->basr(v8::internal::r14, v8::internal::r14);
+}
+
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  Instr current_instr = Assembler::instr_at(pc);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+                                          DebugBreakCallHelperMode mode) {
+  __ RecordComment("Debug break");
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Load padding words on stack.
+    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
+    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+      __ push(ip);
+    }
+    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+    __ push(ip);
+
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ push(r2);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
+    __ mov(r2, Operand(1));
+    __ mov(r3,
+           Operand(ExternalReference(
+               Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
+
+    CEntryStub ceb(masm->isolate(), 1);
+    __ CallStub(&ceb);
+
+    if (FLAG_debug_code) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        Register reg = {JSCallerSavedCode(i)};
+        // Do not clobber r2 if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function.
+        if (!(reg.is(r2) && (mode == SAVE_RESULT_REGISTER))) {
+          __ mov(reg, Operand(kDebugZapValue));
+        }
+      }
+    }
+
+    // Don't bother removing padding bytes pushed on the stack
+    // as the frame is going to be restored right away.
+
+    // Leave the internal frame.
+  }
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller and that was
+  // overwritten by the address of DebugBreakXXX.
+  ExternalReference after_break_target =
+      ExternalReference::debug_after_break_target_address(masm->isolate());
+  __ mov(ip, Operand(after_break_target));
+  __ LoadP(ip, MemOperand(ip));
+  __ JumpToJSEntry(ip);
+}
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+  // Load the function pointer off of our current stack frame.
+  __ LoadP(r3, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+
+  // Pop return address and frame
+  __ LeaveFrame(StackFrame::INTERNAL);
+
+  ParameterCount dummy(0);
+  __ FloodFunctionIfStepping(r3, no_reg, dummy, dummy);
+
+  // Load context from the function.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  // Clear new.target as a safety measure.
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+
+  // Get function code.
+  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Re-run JSFunction, r3 is function, cp is context.
+  __ Jump(ip);
+}
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/debug/x64/debug-x64.cc b/src/debug/x64/debug-x64.cc
index f7fbe76..a85ddb3 100644
--- a/src/debug/x64/debug-x64.cc
+++ b/src/debug/x64/debug-x64.cc
@@ -69,9 +69,15 @@
     }
     __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
 
-    if (mode == SAVE_RESULT_REGISTER) __ Push(rax);
-
-    __ Set(rax, 0);  // No arguments (argc == 0).
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ Push(rax);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
+    __ Set(rax, 1);
     __ Move(rbx, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
                                    masm->isolate()));
 
@@ -81,12 +87,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; ++i) {
         Register reg = {JSCallerSavedCode(i)};
-        __ Set(reg, kDebugZapValue);
+        // Do not clobber rax if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function.
+        if (!(reg.is(rax) && (mode == SAVE_RESULT_REGISTER))) {
+          __ Set(reg, kDebugZapValue);
+        }
       }
     }
 
-    if (mode == SAVE_RESULT_REGISTER) __ Pop(rax);
-
     // Read current padding counter and skip corresponding number of words.
     __ Pop(kScratchRegister);
     __ SmiToInteger32(kScratchRegister, kScratchRegister);
@@ -111,9 +119,12 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // We do not know our frame height, but set rsp based on rbp.
-  __ leap(rsp, Operand(rbp, -1 * kPointerSize));
-
+  __ leap(rsp, Operand(rbp, FrameDropperFrameConstants::kFunctionOffset));
   __ Pop(rdi);  // Function.
+  __ addp(rsp,
+          Immediate(-FrameDropperFrameConstants::kCodeOffset));  // INTERNAL
+                                                                 // frame marker
+                                                                 // and code
   __ popq(rbp);
 
   ParameterCount dummy(0);
diff --git a/src/debug/x87/debug-x87.cc b/src/debug/x87/debug-x87.cc
index 8ddb82f..029a004 100644
--- a/src/debug/x87/debug-x87.cc
+++ b/src/debug/x87/debug-x87.cc
@@ -68,9 +68,15 @@
     }
     __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
 
-    if (mode == SAVE_RESULT_REGISTER) __ push(eax);
-
-    __ Move(eax, Immediate(0));  // No arguments.
+    // Push arguments for DebugBreak call.
+    if (mode == SAVE_RESULT_REGISTER) {
+      // Break on return.
+      __ push(eax);
+    } else {
+      // Non-return breaks.
+      __ Push(masm->isolate()->factory()->the_hole_value());
+    }
+    __ Move(eax, Immediate(1));
     __ mov(ebx,
            Immediate(ExternalReference(
                Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
@@ -81,12 +87,14 @@
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; ++i) {
         Register reg = {JSCallerSavedCode(i)};
-        __ Move(reg, Immediate(kDebugZapValue));
+        // Do not clobber eax if mode is SAVE_RESULT_REGISTER. It will
+        // contain the return value of the function.
+        if (!(reg.is(eax) && (mode == SAVE_RESULT_REGISTER))) {
+          __ Move(reg, Immediate(kDebugZapValue));
+        }
       }
     }
 
-    if (mode == SAVE_RESULT_REGISTER) __ pop(eax);
-
     __ pop(ebx);
     // We divide stored value by 2 (untagging) and multiply it by word's size.
     STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
@@ -110,9 +118,12 @@
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   // We do not know our frame height, but set esp based on ebp.
-  __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
+  __ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
   __ pop(edi);  // Function.
+  __ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset));  // INTERNAL
+                                                                     // frame
+                                                                     // marker
+                                                                     // and code
   __ pop(ebp);
 
   ParameterCount dummy(0);
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index e00e5ab..b2c5d42 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -38,7 +38,7 @@
 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
     : allocator_(allocator),
       current_(NULL) {
-  for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+  for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
     deopt_entry_code_entries_[i] = -1;
     deopt_entry_code_[i] = AllocateCodeChunk(allocator);
   }
@@ -46,7 +46,7 @@
 
 
 DeoptimizerData::~DeoptimizerData() {
-  for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
+  for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
     allocator_->Free(deopt_entry_code_[i]);
     deopt_entry_code_[i] = NULL;
   }
@@ -307,7 +307,7 @@
 
   // Move marked code from the optimized code list to the deoptimized
   // code list, collecting them into a ZoneList.
-  Zone zone;
+  Zone zone(isolate->allocator());
   ZoneList<Code*> codes(10, &zone);
 
   // Walk over all optimized code objects in this native context.
@@ -444,7 +444,6 @@
     case EAGER:
     case SOFT:
     case LAZY:
-    case DEBUGGER:
       return (frame_type == StackFrame::STUB)
           ? FLAG_trace_stub_failures
           : FLAG_trace_deopt;
@@ -459,7 +458,6 @@
     case EAGER: return "eager";
     case SOFT: return "soft";
     case LAZY: return "lazy";
-    case DEBUGGER: return "debugger";
   }
   FATAL("Unsupported deopt type");
   return NULL;
@@ -474,7 +472,6 @@
       bailout_type_(type),
       from_(from),
       fp_to_sp_delta_(fp_to_sp_delta),
-      has_alignment_padding_(0),
       deoptimizing_throw_(false),
       catch_handler_data_(-1),
       catch_handler_pc_offset_(-1),
@@ -482,6 +479,12 @@
       output_count_(0),
       jsframe_count_(0),
       output_(nullptr),
+      caller_frame_top_(0),
+      caller_fp_(0),
+      caller_pc_(0),
+      caller_constant_pool_(0),
+      input_frame_context_(0),
+      stack_fp_(0),
       trace_scope_(nullptr) {
   if (isolate->deoptimizer_lazy_throw()) {
     isolate->set_deoptimizer_lazy_throw(false);
@@ -546,9 +549,6 @@
           ? static_cast<Code*>(isolate_->FindCodeObject(from_))
           : compiled_code;
     }
-    case Deoptimizer::DEBUGGER:
-      DCHECK(optimized_code->contains(from_));
-      return optimized_code;
   }
   FATAL("Could not find code for optimized function");
   return NULL;
@@ -601,7 +601,7 @@
     CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
   }
   DeoptimizerData* data = isolate->deoptimizer_data();
-  CHECK_LT(type, kBailoutTypesWithCodeEntry);
+  CHECK_LE(type, kLastBailoutType);
   MemoryChunk* base = data->deopt_entry_code_[type];
   return base->area_start() + (id * table_entry_size_);
 }
@@ -711,16 +711,38 @@
   DeoptimizationInputData* input_data =
       DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
 
+  {
+    // Read caller's PC, caller's FP and caller's constant pool values
+    // from input frame. Compute caller's frame top address.
+
+    Register fp_reg = JavaScriptFrame::fp_register();
+    stack_fp_ = input_->GetRegister(fp_reg.code());
+
+    caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();
+
+    Address fp_address = input_->GetFramePointerAddress();
+    caller_fp_ = Memory::intptr_at(fp_address);
+    caller_pc_ =
+        Memory::intptr_at(fp_address + CommonFrameConstants::kCallerPCOffset);
+    input_frame_context_ = Memory::intptr_at(
+        fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);
+
+    if (FLAG_enable_embedded_constant_pool) {
+      caller_constant_pool_ = Memory::intptr_at(
+          fp_address + CommonFrameConstants::kConstantPoolOffset);
+    }
+  }
+
   if (trace_scope_ != NULL) {
     timer.Start();
     PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
            MessageFor(bailout_type_));
     PrintFunctionName();
     PrintF(trace_scope_->file(),
-           " (opt #%d) @%d, FP to SP delta: %d]\n",
-           input_data->OptimizationId()->value(),
-           bailout_id_,
-           fp_to_sp_delta_);
+           " (opt #%d) @%d, FP to SP delta: %d, caller sp: 0x%08" V8PRIxPTR
+           "]\n",
+           input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
+           caller_frame_top_);
     if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
         (compiled_code_->is_hydrogen_stub())) {
       compiled_code_->PrintDeoptLocation(trace_scope_->file(), from_);
@@ -763,39 +785,42 @@
   }
   output_count_ = static_cast<int>(count);
 
-  Register fp_reg = JavaScriptFrame::fp_register();
-  stack_fp_ = reinterpret_cast<Address>(
-      input_->GetRegister(fp_reg.code()) +
-          has_alignment_padding_ * kPointerSize);
-
   // Translate each output frame.
-  for (size_t i = 0; i < count; ++i) {
+  int frame_index = 0;  // output_frame_index
+  for (size_t i = 0; i < count; ++i, ++frame_index) {
     // Read the ast node id, function, and frame height for this output frame.
-    int frame_index = static_cast<int>(i);
-    switch (translated_state_.frames()[i].kind()) {
+    TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
+    switch (translated_frame->kind()) {
       case TranslatedFrame::kFunction:
-        DoComputeJSFrame(frame_index, deoptimizing_throw_ && i == count - 1);
+        DoComputeJSFrame(translated_frame, frame_index,
+                         deoptimizing_throw_ && i == count - 1);
         jsframe_count_++;
         break;
       case TranslatedFrame::kInterpretedFunction:
-        DoComputeInterpretedFrame(frame_index,
+        DoComputeInterpretedFrame(translated_frame, frame_index,
                                   deoptimizing_throw_ && i == count - 1);
         jsframe_count_++;
         break;
       case TranslatedFrame::kArgumentsAdaptor:
-        DoComputeArgumentsAdaptorFrame(frame_index);
+        DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
+        break;
+      case TranslatedFrame::kTailCallerFunction:
+        DoComputeTailCallerFrame(translated_frame, frame_index);
+        // Tail caller frame translations do not produce output frames.
+        frame_index--;
+        output_count_--;
         break;
       case TranslatedFrame::kConstructStub:
-        DoComputeConstructStubFrame(frame_index);
+        DoComputeConstructStubFrame(translated_frame, frame_index);
         break;
       case TranslatedFrame::kGetter:
-        DoComputeAccessorStubFrame(frame_index, false);
+        DoComputeAccessorStubFrame(translated_frame, frame_index, false);
         break;
       case TranslatedFrame::kSetter:
-        DoComputeAccessorStubFrame(frame_index, true);
+        DoComputeAccessorStubFrame(translated_frame, frame_index, true);
         break;
       case TranslatedFrame::kCompiledStub:
-        DoComputeCompiledStubFrame(frame_index);
+        DoComputeCompiledStubFrame(translated_frame, frame_index);
         break;
       case TranslatedFrame::kInvalid:
         FATAL("invalid frame");
@@ -811,22 +836,18 @@
            MessageFor(bailout_type_));
     PrintFunctionName();
     PrintF(trace_scope_->file(),
-           " @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
-           " took %0.3f ms]\n",
-           bailout_id_,
-           node_id.ToInt(),
-           output_[index]->GetPc(),
-           FullCodeGenerator::State2String(
-               static_cast<FullCodeGenerator::State>(
-                   output_[index]->GetState()->value())),
-           has_alignment_padding_ ? "with padding" : "no padding",
+           " @%d => node=%d, pc=0x%08" V8PRIxPTR ", caller sp=0x%08" V8PRIxPTR
+           ", state=%s, took %0.3f ms]\n",
+           bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
+           caller_frame_top_, FullCodeGenerator::State2String(
+                                  static_cast<FullCodeGenerator::State>(
+                                      output_[index]->GetState()->value())),
            ms);
   }
 }
 
-void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
-  TranslatedFrame* translated_frame =
-      &(translated_state_.frames()[frame_index]);
+void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
+                                   int frame_index, bool goto_catch_handler) {
   SharedFunctionInfo* shared = translated_frame->raw_shared_info();
 
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -853,8 +874,6 @@
     PrintF(trace_scope_->file(), "  translating frame ");
     base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
     PrintF(trace_scope_->file(), "%s", name.get());
-    PrintF(trace_scope_->file(),
-           " => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
     PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
            height_in_bytes, goto_catch_handler ? " (throw)" : "");
   }
@@ -862,7 +881,6 @@
   // The 'fixed' part of the frame consists of the incoming parameters and
   // the part described by JavaScriptFrameConstants.
   unsigned fixed_frame_size = ComputeJavascriptFixedSize(shared);
-  unsigned input_frame_size = input_->GetFrameSize();
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -875,24 +893,11 @@
   CHECK_NULL(output_[frame_index]);
   output_[frame_index] = output_frame;
 
-  // The top address for the bottommost output frame can be computed from
-  // the input frame pointer and the output frame's height.  For all
-  // subsequent output frames, it can be computed from the previous one's
-  // top address and the current frame's size.
-  Register fp_reg = JavaScriptFrame::fp_register();
+  // The top address of the frame is computed from the previous frame's top and
+  // this frame's size.
   intptr_t top_address;
   if (is_bottommost) {
-    // Determine whether the input frame contains alignment padding.
-    has_alignment_padding_ =
-        (!compiled_code_->is_turbofanned() && HasAlignmentPadding(shared)) ? 1
-                                                                           : 0;
-    // 2 = context and function in the frame.
-    // If the optimized frame had alignment padding, adjust the frame pointer
-    // to point to the new position of the old frame pointer after padding
-    // is removed. Subtract 2 * kPointerSize for the context and function slots.
-    top_address = input_->GetRegister(fp_reg.code()) -
-        StandardFrameConstants::kFixedFrameSizeFromFp -
-        height_in_bytes + has_alignment_padding_ * kPointerSize;
+    top_address = caller_frame_top_ - output_frame_size;
   } else {
     top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
   }
@@ -900,13 +905,11 @@
 
   // Compute the incoming parameter translation.
   unsigned output_offset = output_frame_size;
-  unsigned input_offset = input_frame_size;
   for (int i = 0; i < parameter_count; ++i) {
     output_offset -= kPointerSize;
     WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                  output_offset);
   }
-  input_offset -= (parameter_count * kPointerSize);
 
   // There are no translation commands for the caller's pc and fp, the
   // context, and the function.  Synthesize their values and set them up
@@ -917,10 +920,9 @@
   // previous one.  This frame's pc can be computed from the non-optimized
   // function code and AST id of the bailout.
   output_offset -= kPCOnStackSize;
-  input_offset -= kPCOnStackSize;
   intptr_t value;
   if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
+    value = caller_pc_;
   } else {
     value = output_[frame_index - 1]->GetPc();
   }
@@ -932,30 +934,27 @@
   // read from the previous one.  Also compute and set this frame's frame
   // pointer.
   output_offset -= kFPOnStackSize;
-  input_offset -= kFPOnStackSize;
   if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
+    value = caller_fp_;
   } else {
     value = output_[frame_index - 1]->GetFp();
   }
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  DCHECK(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
-      has_alignment_padding_ * kPointerSize) == fp_value);
   output_frame->SetFp(fp_value);
-  if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+  if (is_topmost) {
+    Register fp_reg = JavaScriptFrame::fp_register();
+    output_frame->SetRegister(fp_reg.code(), fp_value);
+  }
   DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
-  DCHECK(!is_bottommost || !has_alignment_padding_ ||
-         (fp_value & kPointerSize) != 0);
 
   if (FLAG_enable_embedded_constant_pool) {
     // For the bottommost output frame the constant pool pointer can be gotten
     // from the input frame. For subsequent output frames, it can be read from
     // the previous frame.
     output_offset -= kPointerSize;
-    input_offset -= kPointerSize;
     if (is_bottommost) {
-      value = input_->GetFrameSlot(input_offset);
+      value = caller_constant_pool_;
     } else {
       value = output_[frame_index - 1]->GetConstantPool();
     }
@@ -967,9 +966,7 @@
   // For the bottommost output frame the context can be gotten from the input
   // frame. For all subsequent output frames it can be gotten from the function
   // so long as we don't inline functions that need local contexts.
-  Register context_reg = JavaScriptFrame::context_register();
   output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
 
   TranslatedFrame::iterator context_pos = value_iterator;
   int context_input_index = input_index;
@@ -988,14 +985,15 @@
     // If the context was optimized away, just use the context from
     // the activation. This should only apply to Crankshaft code.
     CHECK(!compiled_code_->is_turbofanned());
-    context =
-        is_bottommost
-            ? reinterpret_cast<Object*>(input_->GetFrameSlot(input_offset))
-            : function->context();
+    context = is_bottommost ? reinterpret_cast<Object*>(input_frame_context_)
+                            : function->context();
   }
   value = reinterpret_cast<intptr_t>(context);
   output_frame->SetContext(value);
-  if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
+  if (is_topmost) {
+    Register context_reg = JavaScriptFrame::context_register();
+    output_frame->SetRegister(context_reg.code(), value);
+  }
   WriteValueToOutput(context, context_input_index, frame_index, output_offset,
                      "context    ");
   if (context == isolate_->heap()->arguments_marker()) {
@@ -1009,11 +1007,7 @@
 
   // The function was mentioned explicitly in the BEGIN_FRAME.
   output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(function);
-  // The function for the bottommost output frame should also agree with the
-  // input frame.
-  DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   WriteValueToOutput(function, 0, frame_index, output_offset, "function    ");
 
   // Translate the rest of the frame.
@@ -1065,7 +1059,7 @@
   output_frame->SetState(Smi::FromInt(state));
 
   // Set the continuation for the topmost frame.
-  if (is_topmost && bailout_type_ != DEBUGGER) {
+  if (is_topmost) {
     Builtins* builtins = isolate_->builtins();
     Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
     if (bailout_type_ == LAZY) {
@@ -1080,10 +1074,9 @@
   }
 }
 
-void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
+void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
+                                            int frame_index,
                                             bool goto_catch_handler) {
-  TranslatedFrame* translated_frame =
-      &(translated_state_.frames()[frame_index]);
   SharedFunctionInfo* shared = translated_frame->raw_shared_info();
 
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1110,7 +1103,6 @@
   // The 'fixed' part of the frame consists of the incoming parameters and
   // the part described by InterpreterFrameConstants.
   unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
-  unsigned input_frame_size = input_->GetFrameSize();
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -1125,18 +1117,11 @@
   CHECK_NULL(output_[frame_index]);
   output_[frame_index] = output_frame;
 
-  // The top address for the bottommost output frame can be computed from
-  // the input frame pointer and the output frame's height.  For all
-  // subsequent output frames, it can be computed from the previous one's
-  // top address and the current frame's size.
-  Register fp_reg = InterpretedFrame::fp_register();
+  // The top address of the frame is computed from the previous frame's top and
+  // this frame's size.
   intptr_t top_address;
   if (is_bottommost) {
-    // Subtract interpreter fixed frame size for the context function slots,
-    // new,target and bytecode offset.
-    top_address = input_->GetRegister(fp_reg.code()) -
-                  InterpreterFrameConstants::kFixedFrameSizeFromFp -
-                  height_in_bytes;
+    top_address = caller_frame_top_ - output_frame_size;
   } else {
     top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
   }
@@ -1144,13 +1129,11 @@
 
   // Compute the incoming parameter translation.
   unsigned output_offset = output_frame_size;
-  unsigned input_offset = input_frame_size;
   for (int i = 0; i < parameter_count; ++i) {
     output_offset -= kPointerSize;
     WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                  output_offset);
   }
-  input_offset -= (parameter_count * kPointerSize);
 
   // There are no translation commands for the caller's pc and fp, the
   // context, the function, new.target and the bytecode offset.  Synthesize
@@ -1162,10 +1145,9 @@
   // previous one.  This frame's pc can be computed from the non-optimized
   // function code and AST id of the bailout.
   output_offset -= kPCOnStackSize;
-  input_offset -= kPCOnStackSize;
   intptr_t value;
   if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
+    value = caller_pc_;
   } else {
     value = output_[frame_index - 1]->GetPc();
   }
@@ -1177,31 +1159,27 @@
   // read from the previous one.  Also compute and set this frame's frame
   // pointer.
   output_offset -= kFPOnStackSize;
-  input_offset -= kFPOnStackSize;
   if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
+    value = caller_fp_;
   } else {
     value = output_[frame_index - 1]->GetFp();
   }
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  DCHECK(!is_bottommost ||
-         (input_->GetRegister(fp_reg.code()) +
-          has_alignment_padding_ * kPointerSize) == fp_value);
   output_frame->SetFp(fp_value);
-  if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+  if (is_topmost) {
+    Register fp_reg = InterpretedFrame::fp_register();
+    output_frame->SetRegister(fp_reg.code(), fp_value);
+  }
   DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
-  DCHECK(!is_bottommost || !has_alignment_padding_ ||
-         (fp_value & kPointerSize) != 0);
 
   if (FLAG_enable_embedded_constant_pool) {
     // For the bottommost output frame the constant pool pointer can be gotten
     // from the input frame. For subsequent output frames, it can be read from
     // the previous frame.
     output_offset -= kPointerSize;
-    input_offset -= kPointerSize;
     if (is_bottommost) {
-      value = input_->GetFrameSlot(input_offset);
+      value = caller_constant_pool_;
     } else {
       value = output_[frame_index - 1]->GetConstantPool();
     }
@@ -1215,7 +1193,6 @@
   // so long as we don't inline functions that need local contexts.
   Register context_reg = InterpretedFrame::context_register();
   output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
 
   // When deoptimizing into a catch block, we need to take the context
   // from a register that was specified in the handler table.
@@ -1243,31 +1220,24 @@
 
   // The function was mentioned explicitly in the BEGIN_FRAME.
   output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(function);
-  // The function for the bottommost output frame should also agree with the
-  // input frame.
-  DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   WriteValueToOutput(function, 0, frame_index, output_offset, "function    ");
 
   // The new.target slot is only used during function activation which is
   // before the first deopt point, so should never be needed. Just set it to
   // undefined.
   output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
   Object* new_target = isolate_->heap()->undefined_value();
   WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target  ");
 
   // Set the bytecode array pointer.
   output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
   Object* bytecode_array = shared->bytecode_array();
   WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
                      "bytecode array ");
 
   // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
   output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
   int raw_bytecode_offset =
       BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
   Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
@@ -1317,7 +1287,7 @@
   }
 
   // Set the continuation for the topmost frame.
-  if (is_topmost && bailout_type_ != DEBUGGER) {
+  if (is_topmost) {
     Code* continuation =
         builtins->builtin(Builtins::kInterpreterNotifyDeoptimized);
     if (bailout_type_ == LAZY) {
@@ -1334,11 +1304,10 @@
   }
 }
 
-
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
-  TranslatedFrame* translated_frame =
-      &(translated_state_.frames()[frame_index]);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(
+    TranslatedFrame* translated_frame, int frame_index) {
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
+  bool is_bottommost = (0 == frame_index);
   int input_index = 0;
 
   unsigned height = translated_frame->height();
@@ -1351,7 +1320,7 @@
            "  translating arguments adaptor => height=%d\n", height_in_bytes);
   }
 
-  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFixedFrameSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -1360,15 +1329,19 @@
       FrameDescription(output_frame_size, parameter_count);
   output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
 
-  // Arguments adaptor can not be topmost or bottommost.
-  CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  // Arguments adaptor can not be topmost.
+  CHECK(frame_index < output_count_ - 1);
   CHECK(output_[frame_index] == NULL);
   output_[frame_index] = output_frame;
 
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
+  // The top address of the frame is computed from the previous frame's top and
+  // this frame's size.
   intptr_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  if (is_bottommost) {
+    top_address = caller_frame_top_ - output_frame_size;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
   output_frame->SetTop(top_address);
 
   // Compute the incoming parameter translation.
@@ -1381,13 +1354,22 @@
 
   // Read caller's PC from the previous frame.
   output_offset -= kPCOnStackSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetCallerPc(output_offset, callers_pc);
-  DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");
+  intptr_t value;
+  if (is_bottommost) {
+    value = caller_pc_;
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetCallerPc(output_offset, value);
+  DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
 
   // Read caller's FP from the previous frame, and set this frame's FP.
   output_offset -= kFPOnStackSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
+  if (is_bottommost) {
+    value = caller_fp_;
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
@@ -1396,7 +1378,11 @@
   if (FLAG_enable_embedded_constant_pool) {
     // Read the caller's constant pool from the previous frame.
     output_offset -= kPointerSize;
-    value = output_[frame_index - 1]->GetConstantPool();
+    if (is_bottommost) {
+      value = caller_constant_pool_;
+    } else {
+      value = output_[frame_index - 1]->GetConstantPool();
+    }
     output_frame->SetCallerConstantPool(output_offset, value);
     DebugPrintOutputSlot(value, frame_index, output_offset,
                          "caller's constant_pool\n");
@@ -1440,17 +1426,94 @@
   }
 }
 
+void Deoptimizer::DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
+                                           int frame_index) {
+  SharedFunctionInfo* shared = translated_frame->raw_shared_info();
 
-void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
-  TranslatedFrame* translated_frame =
-      &(translated_state_.frames()[frame_index]);
+  bool is_bottommost = (0 == frame_index);
+  // Tail caller frame can't be topmost.
+  CHECK_NE(output_count_ - 1, frame_index);
+
+  if (trace_scope_ != NULL) {
+    PrintF(trace_scope_->file(), "  translating tail caller frame ");
+    base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+    PrintF(trace_scope_->file(), "%s\n", name.get());
+  }
+
+  if (!is_bottommost) return;
+
+  // Drop the arguments adaptor frame below the current frame if it exists.
+  Address fp_address = input_->GetFramePointerAddress();
+  Address adaptor_fp_address =
+      Memory::Address_at(fp_address + CommonFrameConstants::kCallerFPOffset);
+
+  if (Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR) !=
+      Memory::Object_at(adaptor_fp_address +
+                        CommonFrameConstants::kContextOrFrameTypeOffset)) {
+    return;
+  }
+
+  int caller_params_count =
+      Smi::cast(
+          Memory::Object_at(adaptor_fp_address +
+                            ArgumentsAdaptorFrameConstants::kLengthOffset))
+          ->value();
+
+  int callee_params_count =
+      function_->shared()->internal_formal_parameter_count();
+
+  // Both the caller and callee parameter counts do not include the receiver.
+  int offset = (caller_params_count - callee_params_count) * kPointerSize;
+  intptr_t new_stack_fp =
+      reinterpret_cast<intptr_t>(adaptor_fp_address) + offset;
+
+  intptr_t new_caller_frame_top = new_stack_fp +
+                                  (callee_params_count + 1) * kPointerSize +
+                                  CommonFrameConstants::kFixedFrameSizeAboveFp;
+
+  intptr_t adaptor_caller_pc = Memory::intptr_at(
+      adaptor_fp_address + CommonFrameConstants::kCallerPCOffset);
+  intptr_t adaptor_caller_fp = Memory::intptr_at(
+      adaptor_fp_address + CommonFrameConstants::kCallerFPOffset);
+
+  if (trace_scope_ != NULL) {
+    PrintF(trace_scope_->file(),
+           "    dropping caller arguments adaptor frame: offset=%d, "
+           "fp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR
+           ", "
+           "caller sp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR "\n",
+           offset, stack_fp_, new_stack_fp, caller_frame_top_,
+           new_caller_frame_top);
+  }
+  caller_frame_top_ = new_caller_frame_top;
+  caller_fp_ = adaptor_caller_fp;
+  caller_pc_ = adaptor_caller_pc;
+}
+
+void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
+                                              int frame_index) {
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  // The construct frame could become topmost only if we inlined a constructor
+  // call which does a tail call (otherwise the tail callee's frame would be
+  // the topmost one). So it could only be the LAZY case.
+  CHECK(!is_topmost || bailout_type_ == LAZY);
   int input_index = 0;
 
   Builtins* builtins = isolate_->builtins();
   Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   unsigned height = translated_frame->height();
   unsigned height_in_bytes = height * kPointerSize;
+
+  // If the construct frame appears to be topmost we should ensure that the
+  // value of result register is preserved during continuation execution.
+  // We do this here by "pushing" the result of the constructor function to the
+  // top of the reconstructed stack and then using the
+  // FullCodeGenerator::TOS_REG machinery.
+  if (is_topmost) {
+    height_in_bytes += kPointerSize;
+  }
+
   // Skip function.
   value_iterator++;
   input_index++;
@@ -1459,7 +1522,7 @@
            "  translating construct stub => height=%d\n", height_in_bytes);
   }
 
-  unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize;
+  unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -1467,13 +1530,13 @@
       new (output_frame_size) FrameDescription(output_frame_size);
   output_frame->SetFrameType(StackFrame::CONSTRUCT);
 
-  // Construct stub can not be topmost or bottommost.
-  DCHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  // Construct stub can not be bottommost.
+  DCHECK(frame_index > 0 && frame_index < output_count_);
   DCHECK(output_[frame_index] == NULL);
   output_[frame_index] = output_frame;
 
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
+  // The top address of the frame is computed from the previous frame's top and
+  // this frame's size.
   intptr_t top_address;
   top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
   output_frame->SetTop(top_address);
@@ -1503,6 +1566,10 @@
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    Register fp_reg = JavaScriptFrame::fp_register();
+    output_frame->SetRegister(fp_reg.code(), fp_value);
+  }
   DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
 
   if (FLAG_enable_embedded_constant_pool) {
@@ -1514,24 +1581,22 @@
                          "caller's constant_pool\n");
   }
 
-  // The context can be gotten from the previous frame.
-  output_offset -= kPointerSize;
-  value = output_[frame_index - 1]->GetContext();
-  output_frame->SetFrameSlot(output_offset, value);
-  DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
-
-  // A marker value is used in place of the function.
+  // A marker value is used to mark the frame.
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
   output_frame->SetFrameSlot(output_offset, value);
   DebugPrintOutputSlot(value, frame_index, output_offset,
-                       "function (construct sentinel)\n");
+                       "typed frame marker\n");
 
-  // The output frame reflects a JSConstructStubGeneric frame.
+  // The context can be gotten from the previous frame.
   output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(construct_stub);
+  value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
+  if (is_topmost) {
+    Register context_reg = JavaScriptFrame::context_register();
+    output_frame->SetRegister(context_reg.code(), value);
+  }
+  DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
 
   // The allocation site.
   output_offset -= kPointerSize;
@@ -1556,6 +1621,18 @@
   DebugPrintOutputSlot(value, frame_index, output_offset,
                        "allocated receiver\n");
 
+  if (is_topmost) {
+    // Ensure the result is restored back when we return to the stub.
+    output_offset -= kPointerSize;
+    Register result_reg = FullCodeGenerator::result_register();
+    value = input_->GetRegister(result_reg.code());
+    output_frame->SetFrameSlot(output_offset, value);
+    DebugPrintOutputSlot(value, frame_index, output_offset,
+                         "constructor result\n");
+
+    output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+  }
+
   CHECK_EQ(0u, output_offset);
 
   intptr_t pc = reinterpret_cast<intptr_t>(
@@ -1566,15 +1643,32 @@
     intptr_t constant_pool_value =
         reinterpret_cast<intptr_t>(construct_stub->constant_pool());
     output_frame->SetConstantPool(constant_pool_value);
+    if (is_topmost) {
+      Register constant_pool_reg =
+          JavaScriptFrame::constant_pool_pointer_register();
+      output_frame->SetRegister(constant_pool_reg.code(), fp_value);
+    }
+  }
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Builtins* builtins = isolate_->builtins();
+    DCHECK_EQ(LAZY, bailout_type_);
+    Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<intptr_t>(continuation->entry()));
   }
 }
 
-
-void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
+void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
+                                             int frame_index,
                                              bool is_setter_stub_frame) {
-  TranslatedFrame* translated_frame =
-      &(translated_state_.frames()[frame_index]);
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  // The accessor frame could become topmost only if we inlined an accessor
+  // call which does a tail call (otherwise the tail callee's frame would be
+  // the topmost one). So it could only be the LAZY case.
+  CHECK(!is_topmost || bailout_type_ == LAZY);
   int input_index = 0;
 
   // Skip accessor.
@@ -1585,6 +1679,19 @@
   // frame. This means that we have to use a height of 0.
   unsigned height = 0;
   unsigned height_in_bytes = height * kPointerSize;
+
+  // If the accessor frame appears to be topmost we should ensure that the
+  // value of result register is preserved during continuation execution.
+  // We do this here by "pushing" the result of the accessor function to the
+  // top of the reconstructed stack and then using the
+  // FullCodeGenerator::TOS_REG machinery.
+  // We don't need to restore the result in case of a setter call because we
+  // have to return the stored value but not the result of the setter function.
+  bool should_preserve_result = is_topmost && !is_setter_stub_frame;
+  if (should_preserve_result) {
+    height_in_bytes += kPointerSize;
+  }
+
   const char* kind = is_setter_stub_frame ? "setter" : "getter";
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
@@ -1592,7 +1699,7 @@
   }
 
   // We need 1 stack entry for the return address and enough entries for the
-  // StackFrame::INTERNAL (FP, context, frame type, code object and constant
+  // StackFrame::INTERNAL (FP, frame type, context, code object and constant
   // pool (if enabled)- see MacroAssembler::EnterFrame).
   // For a setter stub frame we need one additional entry for the implicit
   // return value, see StoreStubCompiler::CompileStoreViaSetter.
@@ -1607,8 +1714,8 @@
       new (output_frame_size) FrameDescription(output_frame_size);
   output_frame->SetFrameType(StackFrame::INTERNAL);
 
-  // A frame for an accessor stub can not be the topmost or bottommost one.
-  CHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  // A frame for an accessor stub can not be bottommost.
+  CHECK(frame_index > 0 && frame_index < output_count_);
   CHECK_NULL(output_[frame_index]);
   output_[frame_index] = output_frame;
 
@@ -1631,6 +1738,10 @@
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    Register fp_reg = JavaScriptFrame::fp_register();
+    output_frame->SetRegister(fp_reg.code(), fp_value);
+  }
   DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
 
   if (FLAG_enable_embedded_constant_pool) {
@@ -1642,17 +1753,11 @@
                          "caller's constant_pool\n");
   }
 
-  // The context can be gotten from the previous frame.
-  output_offset -= kPointerSize;
-  value = output_[frame_index - 1]->GetContext();
-  output_frame->SetFrameSlot(output_offset, value);
-  DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
-
-  // A marker value is used in place of the function.
+  // Set the frame type.
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
   output_frame->SetFrameSlot(output_offset, value);
-  DebugPrintOutputSlot(value, frame_index, output_offset, "function ");
+  DebugPrintOutputSlot(value, frame_index, output_offset, "frame type ");
   if (trace_scope_ != nullptr) {
     PrintF(trace_scope_->file(), "(%s sentinel)\n", kind);
   }
@@ -1667,6 +1772,16 @@
   output_frame->SetFrameSlot(output_offset, value);
   DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
 
+  // The context can be gotten from the previous frame.
+  output_offset -= kPointerSize;
+  value = output_[frame_index - 1]->GetContext();
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) {
+    Register context_reg = JavaScriptFrame::context_register();
+    output_frame->SetRegister(context_reg.code(), value);
+  }
+  DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
+
   // Skip receiver.
   value_iterator++;
   input_index++;
@@ -1679,6 +1794,20 @@
                                  output_offset);
   }
 
+  if (should_preserve_result) {
+    // Ensure the result is restored back when we return to the stub.
+    output_offset -= kPointerSize;
+    Register result_reg = FullCodeGenerator::result_register();
+    value = input_->GetRegister(result_reg.code());
+    output_frame->SetFrameSlot(output_offset, value);
+    DebugPrintOutputSlot(value, frame_index, output_offset,
+                         "accessor result\n");
+
+    output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+  } else {
+    output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  }
+
   CHECK_EQ(0u, output_offset);
 
   Smi* offset = is_setter_stub_frame ?
@@ -1691,11 +1820,25 @@
     intptr_t constant_pool_value =
         reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
     output_frame->SetConstantPool(constant_pool_value);
+    if (is_topmost) {
+      Register constant_pool_reg =
+          JavaScriptFrame::constant_pool_pointer_register();
+      output_frame->SetRegister(constant_pool_reg.code(), fp_value);
+    }
+  }
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Builtins* builtins = isolate_->builtins();
+    DCHECK_EQ(LAZY, bailout_type_);
+    Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<intptr_t>(continuation->entry()));
   }
 }
 
-
-void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
+void Deoptimizer::DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
+                                             int frame_index) {
   //
   //               FROM                                  TO
   //    |          ....           |          |          ....           |
@@ -1731,8 +1874,6 @@
   // and then, if the descriptor specifies a constant number of stack
   // parameters, the stack parameters as well.
 
-  TranslatedFrame* translated_frame =
-      &(translated_state_.frames()[frame_index]);
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
   int input_index = 0;
 
@@ -1751,10 +1892,8 @@
   CHECK_EQ(translated_frame->height(), param_count + 1);
   CHECK_GE(param_count, 0);
 
-  int height_in_bytes = kPointerSize * (param_count + stack_param_count) +
-                        sizeof(Arguments) + kPointerSize;
-  int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
-  int input_frame_size = input_->GetFrameSize();
+  int height_in_bytes = kPointerSize * (param_count + stack_param_count);
+  int fixed_frame_size = StubFailureTrampolineFrameConstants::kFixedFrameSize;
   int output_frame_size = height_in_bytes + fixed_frame_size;
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
@@ -1770,28 +1909,24 @@
   CHECK_EQ(frame_index, 0);
   output_[frame_index] = output_frame;
 
-  // The top address for the output frame can be computed from the input
-  // frame pointer and the output frame's height. Subtract space for the
-  // context and function slots.
-  Register fp_reg = StubFailureTrampolineFrame::fp_register();
-  intptr_t top_address = input_->GetRegister(fp_reg.code()) -
-      StandardFrameConstants::kFixedFrameSizeFromFp - height_in_bytes;
+  // The top address of the frame is computed from the previous frame's top and
+  // this frame's size.
+  intptr_t top_address = caller_frame_top_ - output_frame_size;
   output_frame->SetTop(top_address);
 
-  // Read caller's PC (JSFunction continuation) from the input frame.
-  unsigned input_frame_offset = input_frame_size - kPCOnStackSize;
+  // Set caller's PC (JSFunction continuation).
   unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
-  intptr_t value = input_->GetFrameSlot(input_frame_offset);
+  intptr_t value = caller_pc_;
   output_frame->SetCallerPc(output_frame_offset, value);
   DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                        "caller's pc\n");
 
   // Read caller's FP from the input frame, and set this frame's FP.
-  input_frame_offset -= kFPOnStackSize;
-  value = input_->GetFrameSlot(input_frame_offset);
+  value = caller_fp_;
   output_frame_offset -= kFPOnStackSize;
   output_frame->SetCallerFp(output_frame_offset, value);
-  intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
+  intptr_t frame_ptr = top_address + output_frame_offset;
+  Register fp_reg = StubFailureTrampolineFrame::fp_register();
   output_frame->SetRegister(fp_reg.code(), frame_ptr);
   output_frame->SetFp(frame_ptr);
   DebugPrintOutputSlot(value, frame_index, output_frame_offset,
@@ -1799,20 +1934,14 @@
 
   if (FLAG_enable_embedded_constant_pool) {
     // Read the caller's constant pool from the input frame.
-    input_frame_offset -= kPointerSize;
-    value = input_->GetFrameSlot(input_frame_offset);
+    value = caller_constant_pool_;
     output_frame_offset -= kPointerSize;
     output_frame->SetCallerConstantPool(output_frame_offset, value);
     DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                          "caller's constant_pool\n");
   }
 
-  // Remember where the context will need to be written back from the deopt
-  // translation.
-  output_frame_offset -= kPointerSize;
-  unsigned context_frame_offset = output_frame_offset;
-
-  // A marker value is used in place of the function.
+  // The marker for the typed stack frame.
   output_frame_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(
       Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
@@ -1873,8 +2002,6 @@
   Register context_reg = StubFailureTrampolineFrame::context_register();
   value = reinterpret_cast<intptr_t>(maybe_context);
   output_frame->SetRegister(context_reg.code(), value);
-  output_frame->SetFrameSlot(context_frame_offset, value);
-  DebugPrintOutputSlot(value, frame_index, context_frame_offset, "context\n");
   ++value_iterator;
 
   // Copy constant stack parameters to the failure frame. If the number of stack
@@ -1942,14 +2069,13 @@
 
 
 void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
-  DCHECK_NE(DEBUGGER, bailout_type_);
-
   // Walk to the last JavaScript output frame to find out if it has
   // adapted arguments.
   for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
     if (frame_index != 0) it->Advance();
   }
-  translated_state_.Prepare(it->frame()->has_adapted_arguments(), stack_fp_);
+  translated_state_.Prepare(it->frame()->has_adapted_arguments(),
+                            reinterpret_cast<Address>(stack_fp_));
 
   for (auto& materialization : values_to_materialize_) {
     Handle<Object> value = materialization.value_->GetValue();
@@ -1966,7 +2092,8 @@
         reinterpret_cast<intptr_t>(*value);
   }
 
-  isolate_->materialized_object_store()->Remove(stack_fp_);
+  isolate_->materialized_object_store()->Remove(
+      reinterpret_cast<Address>(stack_fp_));
 }
 
 
@@ -2024,25 +2151,26 @@
   }
 }
 
-
-unsigned Deoptimizer::ComputeInputFrameSize() const {
-  unsigned fixed_size = StandardFrameConstants::kFixedFrameSize;
+unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
+  unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
   if (!function_->IsSmi()) {
     fixed_size += ComputeIncomingArgumentSize(function_->shared());
-  } else {
-    CHECK_EQ(Smi::cast(function_), Smi::FromInt(StackFrame::STUB));
   }
+  return fixed_size;
+}
+
+unsigned Deoptimizer::ComputeInputFrameSize() const {
   // The fp-to-sp delta already takes the context, constant pool pointer and the
   // function into account so we have to avoid double counting them.
-  unsigned result = fixed_size + fp_to_sp_delta_ -
-                    StandardFrameConstants::kFixedFrameSizeFromFp;
+  unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize();
+  unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
   if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
     unsigned stack_slots = compiled_code_->stack_slots();
     unsigned outgoing_size =
         ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
-    CHECK(result ==
-          fixed_size + (stack_slots * kPointerSize) -
-              StandardFrameConstants::kFixedFrameSize + outgoing_size);
+    CHECK_EQ(fixed_size_above_fp + (stack_slots * kPointerSize) -
+                 CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
+             result);
   }
   return result;
 }
@@ -2240,6 +2368,10 @@
   buffer_->Add(height, zone());
 }
 
+void Translation::BeginTailCallerFrame(int literal_id) {
+  buffer_->Add(TAIL_CALLER_FRAME, zone());
+  buffer_->Add(literal_id, zone());
+}
 
 void Translation::BeginJSFrame(BailoutId node_id,
                                int literal_id,
@@ -2362,7 +2494,7 @@
 
 void Translation::StoreJSFrameFunction() {
   StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
-                  StandardFrameConstants::kMarkerOffset) /
+                  StandardFrameConstants::kFunctionOffset) /
                  kPointerSize);
 }
 
@@ -2385,6 +2517,7 @@
     case DOUBLE_STACK_SLOT:
     case LITERAL:
     case COMPILED_STUB_FRAME:
+    case TAIL_CALLER_FRAME:
       return 1;
     case BEGIN:
     case ARGUMENTS_ADAPTOR_FRAME:
@@ -2510,7 +2643,9 @@
                           BailoutId node_id) {
   if (shared->HasBytecodeArray()) {
     BytecodeArray* bytecodes = shared->bytecode_array();
-    return bytecodes->SourcePosition(node_id.ToInt());
+    // BailoutId points to the next bytecode in the bytecode array. Subtract
+    // 1 to get the end of the current bytecode.
+    return bytecodes->SourcePosition(node_id.ToInt() - 1);
   } else {
     Code* non_optimized_code = shared->code();
     FixedArray* raw_data = non_optimized_code->deoptimization_data();
@@ -2942,6 +3077,11 @@
                          shared_info, height);
 }
 
+TranslatedFrame TranslatedFrame::TailCallerFrame(
+    SharedFunctionInfo* shared_info) {
+  return TranslatedFrame(kTailCallerFunction, shared_info->GetIsolate(),
+                         shared_info, 0);
+}
 
 TranslatedFrame TranslatedFrame::ConstructStubFrame(
     SharedFunctionInfo* shared_info, int height) {
@@ -2976,6 +3116,9 @@
     case kConstructStub:
       return 1 + height_;
 
+    case kTailCallerFunction:
+      return 1;  // Function.
+
     case kCompiledStub:
       return height_;
 
@@ -3052,6 +3195,18 @@
       return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
     }
 
+    case Translation::TAIL_CALLER_FRAME: {
+      SharedFunctionInfo* shared_info =
+          SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+      if (trace_file != nullptr) {
+        base::SmartArrayPointer<char> name =
+            shared_info->DebugName()->ToCString();
+        PrintF(trace_file, "  reading tail caller frame marker %s\n",
+               name.get());
+      }
+      return TranslatedFrame::TailCallerFrame(shared_info);
+    }
+
     case Translation::CONSTRUCT_STUB_FRAME: {
       SharedFunctionInfo* shared_info =
           SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
@@ -3152,6 +3307,7 @@
     case Translation::JS_FRAME:
     case Translation::INTERPRETED_FRAME:
     case Translation::ARGUMENTS_ADAPTOR_FRAME:
+    case Translation::TAIL_CALLER_FRAME:
     case Translation::CONSTRUCT_STUB_FRAME:
     case Translation::GETTER_STUB_FRAME:
     case Translation::SETTER_STUB_FRAME:
@@ -3725,7 +3881,8 @@
     materialized_store->Set(stack_frame_pointer_,
                             previously_materialized_objects);
     CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
-          frames_[0].kind() == TranslatedFrame::kInterpretedFunction);
+          frames_[0].kind() == TranslatedFrame::kInterpretedFunction ||
+          frames_[0].kind() == TranslatedFrame::kTailCallerFunction);
     Object* const function = frames_[0].front().GetRawValue();
     Deoptimizer::DeoptimizeFunction(JSFunction::cast(function));
   }
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 0259f01..21ca84e 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -116,6 +116,7 @@
     kInterpretedFunction,
     kGetter,
     kSetter,
+    kTailCallerFunction,
     kArgumentsAdaptor,
     kConstructStub,
     kCompiledStub,
@@ -186,6 +187,7 @@
                                        SharedFunctionInfo* shared_info);
   static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
                                                int height);
+  static TranslatedFrame TailCallerFrame(SharedFunctionInfo* shared_info);
   static TranslatedFrame ConstructStubFrame(SharedFunctionInfo* shared_info,
                                             int height);
   static TranslatedFrame CompiledStubFrame(int height, Isolate* isolate) {
@@ -315,7 +317,6 @@
   virtual void LeaveContext(Context* context) = 0;
 };
 
-
 #define DEOPT_MESSAGES_LIST(V)                                                 \
   V(kAccessCheck, "Access check needed")                                       \
   V(kNoReason, "no reason")                                                    \
@@ -333,6 +334,7 @@
   V(kInstanceMigrationFailed, "instance migration failed")                     \
   V(kInsufficientTypeFeedbackForCallWithArguments,                             \
     "Insufficient type feedback for call with arguments")                      \
+  V(kFastArrayPushFailed, "Falling off the fast path for FastArrayPush")       \
   V(kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,                 \
     "Insufficient type feedback for combined type of binary operation")        \
   V(kInsufficientTypeFeedbackForGenericNamedAccess,                            \
@@ -396,18 +398,9 @@
   V(kUndefinedOrNullInForIn, "null or undefined in for-in")                    \
   V(kUndefinedOrNullInToObject, "null or undefined in ToObject")
 
-
 class Deoptimizer : public Malloced {
  public:
-  enum BailoutType {
-    EAGER,
-    LAZY,
-    SOFT,
-    // This last bailout type is not really a bailout, but used by the
-    // debugger to deoptimize stack frames to allow inspection.
-    DEBUGGER,
-    kBailoutTypesWithCodeEntry = SOFT + 1
-  };
+  enum BailoutType { EAGER, LAZY, SOFT, kLastBailoutType = SOFT };
 
 #define DEOPT_MESSAGES_CONSTANTS(C, T) C,
   enum DeoptReason {
@@ -537,8 +530,8 @@
   }
   static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
 
-  static int has_alignment_padding_offset() {
-    return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+  static int caller_frame_top_offset() {
+    return OFFSET_OF(Deoptimizer, caller_frame_top_);
   }
 
   static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -594,12 +587,20 @@
   void DeleteFrameDescriptions();
 
   void DoComputeOutputFrames();
-  void DoComputeJSFrame(int frame_index, bool goto_catch_handler);
-  void DoComputeInterpretedFrame(int frame_index, bool goto_catch_handler);
-  void DoComputeArgumentsAdaptorFrame(int frame_index);
-  void DoComputeConstructStubFrame(int frame_index);
-  void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
-  void DoComputeCompiledStubFrame(int frame_index);
+  void DoComputeJSFrame(TranslatedFrame* translated_frame, int frame_index,
+                        bool goto_catch_handler);
+  void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
+                                 int frame_index, bool goto_catch_handler);
+  void DoComputeArgumentsAdaptorFrame(TranslatedFrame* translated_frame,
+                                      int frame_index);
+  void DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
+                                int frame_index);
+  void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
+                                   int frame_index);
+  void DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
+                                  int frame_index, bool is_setter_stub_frame);
+  void DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
+                                  int frame_index);
 
   void WriteTranslatedValueToOutput(
       TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
@@ -612,6 +613,7 @@
                             unsigned output_offset,
                             const char* debug_hint_string);
 
+  unsigned ComputeInputFrameAboveFpFixedSize() const;
   unsigned ComputeInputFrameSize() const;
   static unsigned ComputeJavascriptFixedSize(SharedFunctionInfo* shared);
   static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo* shared);
@@ -651,10 +653,6 @@
   // from the input frame's double registers.
   void CopyDoubleRegisters(FrameDescription* output_frame);
 
-  // Determines whether the input frame contains alignment padding by looking
-  // at the dynamic alignment state slot inside the frame.
-  bool HasAlignmentPadding(SharedFunctionInfo* shared);
-
   Isolate* isolate_;
   JSFunction* function_;
   Code* compiled_code_;
@@ -662,7 +660,6 @@
   BailoutType bailout_type_;
   Address from_;
   int fp_to_sp_delta_;
-  int has_alignment_padding_;
   bool deoptimizing_throw_;
   int catch_handler_data_;
   int catch_handler_pc_offset_;
@@ -676,8 +673,15 @@
   // Array of output frame descriptions.
   FrameDescription** output_;
 
+  // Caller frame details computed from input frame.
+  intptr_t caller_frame_top_;
+  intptr_t caller_fp_;
+  intptr_t caller_pc_;
+  intptr_t caller_constant_pool_;
+  intptr_t input_frame_context_;
+
   // Key for lookup of previously materialized objects
-  Address stack_fp_;
+  intptr_t stack_fp_;
 
   TranslatedState translated_state_;
   struct ValueToMaterialize {
@@ -891,8 +895,8 @@
 
  private:
   MemoryAllocator* allocator_;
-  int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
-  MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
+  int deopt_entry_code_entries_[Deoptimizer::kLastBailoutType + 1];
+  MemoryChunk* deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
 
   Deoptimizer* current_;
 
@@ -944,6 +948,7 @@
   V(GETTER_STUB_FRAME)             \
   V(SETTER_STUB_FRAME)             \
   V(ARGUMENTS_ADAPTOR_FRAME)       \
+  V(TAIL_CALLER_FRAME)             \
   V(COMPILED_STUB_FRAME)           \
   V(DUPLICATED_OBJECT)             \
   V(ARGUMENTS_OBJECT)              \
@@ -987,6 +992,7 @@
                              unsigned height);
   void BeginCompiledStubFrame(int height);
   void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
+  void BeginTailCallerFrame(int literal_id);
   void BeginConstructStubFrame(int literal_id, unsigned height);
   void BeginGetterStubFrame(int literal_id);
   void BeginSetterStubFrame(int literal_id);
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 59a57e5..ed9ca9a 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -10,7 +10,7 @@
 #include "src/deoptimizer.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/serializer-common.h"
 #include "src/string-stream.h"
 
 namespace v8 {
diff --git a/src/elements.cc b/src/elements.cc
index 9fd450a..288c60e 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -7,6 +7,7 @@
 #include "src/arguments.h"
 #include "src/conversions.h"
 #include "src/factory.h"
+#include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/objects-inl.h"
 #include "src/utils.h"
@@ -428,7 +429,6 @@
   }
 }
 
-
 static void TraceTopFrame(Isolate* isolate) {
   StackFrameIterator it(isolate);
   if (it.done()) {
@@ -503,12 +503,6 @@
     ElementsAccessorSubclass::ValidateImpl(holder);
   }
 
-  bool IsPacked(Handle<JSObject> holder, Handle<FixedArrayBase> backing_store,
-                uint32_t start, uint32_t end) final {
-    return ElementsAccessorSubclass::IsPackedImpl(holder, backing_store, start,
-                                                  end);
-  }
-
   static bool IsPackedImpl(Handle<JSObject> holder,
                            Handle<FixedArrayBase> backing_store, uint32_t start,
                            uint32_t end) {
@@ -608,81 +602,67 @@
     UNREACHABLE();
   }
 
-  uint32_t Push(Handle<JSArray> receiver, Handle<FixedArrayBase> backing_store,
-                Arguments* args, uint32_t push_size) final {
-    return ElementsAccessorSubclass::PushImpl(receiver, backing_store, args,
-                                              push_size);
+  uint32_t Push(Handle<JSArray> receiver, Arguments* args,
+                uint32_t push_size) final {
+    return ElementsAccessorSubclass::PushImpl(receiver, args, push_size);
   }
 
-  static uint32_t PushImpl(Handle<JSArray> receiver,
-                           Handle<FixedArrayBase> elms_obj, Arguments* args,
+  static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
                            uint32_t push_sized) {
     UNREACHABLE();
     return 0;
   }
 
-  uint32_t Unshift(Handle<JSArray> receiver,
-                   Handle<FixedArrayBase> backing_store, Arguments* args,
+  uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
                    uint32_t unshift_size) final {
-    return ElementsAccessorSubclass::UnshiftImpl(receiver, backing_store, args,
-                                                 unshift_size);
+    return ElementsAccessorSubclass::UnshiftImpl(receiver, args, unshift_size);
   }
 
-  static uint32_t UnshiftImpl(Handle<JSArray> receiver,
-                              Handle<FixedArrayBase> elms_obj, Arguments* args,
+  static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
                               uint32_t unshift_size) {
     UNREACHABLE();
     return 0;
   }
 
-  Handle<JSArray> Slice(Handle<JSObject> receiver,
-                        Handle<FixedArrayBase> backing_store, uint32_t start,
+  Handle<JSArray> Slice(Handle<JSObject> receiver, uint32_t start,
                         uint32_t end) final {
-    return ElementsAccessorSubclass::SliceImpl(receiver, backing_store, start,
-                                               end);
+    return ElementsAccessorSubclass::SliceImpl(receiver, start, end);
   }
 
   static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
-                                   Handle<FixedArrayBase> backing_store,
                                    uint32_t start, uint32_t end) {
     UNREACHABLE();
     return Handle<JSArray>();
   }
 
-  Handle<JSArray> Splice(Handle<JSArray> receiver,
-                         Handle<FixedArrayBase> backing_store, uint32_t start,
+  Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
                          uint32_t delete_count, Arguments* args,
                          uint32_t add_count) final {
-    return ElementsAccessorSubclass::SpliceImpl(receiver, backing_store, start,
-                                                delete_count, args, add_count);
+    return ElementsAccessorSubclass::SpliceImpl(receiver, start, delete_count,
+                                                args, add_count);
   }
 
   static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
-                                    Handle<FixedArrayBase> backing_store,
                                     uint32_t start, uint32_t delete_count,
                                     Arguments* args, uint32_t add_count) {
     UNREACHABLE();
     return Handle<JSArray>();
   }
 
-  Handle<Object> Pop(Handle<JSArray> receiver,
-                     Handle<FixedArrayBase> backing_store) final {
-    return ElementsAccessorSubclass::PopImpl(receiver, backing_store);
+  Handle<Object> Pop(Handle<JSArray> receiver) final {
+    return ElementsAccessorSubclass::PopImpl(receiver);
   }
 
-  static Handle<Object> PopImpl(Handle<JSArray> receiver,
-                                Handle<FixedArrayBase> backing_store) {
+  static Handle<Object> PopImpl(Handle<JSArray> receiver) {
     UNREACHABLE();
     return Handle<Object>();
   }
 
-  Handle<Object> Shift(Handle<JSArray> receiver,
-                       Handle<FixedArrayBase> backing_store) final {
-    return ElementsAccessorSubclass::ShiftImpl(receiver, backing_store);
+  Handle<Object> Shift(Handle<JSArray> receiver) final {
+    return ElementsAccessorSubclass::ShiftImpl(receiver);
   }
 
-  static Handle<Object> ShiftImpl(Handle<JSArray> receiver,
-                                  Handle<FixedArrayBase> backing_store) {
+  static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
     UNREACHABLE();
     return Handle<Object>();
   }
@@ -714,8 +694,11 @@
     if (length == 0) {
       array->initialize_elements();
     } else if (length <= capacity) {
-      if (array->HasFastSmiOrObjectElements()) {
-        backing_store = JSObject::EnsureWritableFastElements(array);
+      if (IsFastSmiOrObjectElementsKind(kind())) {
+        JSObject::EnsureWritableFastElements(array);
+        if (array->elements() != *backing_store) {
+          backing_store = handle(array->elements(), isolate);
+        }
       }
       if (2 * length <= capacity) {
         // If more than half the elements won't be used, trim the array.
@@ -737,6 +720,16 @@
     JSObject::ValidateElements(array);
   }
 
+  static uint32_t GetIterationLength(JSObject* receiver,
+                                     FixedArrayBase* elements) {
+    if (receiver->IsJSArray()) {
+      DCHECK(JSArray::cast(receiver)->length()->IsSmi());
+      return static_cast<uint32_t>(
+          Smi::cast(JSArray::cast(receiver)->length())->value());
+    }
+    return ElementsAccessorSubclass::GetCapacityImpl(receiver, elements);
+  }
+
   static Handle<FixedArrayBase> ConvertElementsWithCapacity(
       Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
       ElementsKind from_kind, uint32_t capacity) {
@@ -853,40 +846,194 @@
         from, from_start, *to, from_kind, to_start, packed_size, copy_size);
   }
 
-  static void CollectElementIndicesImpl(Handle<JSObject> object,
-                                        Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys, uint32_t range,
-                                        PropertyFilter filter,
-                                        uint32_t offset) {
-    DCHECK_NE(DICTIONARY_ELEMENTS, kind());
-    if (filter & ONLY_ALL_CAN_READ) {
-      // Non-dictionary elements can't have all-can-read accessors.
-      return;
-    }
-    uint32_t length = 0;
-    if (object->IsJSArray()) {
-      length = Smi::cast(JSArray::cast(*object)->length())->value();
-    } else {
-      length =
-          ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
-    }
-    if (range < length) length = range;
-    for (uint32_t i = offset; i < length; i++) {
-      if (!ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
-                                                    filter)) {
-        continue;
+  Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
+    return ElementsAccessorSubclass::NormalizeImpl(object,
+                                                   handle(object->elements()));
+  }
+
+  static Handle<SeededNumberDictionary> NormalizeImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+    UNREACHABLE();
+    return Handle<SeededNumberDictionary>();
+  }
+
+  Maybe<bool> CollectValuesOrEntries(Isolate* isolate, Handle<JSObject> object,
+                                     Handle<FixedArray> values_or_entries,
+                                     bool get_entries, int* nof_items,
+                                     PropertyFilter filter) {
+    return ElementsAccessorSubclass::CollectValuesOrEntriesImpl(
+        isolate, object, values_or_entries, get_entries, nof_items, filter);
+  }
+
+  static Maybe<bool> CollectValuesOrEntriesImpl(
+      Isolate* isolate, Handle<JSObject> object,
+      Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+      PropertyFilter filter) {
+    int count = 0;
+    KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
+    accumulator.NextPrototype();
+    ElementsAccessorSubclass::CollectElementIndicesImpl(
+        object, handle(object->elements(), isolate), &accumulator, kMaxUInt32,
+        ALL_PROPERTIES, 0);
+    Handle<FixedArray> keys = accumulator.GetKeys();
+
+    for (int i = 0; i < keys->length(); ++i) {
+      Handle<Object> key(keys->get(i), isolate);
+      Handle<Object> value;
+      uint32_t index;
+      if (!key->ToUint32(&index)) continue;
+
+      uint32_t entry = ElementsAccessorSubclass::GetEntryForIndexImpl(
+          *object, object->elements(), index, filter);
+      if (entry == kMaxUInt32) continue;
+
+      PropertyDetails details =
+          ElementsAccessorSubclass::GetDetailsImpl(*object, entry);
+
+      if (details.kind() == kData) {
+        value = ElementsAccessorSubclass::GetImpl(object, entry);
+      } else {
+        LookupIterator it(isolate, object, index, LookupIterator::OWN);
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, value, Object::GetProperty(&it), Nothing<bool>());
       }
-      keys->AddKey(i);
+      if (get_entries) {
+        value = MakeEntryPair(isolate, index, value);
+      }
+      values_or_entries->set(count++, *value);
     }
+
+    *nof_items = count;
+    return Just(true);
   }
 
   void CollectElementIndices(Handle<JSObject> object,
                              Handle<FixedArrayBase> backing_store,
                              KeyAccumulator* keys, uint32_t range,
                              PropertyFilter filter, uint32_t offset) final {
+    if (filter & ONLY_ALL_CAN_READ) return;
     ElementsAccessorSubclass::CollectElementIndicesImpl(
         object, backing_store, keys, range, filter, offset);
-  };
+  }
+
+  static void CollectElementIndicesImpl(Handle<JSObject> object,
+                                        Handle<FixedArrayBase> backing_store,
+                                        KeyAccumulator* keys, uint32_t range,
+                                        PropertyFilter filter,
+                                        uint32_t offset) {
+    DCHECK_NE(DICTIONARY_ELEMENTS, kind());
+    // Non-dictionary elements can't have all-can-read accessors.
+    uint32_t length = GetIterationLength(*object, *backing_store);
+    if (range < length) length = range;
+    for (uint32_t i = offset; i < length; i++) {
+      if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
+                                                   filter)) {
+        keys->AddKey(i);
+      }
+    }
+  }
+
+  static Handle<FixedArray> DirectCollectElementIndicesImpl(
+      Isolate* isolate, Handle<JSObject> object,
+      Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+      PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+      uint32_t insertion_index = 0) {
+    uint32_t length =
+        ElementsAccessorSubclass::GetIterationLength(*object, *backing_store);
+    for (uint32_t i = 0; i < length; i++) {
+      if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
+                                                   filter)) {
+        if (convert == CONVERT_TO_STRING) {
+          Handle<String> index_string = isolate->factory()->Uint32ToString(i);
+          list->set(insertion_index, *index_string);
+        } else {
+          list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+        }
+        insertion_index++;
+      }
+    }
+    *nof_indices = insertion_index;
+    return list;
+  }
+
+  Handle<FixedArray> PrependElementIndices(Handle<JSObject> object,
+                                           Handle<FixedArrayBase> backing_store,
+                                           Handle<FixedArray> keys,
+                                           GetKeysConversion convert,
+                                           PropertyFilter filter) final {
+    return ElementsAccessorSubclass::PrependElementIndicesImpl(
+        object, backing_store, keys, convert, filter);
+  }
+
+  static Handle<FixedArray> PrependElementIndicesImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      Handle<FixedArray> keys, GetKeysConversion convert,
+      PropertyFilter filter) {
+    Isolate* isolate = object->GetIsolate();
+    uint32_t nof_property_keys = keys->length();
+    uint32_t initial_list_length =
+        ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
+    initial_list_length += nof_property_keys;
+
+    // Collect the element indices into a new list.
+    uint32_t nof_indices = 0;
+    Handle<FixedArray> combined_keys =
+        isolate->factory()->NewFixedArray(initial_list_length);
+    combined_keys = ElementsAccessorSubclass::DirectCollectElementIndicesImpl(
+        isolate, object, backing_store, convert, filter, combined_keys,
+        &nof_indices);
+
+    // Sort the indices list if necessary.
+    if (IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind())) {
+      struct {
+        bool operator()(Object* a, Object* b) {
+          if (!a->IsUndefined()) {
+            if (b->IsUndefined()) return true;
+            return a->Number() < b->Number();
+          }
+          return !b->IsUndefined();
+        }
+      } cmp;
+      Object** start =
+          reinterpret_cast<Object**>(combined_keys->GetFirstElementAddress());
+      std::sort(start, start + nof_indices, cmp);
+      uint32_t array_length = 0;
+      // Indices from dictionary elements should only be converted after
+      // sorting.
+      if (convert == CONVERT_TO_STRING) {
+        for (uint32_t i = 0; i < nof_indices; i++) {
+          Handle<Object> index_string = isolate->factory()->Uint32ToString(
+                  combined_keys->get(i)->Number());
+          combined_keys->set(i, *index_string);
+        }
+      } else if (!(object->IsJSArray() &&
+                   JSArray::cast(*object)->length()->ToArrayLength(
+                       &array_length) &&
+                   array_length <= Smi::kMaxValue)) {
+        // Since we use std::sort above, the GC will no longer know where the
+        // HeapNumbers are, hence we have to write them again.
+        // For Arrays with valid Smi length, we are sure to have no HeapNumber
+        // indices and thus we can skip this step.
+        for (uint32_t i = 0; i < nof_indices; i++) {
+          Object* index = combined_keys->get(i);
+          combined_keys->set(i, index);
+        }
+      }
+    }
+
+    // Copy over the passed-in property keys.
+    CopyObjectToObjectElements(*keys, FAST_ELEMENTS, 0, *combined_keys,
+                               FAST_ELEMENTS, nof_indices, nof_property_keys);
+
+    if (IsHoleyElementsKind(kind())) {
+      // Shrink combined_keys to the final size.
+      int final_size = nof_indices + nof_property_keys;
+      DCHECK_LE(final_size, combined_keys->length());
+      combined_keys->Shrink(final_size);
+    }
+
+    return combined_keys;
+  }
 
   void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
                                    KeyAccumulator* accumulator,
@@ -919,12 +1066,7 @@
                  ? index
                  : kMaxUInt32;
     } else {
-      uint32_t length =
-          holder->IsJSArray()
-              ? static_cast<uint32_t>(
-                    Smi::cast(JSArray::cast(holder)->length())->value())
-              : ElementsAccessorSubclass::GetCapacityImpl(holder,
-                                                          backing_store);
+      uint32_t length = GetIterationLength(holder, backing_store);
       return index < length ? index : kMaxUInt32;
     }
   }
@@ -961,6 +1103,19 @@
       : ElementsAccessorBase<DictionaryElementsAccessor,
                              ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
 
+  static uint32_t GetIterationLength(JSObject* receiver,
+                                     FixedArrayBase* elements) {
+    uint32_t length;
+    if (receiver->IsJSArray()) {
+      // Special-case GetIterationLength for dictionary elements since the
+      // length of the array might be a HeapNumber.
+      JSArray::cast(receiver)->length()->ToArrayLength(&length);
+    } else {
+      length = GetCapacityImpl(receiver, elements);
+    }
+    return length;
+  }
+
   static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
                             uint32_t length,
                             Handle<FixedArrayBase> backing_store) {
@@ -1037,12 +1192,16 @@
 
   static bool HasAccessorsImpl(JSObject* holder,
                                FixedArrayBase* backing_store) {
+    DisallowHeapAllocation no_gc;
     SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
     if (!dict->requires_slow_elements()) return false;
     int capacity = dict->Capacity();
+    Heap* heap = holder->GetHeap();
+    Object* undefined = heap->undefined_value();
+    Object* the_hole = heap->the_hole_value();
     for (int i = 0; i < capacity; i++) {
       Object* key = dict->KeyAt(i);
-      if (!dict->IsKey(key)) continue;
+      if (key == the_hole || key == undefined) continue;
       DCHECK(!dict->IsDeleted(i));
       PropertyDetails details = dict->DetailsAt(i);
       if (details.type() == ACCESSOR_CONSTANT) return true;
@@ -1141,47 +1300,97 @@
     return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
   }
 
+  static uint32_t FilterKey(Handle<SeededNumberDictionary> dictionary,
+                            int entry, Object* raw_key, PropertyFilter filter) {
+    DCHECK(!dictionary->IsDeleted(entry));
+    DCHECK(raw_key->IsNumber());
+    DCHECK_LE(raw_key->Number(), kMaxUInt32);
+    PropertyDetails details = dictionary->DetailsAt(entry);
+    PropertyAttributes attr = details.attributes();
+    if ((attr & filter) != 0) return kMaxUInt32;
+    return static_cast<uint32_t>(raw_key->Number());
+  }
+
+  static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
+                                     int entry, PropertyFilter filter) {
+    DisallowHeapAllocation no_gc;
+    Object* raw_key = dictionary->KeyAt(entry);
+    if (!dictionary->IsKey(raw_key)) return kMaxUInt32;
+    return FilterKey(dictionary, entry, raw_key, filter);
+  }
+
+  static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
+                                     int entry, PropertyFilter filter,
+                                     Object* undefined, Object* the_hole) {
+    DisallowHeapAllocation no_gc;
+    Object* raw_key = dictionary->KeyAt(entry);
+    // Replace the IsKey check with a direct comparison which is much faster.
+    if (raw_key == undefined || raw_key == the_hole) {
+      return kMaxUInt32;
+    }
+    return FilterKey(dictionary, entry, raw_key, filter);
+  }
+
   static void CollectElementIndicesImpl(Handle<JSObject> object,
                                         Handle<FixedArrayBase> backing_store,
                                         KeyAccumulator* keys, uint32_t range,
                                         PropertyFilter filter,
                                         uint32_t offset) {
+    if (filter & SKIP_STRINGS) return;
+    Isolate* isolate = keys->isolate();
+    Handle<Object> undefined = isolate->factory()->undefined_value();
+    Handle<Object> the_hole = isolate->factory()->the_hole_value();
     Handle<SeededNumberDictionary> dictionary =
         Handle<SeededNumberDictionary>::cast(backing_store);
     int capacity = dictionary->Capacity();
     for (int i = 0; i < capacity; i++) {
-      Object* k = dictionary->KeyAt(i);
-      if (!dictionary->IsKey(k)) continue;
-      if (k->FilterKey(filter)) continue;
-      if (dictionary->IsDeleted(i)) continue;
-      DCHECK(k->IsNumber());
-      DCHECK_LE(k->Number(), kMaxUInt32);
-      uint32_t index = static_cast<uint32_t>(k->Number());
-      if (index < offset) continue;
-      PropertyDetails details = dictionary->DetailsAt(i);
-      if (filter & ONLY_ALL_CAN_READ) {
-        if (details.kind() != kAccessor) continue;
-        Object* accessors = dictionary->ValueAt(i);
-        if (!accessors->IsAccessorInfo()) continue;
-        if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
-      }
-      PropertyAttributes attr = details.attributes();
-      if ((attr & filter) != 0) continue;
-      keys->AddKey(index);
+      uint32_t key =
+          GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
+      if (key == kMaxUInt32) continue;
+      keys->AddKey(key);
     }
 
     keys->SortCurrentElementsList();
   }
 
+  static Handle<FixedArray> DirectCollectElementIndicesImpl(
+      Isolate* isolate, Handle<JSObject> object,
+      Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+      PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+      uint32_t insertion_index = 0) {
+    if (filter & SKIP_STRINGS) return list;
+    if (filter & ONLY_ALL_CAN_READ) return list;
+
+    Handle<Object> undefined = isolate->factory()->undefined_value();
+    Handle<Object> the_hole = isolate->factory()->the_hole_value();
+    Handle<SeededNumberDictionary> dictionary =
+        Handle<SeededNumberDictionary>::cast(backing_store);
+    uint32_t capacity = dictionary->Capacity();
+    for (uint32_t i = 0; i < capacity; i++) {
+      uint32_t key =
+          GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
+      if (key == kMaxUInt32) continue;
+      Handle<Object> index = isolate->factory()->NewNumberFromUint(key);
+      list->set(insertion_index, *index);
+      insertion_index++;
+    }
+    *nof_indices = insertion_index;
+    return list;
+  }
+
   static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
+    Isolate* isolate = accumulator->isolate();
+    Handle<Object> undefined = isolate->factory()->undefined_value();
+    Handle<Object> the_hole = isolate->factory()->the_hole_value();
     SeededNumberDictionary* dictionary =
         SeededNumberDictionary::cast(receiver->elements());
     int capacity = dictionary->Capacity();
     for (int i = 0; i < capacity; i++) {
       Object* k = dictionary->KeyAt(i);
-      if (!dictionary->IsKey(k)) continue;
+      if (k == *undefined) continue;
+      if (k == *the_hole) continue;
       if (dictionary->IsDeleted(i)) continue;
       Object* value = dictionary->ValueAt(i);
       DCHECK(!value->IsTheHole());
@@ -1205,6 +1414,36 @@
 
   typedef typename KindTraits::BackingStore BackingStore;
 
+  static Handle<SeededNumberDictionary> NormalizeImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> store) {
+    Isolate* isolate = store->GetIsolate();
+    ElementsKind kind = FastElementsAccessorSubclass::kind();
+
+    // Ensure that notifications fire if the array or object prototypes are
+    // normalizing.
+    if (IsFastSmiOrObjectElementsKind(kind)) {
+      isolate->UpdateArrayProtectorOnNormalizeElements(object);
+    }
+
+    int capacity = object->GetFastElementsUsage();
+    Handle<SeededNumberDictionary> dictionary =
+        SeededNumberDictionary::New(isolate, capacity);
+
+    PropertyDetails details = PropertyDetails::Empty();
+    bool used_as_prototype = object->map()->is_prototype_map();
+    int j = 0;
+    for (int i = 0; j < capacity; i++) {
+      if (IsHoleyElementsKind(kind)) {
+        if (BackingStore::cast(*store)->is_the_hole(i)) continue;
+      }
+      Handle<Object> value = FastElementsAccessorSubclass::GetImpl(*store, i);
+      dictionary = SeededNumberDictionary::AddNumberEntry(
+          dictionary, i, value, details, used_as_prototype);
+      j++;
+    }
+    return dictionary;
+  }
+
   static void DeleteAtEnd(Handle<JSObject> obj,
                           Handle<BackingStore> backing_store, uint32_t entry) {
     uint32_t length = static_cast<uint32_t>(backing_store->length());
@@ -1337,15 +1576,10 @@
   static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
-    uint32_t length = 0;
     Handle<FixedArrayBase> elements(receiver->elements(),
                                     receiver->GetIsolate());
-    if (receiver->IsJSArray()) {
-      length = Smi::cast(JSArray::cast(*receiver)->length())->value();
-    } else {
-      length =
-          FastElementsAccessorSubclass::GetCapacityImpl(*receiver, *elements);
-    }
+    uint32_t length =
+        FastElementsAccessorSubclass::GetIterationLength(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
       if (IsFastPackedElementsKind(KindTraits::Kind) ||
           HasEntryImpl(*elements, i)) {
@@ -1380,45 +1614,33 @@
 #endif
   }
 
-  static Handle<Object> PopImpl(Handle<JSArray> receiver,
-                                Handle<FixedArrayBase> backing_store) {
-    return FastElementsAccessorSubclass::RemoveElement(receiver, backing_store,
-                                                       AT_END);
+  static Handle<Object> PopImpl(Handle<JSArray> receiver) {
+    return FastElementsAccessorSubclass::RemoveElement(receiver, AT_END);
   }
 
-  static Handle<Object> ShiftImpl(Handle<JSArray> receiver,
-                                  Handle<FixedArrayBase> backing_store) {
-    return FastElementsAccessorSubclass::RemoveElement(receiver, backing_store,
-                                                       AT_START);
+  static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
+    return FastElementsAccessorSubclass::RemoveElement(receiver, AT_START);
   }
 
   static uint32_t PushImpl(Handle<JSArray> receiver,
-                           Handle<FixedArrayBase> backing_store,
                            Arguments* args, uint32_t push_size) {
+    Handle<FixedArrayBase> backing_store(receiver->elements());
     return FastElementsAccessorSubclass::AddArguments(receiver, backing_store,
                                                       args, push_size, AT_END);
   }
 
   static uint32_t UnshiftImpl(Handle<JSArray> receiver,
-                              Handle<FixedArrayBase> backing_store,
                               Arguments* args, uint32_t unshift_size) {
+    Handle<FixedArrayBase> backing_store(receiver->elements());
     return FastElementsAccessorSubclass::AddArguments(
         receiver, backing_store, args, unshift_size, AT_START);
   }
 
-  static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
-                           Handle<FixedArrayBase> backing_store, int dst_index,
-                           int src_index, int len, int hole_start,
-                           int hole_end) {
-    UNREACHABLE();
-  }
-
   static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
-                                   Handle<FixedArrayBase> backing_store,
                                    uint32_t start, uint32_t end) {
-    DCHECK(start < end);
     Isolate* isolate = receiver->GetIsolate();
-    int result_len = end - start;
+    Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
+    int result_len = end < start ? 0u : end - start;
     Handle<JSArray> result_array = isolate->factory()->NewJSArray(
         KindTraits::Kind, result_len, result_len);
     DisallowHeapAllocation no_gc;
@@ -1431,7 +1653,6 @@
   }
 
   static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
-                                    Handle<FixedArrayBase> backing_store,
                                     uint32_t start, uint32_t delete_count,
                                     Arguments* args, uint32_t add_count) {
     Isolate* isolate = receiver->GetIsolate();
@@ -1439,6 +1660,15 @@
     uint32_t length = Smi::cast(receiver->length())->value();
     uint32_t new_length = length - delete_count + add_count;
 
+    ElementsKind kind = KindTraits::Kind;
+    if (new_length <= static_cast<uint32_t>(receiver->elements()->length()) &&
+        IsFastSmiOrObjectElementsKind(kind)) {
+      HandleScope scope(isolate);
+      JSObject::EnsureWritableFastElements(receiver);
+    }
+
+    Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
+
     if (new_length == 0) {
       receiver->set_elements(heap->empty_fixed_array());
       receiver->set_length(Smi::FromInt(0));
@@ -1477,6 +1707,55 @@
     return deleted_elements;
   }
 
+  static Maybe<bool> CollectValuesOrEntriesImpl(
+      Isolate* isolate, Handle<JSObject> object,
+      Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+      PropertyFilter filter) {
+    int count = 0;
+    uint32_t length = object->elements()->length();
+    for (uint32_t index = 0; index < length; ++index) {
+      if (!HasEntryImpl(object->elements(), index)) continue;
+      Handle<Object> value =
+          FastElementsAccessorSubclass::GetImpl(object->elements(), index);
+      if (get_entries) {
+        value = MakeEntryPair(isolate, index, value);
+      }
+      values_or_entries->set(count++, *value);
+    }
+    *nof_items = count;
+    return Just(true);
+  }
+
+  static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
+                           Handle<FixedArrayBase> backing_store, int dst_index,
+                           int src_index, int len, int hole_start,
+                           int hole_end) {
+    Heap* heap = isolate->heap();
+    Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
+    if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
+      // Update all the copies of this backing_store handle.
+      *dst_elms.location() =
+          BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
+      receiver->set_elements(*dst_elms);
+      // Adjust the hole offset as the array has been shrunk.
+      hole_end -= src_index;
+      DCHECK_LE(hole_start, backing_store->length());
+      DCHECK_LE(hole_end, backing_store->length());
+    } else if (len != 0) {
+      if (IsFastDoubleElementsKind(KindTraits::Kind)) {
+        MemMove(dst_elms->data_start() + dst_index,
+                dst_elms->data_start() + src_index, len * kDoubleSize);
+      } else {
+        DisallowHeapAllocation no_gc;
+        heap->MoveElements(FixedArray::cast(*dst_elms), dst_index, src_index,
+                           len);
+      }
+    }
+    if (hole_start != hole_end) {
+      dst_elms->FillWithHoles(hole_start, hole_end);
+    }
+  }
+
  private:
   // SpliceShrinkStep might modify the backing_store.
   static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver,
@@ -1523,9 +1802,14 @@
   }
 
   static Handle<Object> RemoveElement(Handle<JSArray> receiver,
-                                      Handle<FixedArrayBase> backing_store,
                                       Where remove_position) {
     Isolate* isolate = receiver->GetIsolate();
+    ElementsKind kind = KindTraits::Kind;
+    if (IsFastSmiOrObjectElementsKind(kind)) {
+      HandleScope scope(isolate);
+      JSObject::EnsureWritableFastElements(receiver);
+    }
+    Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
     uint32_t length =
         static_cast<uint32_t>(Smi::cast(receiver->length())->value());
     DCHECK(length > 0);
@@ -1540,8 +1824,8 @@
     FastElementsAccessorSubclass::SetLengthImpl(isolate, receiver, new_length,
                                                 backing_store);
 
-    if (IsHoleyElementsKind(KindTraits::Kind) && result->IsTheHole()) {
-      return receiver->GetIsolate()->factory()->undefined_value();
+    if (IsHoleyElementsKind(kind) && result->IsTheHole()) {
+      return isolate->factory()->undefined_value();
     }
     return result;
   }
@@ -1551,7 +1835,7 @@
                                Arguments* args, uint32_t add_size,
                                Where remove_position) {
     uint32_t length = Smi::cast(receiver->length())->value();
-    DCHECK(add_size > 0);
+    DCHECK(0 < add_size);
     uint32_t elms_len = backing_store->length();
     // Check we do not overflow the new_length.
     DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length));
@@ -1630,29 +1914,6 @@
     return backing_store->get(index);
   }
 
-  static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
-                           Handle<FixedArrayBase> backing_store, int dst_index,
-                           int src_index, int len, int hole_start,
-                           int hole_end) {
-    Heap* heap = isolate->heap();
-    Handle<FixedArray> dst_elms = Handle<FixedArray>::cast(backing_store);
-    if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
-      // Update all the copies of this backing_store handle.
-      *dst_elms.location() =
-          FixedArray::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
-      receiver->set_elements(*dst_elms);
-      // Adjust the hole offset as the array has been shrunk.
-      hole_end -= src_index;
-      DCHECK_LE(hole_start, backing_store->length());
-      DCHECK_LE(hole_end, backing_store->length());
-    } else if (len != 0) {
-      DisallowHeapAllocation no_gc;
-      heap->MoveElements(*dst_elms, dst_index, src_index, len);
-    }
-    if (hole_start != hole_end) {
-      dst_elms->FillWithHoles(hole_start, hole_end);
-    }
-  }
 
   // NOTE: this method violates the handlified function signature convention:
   // raw pointer parameters in the function that allocates.
@@ -1784,31 +2045,6 @@
     FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
   }
 
-  static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
-                           Handle<FixedArrayBase> backing_store, int dst_index,
-                           int src_index, int len, int hole_start,
-                           int hole_end) {
-    Heap* heap = isolate->heap();
-    Handle<FixedDoubleArray> dst_elms =
-        Handle<FixedDoubleArray>::cast(backing_store);
-    if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
-      // Update all the copies of this backing_store handle.
-      *dst_elms.location() = FixedDoubleArray::cast(
-          heap->LeftTrimFixedArray(*dst_elms, src_index));
-      receiver->set_elements(*dst_elms);
-      // Adjust the hole offset as the array has been shrunk.
-      hole_end -= src_index;
-      DCHECK_LE(hole_start, backing_store->length());
-      DCHECK_LE(hole_end, backing_store->length());
-    } else if (len != 0) {
-      MemMove(dst_elms->data_start() + dst_index,
-              dst_elms->data_start() + src_index, len * kDoubleSize);
-    }
-    if (hole_start != hole_end) {
-      dst_elms->FillWithHoles(hole_start, hole_end);
-    }
-  }
-
   static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
                                FixedArrayBase* to, ElementsKind from_kind,
                                uint32_t to_start, int packed_size,
@@ -1965,14 +2201,33 @@
   static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
-    Handle<FixedArrayBase> elements(receiver->elements(),
-                                    receiver->GetIsolate());
+    Handle<FixedArrayBase> elements(receiver->elements());
     uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
       Handle<Object> value = AccessorClass::GetImpl(*elements, i);
       accumulator->AddKey(value, convert);
     }
   }
+
+  static Maybe<bool> CollectValuesOrEntriesImpl(
+      Isolate* isolate, Handle<JSObject> object,
+      Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+      PropertyFilter filter) {
+    int count = 0;
+    if ((filter & ONLY_CONFIGURABLE) == 0) {
+      Handle<FixedArrayBase> elements(object->elements());
+      uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
+      for (uint32_t index = 0; index < length; ++index) {
+        Handle<Object> value = AccessorClass::GetImpl(*elements, index);
+        if (get_entries) {
+          value = MakeEntryPair(isolate, index, value);
+        }
+        values_or_entries->set(count++, *value);
+      }
+    }
+    *nof_items = count;
+    return Just(true);
+  }
 };
 
 
@@ -2163,6 +2418,55 @@
           obj, entry - length);
     }
   }
+
+  static void CollectElementIndicesImpl(Handle<JSObject> object,
+                                        Handle<FixedArrayBase> backing_store,
+                                        KeyAccumulator* keys, uint32_t range,
+                                        PropertyFilter filter,
+                                        uint32_t offset) {
+    FixedArray* parameter_map = FixedArray::cast(*backing_store);
+    uint32_t length = parameter_map->length() - 2;
+    if (range < length) length = range;
+
+    for (uint32_t i = offset; i < length; ++i) {
+      if (!parameter_map->get(i + 2)->IsTheHole()) {
+        keys->AddKey(i);
+      }
+    }
+
+    Handle<FixedArrayBase> store(FixedArrayBase::cast(parameter_map->get(1)));
+    ArgumentsAccessor::CollectElementIndicesImpl(object, store, keys, range,
+                                                 filter, offset);
+    if (SloppyArgumentsElementsAccessorSubclass::kind() ==
+        FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
+      keys->SortCurrentElementsList();
+    }
+  }
+
+  static Handle<FixedArray> DirectCollectElementIndicesImpl(
+      Isolate* isolate, Handle<JSObject> object,
+      Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
+      PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
+      uint32_t insertion_index = 0) {
+    FixedArray* parameter_map = FixedArray::cast(*backing_store);
+    uint32_t length = parameter_map->length() - 2;
+
+    for (uint32_t i = 0; i < length; ++i) {
+      if (parameter_map->get(i + 2)->IsTheHole()) continue;
+      if (convert == CONVERT_TO_STRING) {
+        Handle<String> index_string = isolate->factory()->Uint32ToString(i);
+        list->set(insertion_index, *index_string);
+      } else {
+        list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+      }
+      insertion_index++;
+    }
+
+    Handle<FixedArrayBase> store(FixedArrayBase::cast(parameter_map->get(1)));
+    return ArgumentsAccessor::DirectCollectElementIndicesImpl(
+        isolate, object, store, convert, filter, list, nof_indices,
+        insertion_index);
+  }
 };
 
 
@@ -2264,6 +2568,13 @@
             FastHoleyObjectElementsAccessor,
             ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
 
+  static Handle<SeededNumberDictionary> NormalizeImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+    FixedArray* parameter_map = FixedArray::cast(*elements);
+    Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
+    return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
+  }
+
   static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
     FixedArray* parameter_map = FixedArray::cast(obj->elements());
     Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
@@ -2452,11 +2763,9 @@
                                         KeyAccumulator* keys, uint32_t range,
                                         PropertyFilter filter,
                                         uint32_t offset) {
-    if ((filter & ONLY_ALL_CAN_READ) == 0) {
-      uint32_t length = GetString(*object)->length();
-      for (uint32_t i = 0; i < length; i++) {
-        keys->AddKey(i);
-      }
+    uint32_t length = GetString(*object)->length();
+    for (uint32_t i = 0; i < length; i++) {
+      keys->AddKey(i);
     }
     BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store, keys,
                                                     range, filter, offset);
@@ -2488,6 +2797,11 @@
       : StringWrapperElementsAccessor<
             FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
             ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>>(name) {}
+
+  static Handle<SeededNumberDictionary> NormalizeImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+    return FastHoleyObjectElementsAccessor::NormalizeImpl(object, elements);
+  }
 };
 
 class SlowStringWrapperElementsAccessor
@@ -2664,62 +2978,66 @@
 
 Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
                                          uint32_t concat_size) {
-  int result_len = 0;
-  ElementsKind elements_kind = GetInitialFastElementsKind();
-  bool has_double = false;
+  const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+  STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
+  USE(kHalfOfMaxInt);
+  uint32_t result_len = 0;
+  bool has_raw_doubles = false;
+  ElementsKind result_elements_kind = GetInitialFastElementsKind();
   {
     DisallowHeapAllocation no_gc;
+    bool is_holey = false;
     // Iterate through all the arguments performing checks
     // and calculating total length.
-    bool is_holey = false;
     for (uint32_t i = 0; i < concat_size; i++) {
-      Object* arg = (*args)[i];
-      int len = Smi::cast(JSArray::cast(arg)->length())->value();
+      JSArray* array = JSArray::cast((*args)[i]);
+      uint32_t len = 0;
+      array->length()->ToArrayLength(&len);
 
       // We shouldn't overflow when adding another len.
-      const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
-      STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
-      USE(kHalfOfMaxInt);
       result_len += len;
       DCHECK(0 <= result_len);
       DCHECK(result_len <= FixedDoubleArray::kMaxLength);
 
-      ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
-      has_double = has_double || IsFastDoubleElementsKind(arg_kind);
+      ElementsKind arg_kind = array->GetElementsKind();
+      has_raw_doubles = has_raw_doubles || IsFastDoubleElementsKind(arg_kind);
       is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
-      elements_kind = GetMoreGeneralElementsKind(elements_kind, arg_kind);
+      result_elements_kind =
+          GetMoreGeneralElementsKind(result_elements_kind, arg_kind);
     }
     if (is_holey) {
-      elements_kind = GetHoleyElementsKind(elements_kind);
+      result_elements_kind = GetHoleyElementsKind(result_elements_kind);
     }
   }
 
   // If a double array is concatted into a fast elements array, the fast
   // elements array needs to be initialized to contain proper holes, since
   // boxing doubles may cause incremental marking.
-  ArrayStorageAllocationMode mode =
-      has_double && IsFastObjectElementsKind(elements_kind)
-          ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
-          : DONT_INITIALIZE_ARRAY_ELEMENTS;
+  bool requires_double_boxing =
+      has_raw_doubles && !IsFastDoubleElementsKind(result_elements_kind);
+  ArrayStorageAllocationMode mode = requires_double_boxing
+                                        ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+                                        : DONT_INITIALIZE_ARRAY_ELEMENTS;
   Handle<JSArray> result_array = isolate->factory()->NewJSArray(
-      elements_kind, result_len, result_len, Strength::WEAK, mode);
+      result_elements_kind, result_len, result_len, mode);
   if (result_len == 0) return result_array;
-  int j = 0;
+
+  uint32_t insertion_index = 0;
   Handle<FixedArrayBase> storage(result_array->elements(), isolate);
-  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(result_elements_kind);
   for (uint32_t i = 0; i < concat_size; i++) {
     // It is crucial to keep |array| in a raw pointer form to avoid
     // performance degradation.
     JSArray* array = JSArray::cast((*args)[i]);
-    int len = Smi::cast(array->length())->value();
-    if (len > 0) {
-      ElementsKind from_kind = array->GetElementsKind();
-      accessor->CopyElements(array, 0, from_kind, storage, j, len);
-      j += len;
-    }
+    uint32_t len = 0;
+    array->length()->ToArrayLength(&len);
+    if (len == 0) continue;
+    ElementsKind from_kind = array->GetElementsKind();
+    accessor->CopyElements(array, 0, from_kind, storage, insertion_index, len);
+    insertion_index += len;
   }
 
-  DCHECK(j == result_len);
+  DCHECK_EQ(insertion_index, result_len);
   return result_array;
 }
 
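Note: the rewritten ElementsAccessor::Concat above makes two decisions before copying anything: it folds every argument's elements kind into the most general result kind, and it only pre-initializes the result with holes when raw doubles have to be boxed into a non-double backing store. Below is a minimal standalone sketch of that planning step; the enum and helpers are simplified stand-ins, not V8's real ElementsKind machinery.

// Illustrative sketch only (not part of the patch).
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

enum class Kind { PackedSmi, PackedDouble, PackedObject,
                  HoleySmi, HoleyDouble, HoleyObject };

bool IsDouble(Kind k) { return k == Kind::PackedDouble || k == Kind::HoleyDouble; }
bool IsHoley(Kind k) {
  return k == Kind::HoleySmi || k == Kind::HoleyDouble || k == Kind::HoleyObject;
}

// Smi generalizes to Double generalizes to Object; holeyness is sticky.
Kind MoreGeneral(Kind a, Kind b) {
  auto rank = [](Kind k) {
    if (k == Kind::PackedSmi || k == Kind::HoleySmi) return 0;
    if (k == Kind::PackedDouble || k == Kind::HoleyDouble) return 1;
    return 2;
  };
  static const Kind packed[] = {Kind::PackedSmi, Kind::PackedDouble, Kind::PackedObject};
  static const Kind holey[] = {Kind::HoleySmi, Kind::HoleyDouble, Kind::HoleyObject};
  int r = std::max(rank(a), rank(b));
  return (IsHoley(a) || IsHoley(b)) ? holey[r] : packed[r];
}

struct ConcatPlan {
  Kind result_kind = Kind::PackedSmi;
  bool initialize_with_holes = false;  // needed when raw doubles must be boxed
  uint32_t result_length = 0;
};

// |arrays| holds (kind, length) pairs, one per argument of the concat.
ConcatPlan PlanConcat(const std::vector<std::pair<Kind, uint32_t>>& arrays) {
  ConcatPlan plan;
  bool has_raw_doubles = false;
  for (const auto& arg : arrays) {
    plan.result_length += arg.second;
    has_raw_doubles = has_raw_doubles || IsDouble(arg.first);
    plan.result_kind = MoreGeneral(plan.result_kind, arg.first);
  }
  // Boxing is only required when doubles flow into a non-double result kind;
  // in that case the destination must start out holey rather than uninitialized.
  plan.initialize_with_holes = has_raw_doubles && !IsDouble(plan.result_kind);
  return plan;
}
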
diff --git a/src/elements.h b/src/elements.h
index 483d753..2b18ab0 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -8,7 +8,7 @@
 #include "src/elements-kind.h"
 #include "src/heap/heap.h"
 #include "src/isolate.h"
-#include "src/key-accumulator.h"
+#include "src/keys.h"
 #include "src/objects.h"
 
 namespace v8 {
@@ -52,11 +52,6 @@
     return HasElement(holder, index, handle(holder->elements()), filter);
   }
 
-  // Returns true if the backing store is compact in the given range
-  virtual bool IsPacked(Handle<JSObject> holder,
-                        Handle<FixedArrayBase> backing_store, uint32_t start,
-                        uint32_t end) = 0;
-
   virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
 
   virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
@@ -100,6 +95,24 @@
                           filter, offset);
   }
 
+  virtual Maybe<bool> CollectValuesOrEntries(
+      Isolate* isolate, Handle<JSObject> object,
+      Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
+      PropertyFilter filter = ALL_PROPERTIES) = 0;
+
+  // Returns a list with |object|'s element indices prepended to |keys|.
+  virtual Handle<FixedArray> PrependElementIndices(
+      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+      Handle<FixedArray> keys, GetKeysConversion convert,
+      PropertyFilter filter = ALL_PROPERTIES) = 0;
+
+  inline Handle<FixedArray> PrependElementIndices(
+      Handle<JSObject> object, Handle<FixedArray> keys,
+      GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES) {
+    return PrependElementIndices(object, handle(object->elements()), keys,
+                                 convert, filter);
+  }
+
   virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
                                            KeyAccumulator* accumulator,
                                            AddKeyConversion convert) = 0;
@@ -124,28 +137,27 @@
   static Handle<JSArray> Concat(Isolate* isolate, Arguments* args,
                                 uint32_t concat_size);
 
-  virtual uint32_t Push(Handle<JSArray> receiver,
-                        Handle<FixedArrayBase> backing_store, Arguments* args,
+  virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
                         uint32_t push_size) = 0;
 
   virtual uint32_t Unshift(Handle<JSArray> receiver,
-                           Handle<FixedArrayBase> backing_store,
                            Arguments* args, uint32_t unshift_size) = 0;
 
   virtual Handle<JSArray> Slice(Handle<JSObject> receiver,
-                                Handle<FixedArrayBase> backing_store,
                                 uint32_t start, uint32_t end) = 0;
 
   virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
-                                 Handle<FixedArrayBase> backing_store,
                                  uint32_t start, uint32_t delete_count,
                                  Arguments* args, uint32_t add_count) = 0;
 
-  virtual Handle<Object> Pop(Handle<JSArray> receiver,
-                             Handle<FixedArrayBase> backing_store) = 0;
+  virtual Handle<Object> Pop(Handle<JSArray> receiver) = 0;
 
-  virtual Handle<Object> Shift(Handle<JSArray> receiver,
-                               Handle<FixedArrayBase> backing_store) = 0;
+  virtual Handle<Object> Shift(Handle<JSArray> receiver) = 0;
+
+  virtual Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) = 0;
+
+  virtual uint32_t GetCapacity(JSObject* holder,
+                               FixedArrayBase* backing_store) = 0;
 
  protected:
   friend class LookupIterator;
@@ -172,8 +184,6 @@
                             uint32_t destination_start, int copy_size) = 0;
 
  private:
-  virtual uint32_t GetCapacity(JSObject* holder,
-                               FixedArrayBase* backing_store) = 0;
   static ElementsAccessor** elements_accessors_;
   const char* name_;
 
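Note: PrependElementIndices, declared above and implemented earlier in this patch in elements.cc, builds one key list with integer indices first and the already-collected named property keys after them. Below is a standalone sketch of that ordering using plain std::string keys; the real code sorts only dictionary-backed indices and converts them to strings only on request, whereas this sketch always does both.

// Illustrative sketch only (not part of the patch).
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

std::vector<std::string> PrependIndices(std::vector<uint32_t> indices,
                                        const std::vector<std::string>& property_keys) {
  // Indices come first, in ascending numeric order, stringified for the result.
  std::sort(indices.begin(), indices.end());
  std::vector<std::string> combined;
  combined.reserve(indices.size() + property_keys.size());
  for (uint32_t index : indices) combined.push_back(std::to_string(index));
  // The named property keys follow in their original order.
  combined.insert(combined.end(), property_keys.begin(), property_keys.end());
  return combined;
}
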
diff --git a/src/execution.cc b/src/execution.cc
index e6a569f..a092a8a 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -138,16 +138,6 @@
     Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
     SaveContext save(isolate);
     isolate->set_context(function->context());
-    // Do proper receiver conversion for non-strict mode api functions.
-    if (!receiver->IsJSReceiver() &&
-        is_sloppy(function->shared()->language_mode())) {
-      if (receiver->IsUndefined() || receiver->IsNull()) {
-        receiver = handle(function->global_proxy(), isolate);
-      } else {
-        ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
-                                   Object::ToObject(isolate, receiver), Object);
-      }
-    }
     DCHECK(function->context()->global_object()->IsJSGlobalObject());
     auto value = Builtins::InvokeApiFunction(function, receiver, argc, argv);
     bool has_exception = value.is_null();
diff --git a/src/external-reference-table.cc b/src/external-reference-table.cc
new file mode 100644
index 0000000..29a2474
--- /dev/null
+++ b/src/external-reference-table.cc
@@ -0,0 +1,354 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/external-reference-table.h"
+
+#include "src/accessors.h"
+#include "src/assembler.h"
+#include "src/counters.h"
+#include "src/deoptimizer.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
+  ExternalReferenceTable* external_reference_table =
+      isolate->external_reference_table();
+  if (external_reference_table == NULL) {
+    external_reference_table = new ExternalReferenceTable(isolate);
+    isolate->set_external_reference_table(external_reference_table);
+  }
+  return external_reference_table;
+}
+
+ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
+  // Miscellaneous
+  Add(ExternalReference::roots_array_start(isolate).address(),
+      "Heap::roots_array_start()");
+  Add(ExternalReference::address_of_stack_limit(isolate).address(),
+      "StackGuard::address_of_jslimit()");
+  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
+      "StackGuard::address_of_real_jslimit()");
+  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
+      "Heap::NewSpaceAllocationLimitAddress()");
+  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
+      "Heap::NewSpaceAllocationTopAddress()");
+  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
+      "mod_two_doubles");
+  // Keyed lookup cache.
+  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
+      "KeyedLookupCache::keys()");
+  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
+      "KeyedLookupCache::field_offsets()");
+  Add(ExternalReference::handle_scope_next_address(isolate).address(),
+      "HandleScope::next");
+  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
+      "HandleScope::limit");
+  Add(ExternalReference::handle_scope_level_address(isolate).address(),
+      "HandleScope::level");
+  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
+      "Deoptimizer::New()");
+  Add(ExternalReference::compute_output_frames_function(isolate).address(),
+      "Deoptimizer::ComputeOutputFrames()");
+  Add(ExternalReference::address_of_min_int().address(),
+      "LDoubleConstant::min_int");
+  Add(ExternalReference::address_of_one_half().address(),
+      "LDoubleConstant::one_half");
+  Add(ExternalReference::isolate_address(isolate).address(), "isolate");
+  Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
+      "Interpreter::dispatch_table_address");
+  Add(ExternalReference::address_of_negative_infinity().address(),
+      "LDoubleConstant::negative_infinity");
+  Add(ExternalReference::power_double_double_function(isolate).address(),
+      "power_double_double_function");
+  Add(ExternalReference::power_double_int_function(isolate).address(),
+      "power_double_int_function");
+  Add(ExternalReference::math_log_double_function(isolate).address(),
+      "std::log");
+  Add(ExternalReference::store_buffer_top(isolate).address(),
+      "store_buffer_top");
+  Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
+  Add(ExternalReference::get_date_field_function(isolate).address(),
+      "JSDate::GetField");
+  Add(ExternalReference::date_cache_stamp(isolate).address(),
+      "date_cache_stamp");
+  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
+      "address_of_pending_message_obj");
+  Add(ExternalReference::get_make_code_young_function(isolate).address(),
+      "Code::MakeCodeYoung");
+  Add(ExternalReference::cpu_features().address(), "cpu_features");
+  Add(ExternalReference::old_space_allocation_top_address(isolate).address(),
+      "Heap::OldSpaceAllocationTopAddress");
+  Add(ExternalReference::old_space_allocation_limit_address(isolate).address(),
+      "Heap::OldSpaceAllocationLimitAddress");
+  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
+      "Heap::allocation_sites_list_address()");
+  Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
+  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
+      "Code::MarkCodeAsExecuted");
+  Add(ExternalReference::is_profiling_address(isolate).address(),
+      "CpuProfiler::is_profiling");
+  Add(ExternalReference::scheduled_exception_address(isolate).address(),
+      "Isolate::scheduled_exception");
+  Add(ExternalReference::invoke_function_callback(isolate).address(),
+      "InvokeFunctionCallback");
+  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
+      "InvokeAccessorGetterCallback");
+  Add(ExternalReference::wasm_f32_trunc(isolate).address(),
+      "wasm::f32_trunc_wrapper");
+  Add(ExternalReference::wasm_f32_floor(isolate).address(),
+      "wasm::f32_floor_wrapper");
+  Add(ExternalReference::wasm_f32_ceil(isolate).address(),
+      "wasm::f32_ceil_wrapper");
+  Add(ExternalReference::wasm_f32_nearest_int(isolate).address(),
+      "wasm::f32_nearest_int_wrapper");
+  Add(ExternalReference::wasm_f64_trunc(isolate).address(),
+      "wasm::f64_trunc_wrapper");
+  Add(ExternalReference::wasm_f64_floor(isolate).address(),
+      "wasm::f64_floor_wrapper");
+  Add(ExternalReference::wasm_f64_ceil(isolate).address(),
+      "wasm::f64_ceil_wrapper");
+  Add(ExternalReference::wasm_f64_nearest_int(isolate).address(),
+      "wasm::f64_nearest_int_wrapper");
+  Add(ExternalReference::wasm_int64_to_float32(isolate).address(),
+      "wasm::int64_to_float32_wrapper");
+  Add(ExternalReference::wasm_uint64_to_float32(isolate).address(),
+      "wasm::uint64_to_float32_wrapper");
+  Add(ExternalReference::wasm_int64_to_float64(isolate).address(),
+      "wasm::int64_to_float64_wrapper");
+  Add(ExternalReference::wasm_uint64_to_float64(isolate).address(),
+      "wasm::uint64_to_float64_wrapper");
+  Add(ExternalReference::wasm_float32_to_int64(isolate).address(),
+      "wasm::float32_to_int64_wrapper");
+  Add(ExternalReference::wasm_float32_to_uint64(isolate).address(),
+      "wasm::float32_to_uint64_wrapper");
+  Add(ExternalReference::wasm_float64_to_int64(isolate).address(),
+      "wasm::float64_to_int64_wrapper");
+  Add(ExternalReference::wasm_float64_to_uint64(isolate).address(),
+      "wasm::float64_to_uint64_wrapper");
+  Add(ExternalReference::wasm_int64_div(isolate).address(), "wasm::int64_div");
+  Add(ExternalReference::wasm_int64_mod(isolate).address(), "wasm::int64_mod");
+  Add(ExternalReference::wasm_uint64_div(isolate).address(),
+      "wasm::uint64_div");
+  Add(ExternalReference::wasm_uint64_mod(isolate).address(),
+      "wasm::uint64_mod");
+  Add(ExternalReference::f64_acos_wrapper_function(isolate).address(),
+      "f64_acos_wrapper");
+  Add(ExternalReference::f64_asin_wrapper_function(isolate).address(),
+      "f64_asin_wrapper");
+  Add(ExternalReference::f64_atan_wrapper_function(isolate).address(),
+      "f64_atan_wrapper");
+  Add(ExternalReference::f64_cos_wrapper_function(isolate).address(),
+      "f64_cos_wrapper");
+  Add(ExternalReference::f64_sin_wrapper_function(isolate).address(),
+      "f64_sin_wrapper");
+  Add(ExternalReference::f64_tan_wrapper_function(isolate).address(),
+      "f64_tan_wrapper");
+  Add(ExternalReference::f64_exp_wrapper_function(isolate).address(),
+      "f64_exp_wrapper");
+  Add(ExternalReference::f64_log_wrapper_function(isolate).address(),
+      "f64_log_wrapper");
+  Add(ExternalReference::f64_pow_wrapper_function(isolate).address(),
+      "f64_pow_wrapper");
+  Add(ExternalReference::f64_atan2_wrapper_function(isolate).address(),
+      "f64_atan2_wrapper");
+  Add(ExternalReference::f64_mod_wrapper_function(isolate).address(),
+      "f64_mod_wrapper");
+  Add(ExternalReference::log_enter_external_function(isolate).address(),
+      "Logger::EnterExternal");
+  Add(ExternalReference::log_leave_external_function(isolate).address(),
+      "Logger::LeaveExternal");
+  Add(ExternalReference::address_of_minus_one_half().address(),
+      "double_constants.minus_one_half");
+  Add(ExternalReference::stress_deopt_count(isolate).address(),
+      "Isolate::stress_deopt_count_address()");
+  Add(ExternalReference::virtual_handler_register(isolate).address(),
+      "Isolate::virtual_handler_register()");
+  Add(ExternalReference::virtual_slot_register(isolate).address(),
+      "Isolate::virtual_slot_register()");
+  Add(ExternalReference::runtime_function_table_address(isolate).address(),
+      "Runtime::runtime_function_table_address()");
+  Add(ExternalReference::is_tail_call_elimination_enabled_address(isolate)
+          .address(),
+      "Isolate::is_tail_call_elimination_enabled_address()");
+
+  // Debug addresses
+  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
+      "Debug::after_break_target_address()");
+  Add(ExternalReference::debug_is_active_address(isolate).address(),
+      "Debug::is_active_address()");
+  Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
+      "Debug::step_in_enabled_address()");
+
+#ifndef V8_INTERPRETED_REGEXP
+  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
+      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
+  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
+      "RegExpMacroAssembler*::CheckStackGuardState()");
+  Add(ExternalReference::re_grow_stack(isolate).address(),
+      "NativeRegExpMacroAssembler::GrowStack()");
+  Add(ExternalReference::re_word_character_map().address(),
+      "NativeRegExpMacroAssembler::word_character_map");
+  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
+      "RegExpStack::limit_address()");
+  Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
+          .address(),
+      "RegExpStack::memory_address()");
+  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
+      "RegExpStack::memory_size()");
+  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
+      "OffsetsVector::static_offsets_vector");
+#endif  // V8_INTERPRETED_REGEXP
+
+  // The following populates all of the different type of external references
+  // into the ExternalReferenceTable.
+  //
+  // NOTE: This function was originally 100k of code.  It has since been
+  // rewritten to be mostly table driven, as the callback macro style tends to
+  // very easily cause code bloat.  Please be careful in the future when adding
+  // new references.
+
+  struct RefTableEntry {
+    uint16_t id;
+    const char* name;
+  };
+
+  static const RefTableEntry c_builtins[] = {
+#define DEF_ENTRY_C(name, ignored) {Builtins::c_##name, "Builtins::" #name},
+      BUILTIN_LIST_C(DEF_ENTRY_C)
+#undef DEF_ENTRY_C
+  };
+
+  for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
+    ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
+                          isolate);
+    Add(ref.address(), c_builtins[i].name);
+  }
+
+  static const RefTableEntry builtins[] = {
+#define DEF_ENTRY_C(name, ignored) {Builtins::k##name, "Builtins::" #name},
+#define DEF_ENTRY_A(name, i1, i2, i3) {Builtins::k##name, "Builtins::" #name},
+      BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
+          BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
+#undef DEF_ENTRY_C
+#undef DEF_ENTRY_A
+  };
+
+  for (unsigned i = 0; i < arraysize(builtins); ++i) {
+    ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
+    Add(ref.address(), builtins[i].name);
+  }
+
+  static const RefTableEntry runtime_functions[] = {
+#define RUNTIME_ENTRY(name, i1, i2) {Runtime::k##name, "Runtime::" #name},
+      FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
+#undef RUNTIME_ENTRY
+  };
+
+  for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
+    ExternalReference ref(
+        static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
+    Add(ref.address(), runtime_functions[i].name);
+  }
+
+  // Stat counters
+  struct StatsRefTableEntry {
+    StatsCounter* (Counters::*counter)();
+    const char* name;
+  };
+
+  static const StatsRefTableEntry stats_ref_table[] = {
+#define COUNTER_ENTRY(name, caption) {&Counters::name, "Counters::" #name},
+      STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
+#undef COUNTER_ENTRY
+  };
+
+  Counters* counters = isolate->counters();
+  for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
+    // To make sure the indices are not dependent on whether counters are
+    // enabled, use a dummy address as filler.
+    Address address = NotAvailable();
+    StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
+    if (counter->Enabled()) {
+      address = reinterpret_cast<Address>(counter->GetInternalPointer());
+    }
+    Add(address, stats_ref_table[i].name);
+  }
+
+  // Top addresses
+  static const char* address_names[] = {
+#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
+      FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
+#undef BUILD_NAME_LITERAL
+  };
+
+  for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
+    Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
+        address_names[i]);
+  }
+
+  // Accessors
+  struct AccessorRefTable {
+    Address address;
+    const char* name;
+  };
+
+  static const AccessorRefTable accessors[] = {
+#define ACCESSOR_INFO_DECLARATION(name) \
+  {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
+      ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+#undef ACCESSOR_INFO_DECLARATION
+#define ACCESSOR_SETTER_DECLARATION(name) \
+  {FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
+          ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+#undef ACCESSOR_SETTER_DECLARATION
+  };
+
+  for (unsigned i = 0; i < arraysize(accessors); ++i) {
+    Add(accessors[i].address, accessors[i].name);
+  }
+
+  StubCache* stub_cache = isolate->stub_cache();
+
+  // Stub cache tables
+  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
+      "StubCache::primary_->key");
+  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
+      "StubCache::primary_->value");
+  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
+      "StubCache::primary_->map");
+  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
+      "StubCache::secondary_->key");
+  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
+      "StubCache::secondary_->value");
+  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
+      "StubCache::secondary_->map");
+
+  // Runtime entries
+  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
+      "HandleScope::DeleteExtensions");
+  Add(ExternalReference::incremental_marking_record_write_function(isolate)
+          .address(),
+      "IncrementalMarking::RecordWrite");
+  Add(ExternalReference::incremental_marking_record_write_code_entry_function(
+          isolate)
+          .address(),
+      "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
+  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
+      "StoreBuffer::StoreBufferOverflow");
+
+  // Add a small set of deopt entry addresses to encoder without generating the
+  // deopt table code, which isn't possible at deserialization time.
+  HandleScope scope(isolate);
+  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+    Address address = Deoptimizer::GetDeoptimizationEntry(
+        isolate, entry, Deoptimizer::LAZY,
+        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+    Add(address, "lazy_deopt");
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
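
The NOTE near the top of the constructor explains why registration is table-driven: each list macro expands once into static RefTableEntry initializers, and one plain loop calls Add(), instead of generating a call per entry. Below is a minimal, self-contained sketch of that X-macro pattern; EXAMPLE_LIST, DEF_ENTRY and kRefs are hypothetical stand-ins for V8's BUILTIN_LIST_*/FOR_EACH_INTRINSIC macros and tables.

#include <cstdio>

// Hypothetical list macro standing in for BUILTIN_LIST_C and friends.
#define EXAMPLE_LIST(V) \
  V(Alpha, 0x1000)      \
  V(Beta, 0x2000)       \
  V(Gamma, 0x3000)

struct RefTableEntry {
  unsigned address;  // stands in for the real external reference address
  const char* name;
};

// The list macro expands exactly once, into static table initializers.
static const RefTableEntry kRefs[] = {
#define DEF_ENTRY(name, addr) {addr, "Example::" #name},
    EXAMPLE_LIST(DEF_ENTRY)
#undef DEF_ENTRY
};

// A single loop does the registration, so a new reference costs one table
// row rather than another block of generated code.
int main() {
  for (unsigned i = 0; i < sizeof(kRefs) / sizeof(kRefs[0]); ++i) {
    std::printf("Add(%#x, \"%s\")\n", kRefs[i].address, kRefs[i].name);
  }
  return 0;
}
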
diff --git a/src/external-reference-table.h b/src/external-reference-table.h
new file mode 100644
index 0000000..2ea4b14
--- /dev/null
+++ b/src/external-reference-table.h
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTERNAL_REFERENCE_TABLE_H_
+#define V8_EXTERNAL_REFERENCE_TABLE_H_
+
+#include "src/address-map.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// ExternalReferenceTable is a helper class that defines the relationship
+// between external references and their encodings. It is used to build
+// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
+class ExternalReferenceTable {
+ public:
+  static ExternalReferenceTable* instance(Isolate* isolate);
+
+  int size() const { return refs_.length(); }
+  Address address(int i) { return refs_[i].address; }
+  const char* name(int i) { return refs_[i].name; }
+
+  inline static Address NotAvailable() { return NULL; }
+
+  static const int kDeoptTableSerializeEntryCount = 64;
+
+ private:
+  struct ExternalReferenceEntry {
+    Address address;
+    const char* name;
+  };
+
+  explicit ExternalReferenceTable(Isolate* isolate);
+
+  void Add(Address address, const char* name) {
+    ExternalReferenceEntry entry = {address, name};
+    refs_.Add(entry);
+  }
+
+  List<ExternalReferenceEntry> refs_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
+};
+
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_EXTERNAL_REFERENCE_TABLE_H_
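
As the class comment says, the table pins down a stable ordering of (address, name) pairs so the serializer can encode an external address as an index and decode it back. A rough sketch of that encode/decode round trip, using a plain std::unordered_map in place of whatever hash map the real encoder builds, and with made-up sample entries:

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Stand-in for one row of the reference table: an address plus a debug name.
struct Entry {
  uintptr_t address;
  std::string name;
};

int main() {
  // The table fixes the order of external references...
  std::vector<Entry> table = {
      {0x1000, "Builtins::Abort"},
      {0x2000, "Runtime::kAdd"},
      {0x3000, "Counters::gc_count"},
  };

  // ...the encoder inverts it into address -> index for serialization...
  std::unordered_map<uintptr_t, int> encoder;
  for (int i = 0; i < static_cast<int>(table.size()); ++i) {
    encoder[table[i].address] = i;
  }

  // ...and the decoder simply indexes the table when deserializing.
  int encoded = encoder.at(0x2000);
  assert(table[encoded].name == "Runtime::kAdd");
  return 0;
}
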
diff --git a/src/factory.cc b/src/factory.cc
index 15ddb5f..41c3cb5 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -112,12 +112,12 @@
   return result;
 }
 
-
 Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
-                                    Handle<Object> to_number,
+                                    Handle<Object> to_number, bool to_boolean,
                                     const char* type_of, byte kind) {
   Handle<Oddball> oddball = New<Oddball>(map, OLD_SPACE);
-  Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
+  Oddball::Initialize(isolate(), oddball, to_string, to_number, to_boolean,
+                      type_of, kind);
   return oddball;
 }
 
@@ -800,6 +800,22 @@
   return context;
 }
 
+Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
+                                                 Handle<JSReceiver> extension,
+                                                 Handle<Context> wrapped,
+                                                 Handle<StringSet> whitelist) {
+  STATIC_ASSERT(Context::WHITE_LIST_INDEX == Context::MIN_CONTEXT_SLOTS + 1);
+  Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 2);
+  array->set_map_no_write_barrier(*debug_evaluate_context_map());
+  Handle<Context> c = Handle<Context>::cast(array);
+  c->set_closure(wrapped.is_null() ? previous->closure() : wrapped->closure());
+  c->set_previous(*previous);
+  c->set_native_context(previous->native_context());
+  if (!extension.is_null()) c->set(Context::EXTENSION_INDEX, *extension);
+  if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
+  if (!whitelist.is_null()) c->set(Context::WHITE_LIST_INDEX, *whitelist);
+  return c;
+}
 
 Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
                                         Handle<Context> previous,
@@ -859,6 +875,7 @@
   Handle<AccessorInfo> info =
       Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
   info->set_flag(0);  // Must clear the flag, it was initialized as undefined.
+  info->set_is_sloppy(true);
   return info;
 }
 
@@ -1350,33 +1367,8 @@
     info->ResetForNewContext(isolate()->heap()->global_ic_age());
   }
 
-  if (FLAG_always_opt && info->allows_lazy_compilation()) {
-    result->MarkForOptimization();
-  }
-
-  CodeAndLiterals cached = info->SearchOptimizedCodeMap(
-      context->native_context(), BailoutId::None());
-  if (cached.code != nullptr) {
-    // Caching of optimized code enabled and optimized code found.
-    DCHECK(!cached.code->marked_for_deoptimization());
-    DCHECK(result->shared()->is_compiled());
-    result->ReplaceCode(cached.code);
-  }
-
-  if (cached.literals != nullptr) {
-    result->set_literals(cached.literals);
-  } else {
-    int number_of_literals = info->num_literals();
-    Handle<LiteralsArray> literals =
-        LiteralsArray::New(isolate(), handle(info->feedback_vector()),
-                           number_of_literals, pretenure);
-    result->set_literals(*literals);
-
-    // Cache context-specific literals.
-    Handle<Context> native_context(context->native_context());
-    SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(info, native_context,
-                                                      literals);
-  }
+  // Give compiler a chance to pre-initialize.
+  Compiler::PostInstantiation(result, pretenure);
 
   return result;
 }
@@ -1507,6 +1499,14 @@
       JSObject);
 }
 
+Handle<JSObject> Factory::NewJSObjectWithNullProto() {
+  Handle<JSObject> result = NewJSObject(isolate()->object_function());
+  Handle<Map> new_map =
+      Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
+  Map::SetPrototype(new_map, null_value());
+  JSObject::MigrateToMap(result, new_map);
+  return result;
+}
 
 Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
                                       Handle<ScopeInfo> scope_info) {
@@ -1596,11 +1596,9 @@
 
 
 Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
-                                    Strength strength,
                                     PretenureFlag pretenure) {
-  Map* map = isolate()->get_initial_js_array_map(elements_kind, strength);
+  Map* map = isolate()->get_initial_js_array_map(elements_kind);
   if (map == nullptr) {
-    DCHECK(strength == Strength::WEAK);
     Context* native_context = isolate()->context()->native_context();
     JSFunction* array_function = native_context->array_function();
     map = array_function->initial_map();
@@ -1608,23 +1606,21 @@
   return Handle<JSArray>::cast(NewJSObjectFromMap(handle(map), pretenure));
 }
 
-
 Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
-                                    int capacity, Strength strength,
+                                    int capacity,
                                     ArrayStorageAllocationMode mode,
                                     PretenureFlag pretenure) {
-  Handle<JSArray> array = NewJSArray(elements_kind, strength, pretenure);
+  Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
   NewJSArrayStorage(array, length, capacity, mode);
   return array;
 }
 
-
 Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
                                                 ElementsKind elements_kind,
-                                                int length, Strength strength,
+                                                int length,
                                                 PretenureFlag pretenure) {
   DCHECK(length <= elements->length());
-  Handle<JSArray> array = NewJSArray(elements_kind, strength, pretenure);
+  Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
 
   array->set_elements(*elements);
   array->set_length(Smi::FromInt(length));
@@ -2071,16 +2067,13 @@
   object->set_hash(*hash);
 }
 
-
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name, int number_of_literals, FunctionKind kind,
-    Handle<Code> code, Handle<ScopeInfo> scope_info,
-    Handle<TypeFeedbackVector> feedback_vector) {
+    Handle<Code> code, Handle<ScopeInfo> scope_info) {
   DCHECK(IsValidFunctionKind(kind));
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
       name, code, IsConstructable(kind, scope_info->language_mode()));
   shared->set_scope_info(*scope_info);
-  shared->set_feedback_vector(*feedback_vector);
   shared->set_kind(kind);
   shared->set_num_literals(number_of_literals);
   if (IsGeneratorFunction(kind)) {
@@ -2136,7 +2129,7 @@
   share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_debug_info(DebugInfo::uninitialized(), SKIP_WRITE_BARRIER);
-  share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
+  share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
   StaticFeedbackVectorSpec empty_spec;
   Handle<TypeFeedbackMetadata> feedback_metadata =
       TypeFeedbackMetadata::New(isolate(), &empty_spec);
@@ -2300,7 +2293,6 @@
 
 Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
                                                int number_of_properties,
-                                               bool is_strong,
                                                bool* is_result_from_cache) {
   const int kMapCacheSize = 128;
 
@@ -2309,29 +2301,21 @@
       isolate()->bootstrapper()->IsActive()) {
     *is_result_from_cache = false;
     Handle<Map> map = Map::Create(isolate(), number_of_properties);
-    if (is_strong) map->set_is_strong();
     return map;
   }
   *is_result_from_cache = true;
   if (number_of_properties == 0) {
     // Reuse the initial map of the Object function if the literal has no
-    // predeclared properties, or the strong map if strong.
-    return handle(is_strong
-                      ? context->js_object_strong_map()
-                      : context->object_function()->initial_map(), isolate());
+    // predeclared properties.
+    return handle(context->object_function()->initial_map(), isolate());
   }
 
   int cache_index = number_of_properties - 1;
-  Handle<Object> maybe_cache(is_strong ? context->strong_map_cache()
-                                       : context->map_cache(), isolate());
+  Handle<Object> maybe_cache(context->map_cache(), isolate());
   if (maybe_cache->IsUndefined()) {
     // Allocate the new map cache for the native context.
     maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
-    if (is_strong) {
-      context->set_strong_map_cache(*maybe_cache);
-    } else {
-      context->set_map_cache(*maybe_cache);
-    }
+    context->set_map_cache(*maybe_cache);
   } else {
     // Check to see whether there is a matching element in the cache.
     Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
@@ -2346,7 +2330,6 @@
   // Create a new map and add it to the cache.
   Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
   Handle<Map> map = Map::Create(isolate(), number_of_properties);
-  if (is_strong) map->set_is_strong();
   Handle<WeakCell> cell = NewWeakCell(map);
   cache->set(cache_index, *cell);
   return map;
diff --git a/src/factory.h b/src/factory.h
index dd107d1..2fa2901 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -16,8 +16,8 @@
 class Factory final {
  public:
   Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
-                             Handle<Object> to_number, const char* type_of,
-                             byte kind);
+                             Handle<Object> to_number, bool to_boolean,
+                             const char* type_of, byte kind);
 
   // Allocates a fixed array initialized with undefined values.
   Handle<FixedArray> NewFixedArray(
@@ -256,6 +256,11 @@
                                  Handle<Context> previous,
                                  Handle<JSReceiver> extension);
 
+  Handle<Context> NewDebugEvaluateContext(Handle<Context> previous,
+                                          Handle<JSReceiver> extension,
+                                          Handle<Context> wrapped,
+                                          Handle<StringSet> whitelist);
+
   // Create a block context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
@@ -389,6 +394,8 @@
   // JSObject that should have a memento pointing to the allocation site.
   Handle<JSObject> NewJSObjectWithMemento(Handle<JSFunction> constructor,
                                           Handle<AllocationSite> site);
+  // JSObject without a prototype.
+  Handle<JSObject> NewJSObjectWithNullProto();
 
   // Global objects are pretenured and initialized based on a constructor.
   Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
@@ -410,34 +417,30 @@
   // according to the specified mode.
   Handle<JSArray> NewJSArray(
       ElementsKind elements_kind, int length, int capacity,
-      Strength strength = Strength::WEAK,
       ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
       PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSArray> NewJSArray(
       int capacity, ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
-      Strength strength = Strength::WEAK,
       PretenureFlag pretenure = NOT_TENURED) {
     if (capacity != 0) {
       elements_kind = GetHoleyElementsKind(elements_kind);
     }
-    return NewJSArray(elements_kind, 0, capacity, strength,
+    return NewJSArray(elements_kind, 0, capacity,
                       INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
   }
 
   // Create a JSArray with the given elements.
   Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
                                          ElementsKind elements_kind, int length,
-                                         Strength strength = Strength::WEAK,
                                          PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSArray> NewJSArrayWithElements(
       Handle<FixedArrayBase> elements,
       ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
-      Strength strength = Strength::WEAK,
       PretenureFlag pretenure = NOT_TENURED) {
     return NewJSArrayWithElements(elements, elements_kind, elements->length(),
-                                  strength, pretenure);
+                                  pretenure);
   }
 
   void NewJSArrayStorage(
@@ -631,8 +634,7 @@
   // Allocates a new SharedFunctionInfo object.
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(
       Handle<String> name, int number_of_literals, FunctionKind kind,
-      Handle<Code> code, Handle<ScopeInfo> scope_info,
-      Handle<TypeFeedbackVector> feedback_vector);
+      Handle<Code> code, Handle<ScopeInfo> scope_info);
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
                                                    MaybeHandle<Code> code,
                                                    bool is_constructor);
@@ -651,7 +653,6 @@
   // native context.
   Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
                                         int number_of_properties,
-                                        bool is_strong,
                                         bool* is_result_from_cache);
 
   // Creates a new FixedArray that holds the data associated with the
@@ -710,7 +711,6 @@
 
   // Create a JSArray with no elements and no length.
   Handle<JSArray> NewJSArray(ElementsKind elements_kind,
-                             Strength strength = Strength::WEAK,
                              PretenureFlag pretenure = NOT_TENURED);
 };
 
diff --git a/src/fast-accessor-assembler.cc b/src/fast-accessor-assembler.cc
new file mode 100644
index 0000000..cd2910c
--- /dev/null
+++ b/src/fast-accessor-assembler.cc
@@ -0,0 +1,231 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/fast-accessor-assembler.h"
+
+#include "src/base/logging.h"
+#include "src/code-stubs.h"  // For CallApiCallbackStub.
+#include "src/compiler/code-stub-assembler.h"
+#include "src/handles-inl.h"
+#include "src/objects.h"  // For FAA::LoadInternalField impl.
+
+using v8::internal::compiler::CodeStubAssembler;
+using v8::internal::compiler::Node;
+
+namespace v8 {
+namespace internal {
+
+FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
+    : zone_(isolate->allocator()),
+      isolate_(isolate),
+      assembler_(new CodeStubAssembler(isolate, zone(), 1,
+                                       Code::ComputeFlags(Code::STUB),
+                                       "FastAccessorAssembler")),
+      state_(kBuilding) {}
+
+FastAccessorAssembler::~FastAccessorAssembler() { Clear(); }
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
+    int const_value) {
+  CHECK_EQ(kBuilding, state_);
+  return FromRaw(assembler_->NumberConstant(const_value));
+}
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
+  CHECK_EQ(kBuilding, state_);
+
+  // For JS functions, the receiver is parameter 0.
+  return FromRaw(assembler_->Parameter(0));
+}
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
+    ValueId value, int field_no) {
+  CHECK_EQ(kBuilding, state_);
+
+  // Determine the 'value' object's instance type.
+  Node* object_map = assembler_->LoadObjectField(
+      FromId(value), Internals::kHeapObjectMapOffset, MachineType::Pointer());
+  Node* instance_type = assembler_->WordAnd(
+      assembler_->LoadObjectField(object_map,
+                                  Internals::kMapInstanceTypeAndBitFieldOffset,
+                                  MachineType::Uint16()),
+      assembler_->IntPtrConstant(0xff));
+
+  // Check whether we have a proper JSObject.
+  CodeStubAssembler::Variable result(assembler_.get(),
+                                     MachineRepresentation::kTagged);
+  CodeStubAssembler::Label is_jsobject(assembler_.get());
+  CodeStubAssembler::Label is_not_jsobject(assembler_.get());
+  CodeStubAssembler::Label merge(assembler_.get(), &result);
+  assembler_->Branch(
+      assembler_->WordEqual(
+          instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
+      &is_jsobject, &is_not_jsobject);
+
+  // JSObject? Then load the internal field field_no.
+  assembler_->Bind(&is_jsobject);
+  Node* internal_field = assembler_->LoadObjectField(
+      FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
+      MachineType::Pointer());
+  result.Bind(internal_field);
+  assembler_->Goto(&merge);
+
+  // No JSObject? Return undefined.
+  // TODO(vogelheim): Check whether this is the appropriate action, or whether
+  //                  the method should take a label instead.
+  assembler_->Bind(&is_not_jsobject);
+  Node* fail_value = assembler_->UndefinedConstant();
+  result.Bind(fail_value);
+  assembler_->Goto(&merge);
+
+  // Return.
+  assembler_->Bind(&merge);
+  return FromRaw(result.value());
+}
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
+                                                                int offset) {
+  CHECK_EQ(kBuilding, state_);
+  return FromRaw(assembler_->LoadBufferObject(FromId(value), offset,
+                                              MachineType::IntPtr()));
+}
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
+                                                                 int offset) {
+  CHECK_EQ(kBuilding, state_);
+  return FromRaw(assembler_->LoadBufferObject(
+      assembler_->LoadBufferObject(FromId(value), offset,
+                                   MachineType::Pointer()),
+      0, MachineType::AnyTagged()));
+}
+
+void FastAccessorAssembler::ReturnValue(ValueId value) {
+  CHECK_EQ(kBuilding, state_);
+  assembler_->Return(FromId(value));
+}
+
+void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
+  CHECK_EQ(kBuilding, state_);
+  CodeStubAssembler::Label pass(assembler_.get());
+  CodeStubAssembler::Label fail(assembler_.get());
+  assembler_->Branch(
+      assembler_->Word32Equal(
+          assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
+          assembler_->Int32Constant(0)),
+      &pass, &fail);
+  assembler_->Bind(&fail);
+  assembler_->Return(assembler_->NullConstant());
+  assembler_->Bind(&pass);
+}
+
+void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
+  CHECK_EQ(kBuilding, state_);
+  CodeStubAssembler::Label is_null(assembler_.get());
+  CodeStubAssembler::Label not_null(assembler_.get());
+  assembler_->Branch(
+      assembler_->WordEqual(FromId(value), assembler_->IntPtrConstant(0)),
+      &is_null, &not_null);
+  assembler_->Bind(&is_null);
+  assembler_->Return(assembler_->NullConstant());
+  assembler_->Bind(&not_null);
+}
+
+FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
+  CHECK_EQ(kBuilding, state_);
+  return FromRaw(new CodeStubAssembler::Label(assembler_.get()));
+}
+
+void FastAccessorAssembler::SetLabel(LabelId label_id) {
+  CHECK_EQ(kBuilding, state_);
+  assembler_->Bind(FromId(label_id));
+}
+
+void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
+                                               LabelId label_id) {
+  CHECK_EQ(kBuilding, state_);
+  CodeStubAssembler::Label pass(assembler_.get());
+  assembler_->Branch(
+      assembler_->WordEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
+      &pass, FromId(label_id));
+  assembler_->Bind(&pass);
+}
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
+    FunctionCallback callback_function, ValueId arg) {
+  CHECK_EQ(kBuilding, state_);
+
+  // Wrap the FunctionCallback in an ExternalReference.
+  ApiFunction callback_api_function(FUNCTION_ADDR(callback_function));
+  ExternalReference callback(&callback_api_function,
+                             ExternalReference::DIRECT_API_CALL, isolate());
+
+  // Create & call API callback via stub.
+  CallApiCallbackStub stub(isolate(), 1, true);
+  DCHECK_EQ(5, stub.GetCallInterfaceDescriptor().GetParameterCount());
+  DCHECK_EQ(1, stub.GetCallInterfaceDescriptor().GetStackParameterCount());
+  // TODO(vogelheim): There is currently no clean way to retrieve the context
+  //     parameter for a stub, and the implementation details are hidden in
+  //     compiler/*. The context_parameter is computed as:
+  //       Linkage::GetJSCallContextParamIndex(descriptor->JSParameterCount())
+  const int context_parameter = 2;
+  Node* call = assembler_->CallStub(
+      stub.GetCallInterfaceDescriptor(),
+      assembler_->HeapConstant(stub.GetCode()),
+      assembler_->Parameter(context_parameter),
+
+      // Stub/register parameters:
+      assembler_->Parameter(0),               /* receiver (use accessor's) */
+      assembler_->UndefinedConstant(),        /* call_data (undefined) */
+      assembler_->NullConstant(),             /* holder (null) */
+      assembler_->ExternalConstant(callback), /* API callback function */
+
+      // JS arguments, on stack:
+      FromId(arg));
+
+  return FromRaw(call);
+}
+
+MaybeHandle<Code> FastAccessorAssembler::Build() {
+  CHECK_EQ(kBuilding, state_);
+  Handle<Code> code = assembler_->GenerateCode();
+  state_ = !code.is_null() ? kBuilt : kError;
+  Clear();
+  return code;
+}
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
+  nodes_.push_back(node);
+  ValueId value = {nodes_.size() - 1};
+  return value;
+}
+
+FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
+    CodeStubAssembler::Label* label) {
+  labels_.push_back(label);
+  LabelId label_id = {labels_.size() - 1};
+  return label_id;
+}
+
+Node* FastAccessorAssembler::FromId(ValueId value) const {
+  CHECK_LT(value.value_id, nodes_.size());
+  CHECK_NOT_NULL(nodes_.at(value.value_id));
+  return nodes_.at(value.value_id);
+}
+
+CodeStubAssembler::Label* FastAccessorAssembler::FromId(LabelId label) const {
+  CHECK_LT(label.label_id, labels_.size());
+  CHECK_NOT_NULL(labels_.at(label.label_id));
+  return labels_.at(label.label_id);
+}
+
+void FastAccessorAssembler::Clear() {
+  for (auto label : labels_) {
+    delete label;
+  }
+  nodes_.clear();
+  labels_.clear();
+}
+
+}  // namespace internal
+}  // namespace v8
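
FromRaw() and FromId() above implement a small handle table: internal compiler::Node and Label pointers are stored in vectors, and only their indices, wrapped in ValueId/LabelId structs, ever cross the API boundary. A generic, self-contained sketch of that pattern (hypothetical Builder/InternalNode names, with assert standing in for CHECK_LT/CHECK_NOT_NULL):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Internal type that must not leak into the public interface.
struct InternalNode {
  std::string label;
};

// Opaque handle handed to API clients instead of an InternalNode*.
struct ValueId {
  size_t value_id;
};

class Builder {
 public:
  // Record a freshly created internal node and hand back an opaque index.
  ValueId FromRaw(InternalNode* node) {
    nodes_.push_back(node);
    return ValueId{nodes_.size() - 1};
  }

  // Translate the opaque index back into the internal pointer, with the same
  // strict bounds and null checks the real code performs.
  InternalNode* FromId(ValueId id) const {
    assert(id.value_id < nodes_.size());
    assert(nodes_[id.value_id] != nullptr);
    return nodes_[id.value_id];
  }

 private:
  std::vector<InternalNode*> nodes_;
};

int main() {
  Builder builder;
  InternalNode node{"receiver"};
  ValueId id = builder.FromRaw(&node);
  assert(builder.FromId(id)->label == "receiver");
  return 0;
}
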
diff --git a/src/compiler/fast-accessor-assembler.h b/src/fast-accessor-assembler.h
similarity index 79%
rename from src/compiler/fast-accessor-assembler.h
rename to src/fast-accessor-assembler.h
index 1cb751d..57e72e8 100644
--- a/src/compiler/fast-accessor-assembler.h
+++ b/src/fast-accessor-assembler.h
@@ -2,19 +2,19 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
-#define V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+#ifndef V8_FAST_ACCESSOR_ASSEMBLER_H_
+#define V8_FAST_ACCESSOR_ASSEMBLER_H_
 
 #include <stdint.h>
 #include <vector>
 
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
 #include "include/v8-experimental.h"
 #include "src/base/macros.h"
 #include "src/base/smart-pointers.h"
 #include "src/handles.h"
 
+// For CodeStubAssembler::Label. (We cannot forward-declare inner classes.)
+#include "src/compiler/code-stub-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -24,11 +24,8 @@
 class Zone;
 
 namespace compiler {
-
 class Node;
-class RawMachineAssembler;
-class RawMachineLabel;
-
+}
 
 // This interface "exports" an aggregated subset of RawMachineAssembler, for
 // use by the API to implement Fast Dom Accessors.
@@ -75,21 +72,24 @@
   MaybeHandle<Code> Build();
 
  private:
-  ValueId FromRaw(Node* node);
-  LabelId FromRaw(RawMachineLabel* label);
-  Node* FromId(ValueId value) const;
-  RawMachineLabel* FromId(LabelId value) const;
+  ValueId FromRaw(compiler::Node* node);
+  LabelId FromRaw(compiler::CodeStubAssembler::Label* label);
+  compiler::Node* FromId(ValueId value) const;
+  compiler::CodeStubAssembler::Label* FromId(LabelId value) const;
 
+  void Clear();
   Zone* zone() { return &zone_; }
+  Isolate* isolate() const { return isolate_; }
 
   Zone zone_;
-  base::SmartPointer<RawMachineAssembler> assembler_;
+  Isolate* isolate_;
+  base::SmartPointer<compiler::CodeStubAssembler> assembler_;
 
   // To prevent exposing the RMA internals to the outside world, we'll map
   // Node + Label pointers to integers wrapped in ValueId and LabelId
   // instances. These vectors maintain this mapping.
-  std::vector<Node*> nodes_;
-  std::vector<RawMachineLabel*> labels_;
+  std::vector<compiler::Node*> nodes_;
+  std::vector<compiler::CodeStubAssembler::Label*> labels_;
 
   // Remember the current state for easy error checking. (We prefer to be
   // strict as this class will be exposed through the API.)
@@ -98,8 +98,7 @@
   DISALLOW_COPY_AND_ASSIGN(FastAccessorAssembler);
 };
 
-}  // namespace compiler
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+#endif  // V8_FAST_ACCESSOR_ASSEMBLER_H_
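
The state_ member noted at the end of the header enforces a strict build protocol: every mutating call checks CHECK_EQ(kBuilding, state_), and Build() transitions to kBuilt or kError, after which further use is a programming error. A tiny sketch of that builder state machine, with hypothetical names and assert in place of V8's CHECK macros:

#include <cassert>

class StrictBuilder {
 public:
  enum State { kBuilding, kBuilt, kError };

  // Mutators are only legal while still building.
  void AddStep() {
    assert(state_ == kBuilding);
    ++steps_;
  }

  // Build() is the one-way transition out of kBuilding.
  bool Build() {
    assert(state_ == kBuilding);
    state_ = steps_ > 0 ? kBuilt : kError;
    return state_ == kBuilt;
  }

 private:
  State state_ = kBuilding;
  int steps_ = 0;
};

int main() {
  StrictBuilder builder;
  builder.AddStep();
  bool built = builder.Build();
  return built ? 0 : 1;
}
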
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index ac430ab..c1fffec 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -177,12 +177,6 @@
 
 // Flags for language modes and experimental language features.
 DEFINE_BOOL(use_strict, false, "enforce strict mode")
-DEFINE_BOOL(use_strong, false, "enforce strong mode")
-DEFINE_IMPLICATION(use_strong, use_strict)
-
-DEFINE_BOOL(strong_mode, false, "experimental strong language mode")
-DEFINE_IMPLICATION(use_strong, strong_mode)
-DEFINE_BOOL(strong_this, true, "don't allow 'this' to escape from constructors")
 
 DEFINE_BOOL(es_staging, false,
             "enable test-worthy harmony features (for internal use only)")
@@ -190,10 +184,6 @@
 DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
 DEFINE_IMPLICATION(es_staging, harmony)
 
-DEFINE_BOOL(legacy_const, false, "legacy semantics for const in sloppy mode")
-// ES2015 const semantics are shipped
-DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, legacy_const, true)
-
 DEFINE_BOOL(promise_extra, true, "additional V8 Promise functions")
 // Removing extra Promise functions is staged
 DEFINE_NEG_IMPLICATION(harmony, promise_extra)
@@ -201,44 +191,41 @@
 // Activate on ClusterFuzz.
 DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
 DEFINE_IMPLICATION(es_staging, move_object_start)
-DEFINE_IMPLICATION(es_staging, harmony_tailcalls)
 
 // Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V)                                                \
-  V(harmony_object_observe, "harmony Object.observe")                        \
-  V(harmony_modules, "harmony modules")                                      \
-  V(harmony_function_sent, "harmony function.sent")                          \
-  V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")                  \
-  V(harmony_simd, "harmony simd")                                            \
-  V(harmony_do_expressions, "harmony do-expressions")                        \
-  V(harmony_iterator_close, "harmony iterator finalization")                 \
+#define HARMONY_INPROGRESS(V)                                           \
+  V(harmony_object_observe, "harmony Object.observe")                   \
+  V(harmony_function_sent, "harmony function.sent")                     \
+  V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
+  V(harmony_simd, "harmony simd")                                       \
+  V(harmony_do_expressions, "harmony do-expressions")                   \
+  V(harmony_regexp_property, "harmony unicode regexp property classes") \
+  V(harmony_string_padding, "harmony String-padding methods")
+
+// Features that are complete (but still behind --harmony/es-staging flag).
+#define HARMONY_STAGED(V)                                                    \
+  V(harmony_regexp_lookbehind, "harmony regexp lookbehind")                  \
   V(harmony_tailcalls, "harmony tail calls")                                 \
   V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
   V(harmony_object_own_property_descriptors,                                 \
     "harmony Object.getOwnPropertyDescriptors()")                            \
-  V(harmony_regexp_property, "harmony unicode regexp property classes")
-
-// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V)                                     \
-  V(harmony_function_name, "harmony Function name inference") \
-  V(harmony_regexp_lookbehind, "harmony regexp lookbehind")   \
-  V(harmony_species, "harmony Symbol.species")                \
-  V(harmony_instanceof, "harmony instanceof support")
+  V(harmony_exponentiation_operator, "harmony exponentiation operator `**`")
 
 // Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V)                                               \
-  V(harmony_default_parameters, "harmony default parameters")             \
-  V(harmony_destructuring_assignment, "harmony destructuring assignment") \
-  V(harmony_destructuring_bind, "harmony destructuring bind")             \
-  V(harmony_tostring, "harmony toString")                                 \
-  V(harmony_regexps, "harmony regular expression extensions")             \
-  V(harmony_unicode_regexps, "harmony unicode regexps")                   \
-  V(harmony_sloppy, "harmony features in sloppy mode")                    \
-  V(harmony_sloppy_let, "harmony let in sloppy mode")                     \
-  V(harmony_sloppy_function, "harmony sloppy function block scoping")     \
-  V(harmony_proxies, "harmony proxies")                                   \
-  V(harmony_reflect, "harmony Reflect API")                               \
-  V(harmony_regexp_subclass, "harmony regexp subclassing")
+#define HARMONY_SHIPPING(V)                                           \
+  V(harmony_array_prototype_values, "harmony Array.prototype.values") \
+  V(harmony_function_name, "harmony Function name inference")         \
+  V(harmony_instanceof, "harmony instanceof support")                 \
+  V(harmony_iterator_close, "harmony iterator finalization")          \
+  V(harmony_unicode_regexps, "harmony unicode regexps")               \
+  V(harmony_regexp_exec, "harmony RegExp exec override behavior")     \
+  V(harmony_sloppy, "harmony features in sloppy mode")                \
+  V(harmony_sloppy_let, "harmony let in sloppy mode")                 \
+  V(harmony_sloppy_function, "harmony sloppy function block scoping") \
+  V(harmony_regexp_subclass, "harmony regexp subclassing")            \
+  V(harmony_restrictive_declarations,                                 \
+    "harmony limitations on sloppy mode function declarations")       \
+  V(harmony_species, "harmony Symbol.species")
 
 // Once a shipping feature has proved stable in the wild, it will be dropped
 // from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -268,10 +255,6 @@
 DEFINE_IMPLICATION(harmony_sloppy_let, harmony_sloppy)
 DEFINE_IMPLICATION(harmony_sloppy_function, harmony_sloppy)
 
-// Destructuring shares too much parsing architecture with default parameters
-// to be enabled on its own.
-DEFINE_IMPLICATION(harmony_destructuring_bind, harmony_default_parameters)
-
 // Flags for experimental implementation features.
 DEFINE_BOOL(compiled_keyed_generic_loads, false,
             "use optimizing compiler to generate keyed generic load stubs")
@@ -285,6 +268,7 @@
 DEFINE_BOOL(track_double_fields, true, "track fields with double values")
 DEFINE_BOOL(track_heap_object_fields, true, "track fields with heap values")
 DEFINE_BOOL(track_computed_fields, true, "track computed boilerplate fields")
+DEFINE_BOOL(harmony_instanceof_opt, true, "optimize ES6 instanceof support")
 DEFINE_IMPLICATION(track_double_fields, track_fields)
 DEFINE_IMPLICATION(track_heap_object_fields, track_fields)
 DEFINE_IMPLICATION(track_computed_fields, track_fields)
@@ -306,6 +290,7 @@
 
 // Flags for Ignition.
 DEFINE_BOOL(ignition, false, "use ignition interpreter")
+DEFINE_BOOL(ignition_eager, true, "eagerly compile and parse with ignition")
 DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
 DEFINE_BOOL(print_bytecode, false,
             "print bytecode generated by ignition interpreter")
@@ -377,8 +362,6 @@
 DEFINE_BOOL(array_bounds_checks_elimination, true,
             "perform array bounds checks elimination")
 DEFINE_BOOL(trace_bce, false, "trace array bounds check elimination")
-DEFINE_BOOL(array_bounds_checks_hoisting, false,
-            "perform array bounds checks hoisting")
 DEFINE_BOOL(array_index_dehoisting, true, "perform array index dehoisting")
 DEFINE_BOOL(analyze_environment_liveness, true,
             "analyze liveness of environment slots and zap dead values")
@@ -412,8 +395,6 @@
            "artificial compilation delay in ms")
 DEFINE_BOOL(block_concurrent_recompilation, false,
             "block queued jobs until released")
-DEFINE_BOOL(concurrent_osr, false, "concurrent on-stack replacement")
-DEFINE_IMPLICATION(concurrent_osr, concurrent_recompilation)
 
 DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
             "do not emit check maps for constant values that have a leaf map, "
@@ -477,18 +458,23 @@
 
 // Flags for native WebAssembly.
 DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
+DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
 DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
 DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
 DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
-DEFINE_BOOL(trace_wasm_ast, false, "dump AST after WASM decode")
+DEFINE_INT(trace_wasm_ast_start, 0,
+           "start function for WASM AST trace (inclusive)")
+DEFINE_INT(trace_wasm_ast_end, 0, "end function for WASM AST trace (exclusive)")
+DEFINE_INT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
 DEFINE_BOOL(wasm_break_on_decoder_error, false,
             "debug break when wasm decoder encounters an error")
+DEFINE_BOOL(wasm_loop_assignment_analysis, true,
+            "perform loop assignment analysis for WASM")
 
 DEFINE_BOOL(enable_simd_asmjs, false, "enable SIMD.js in asm.js stdlib")
 
-DEFINE_BOOL(dump_asmjs_wasm, false, "dump Asm.js to WASM module bytes")
-DEFINE_STRING(asmjs_wasm_dumpfile, "asmjs.wasm",
-              "file to dump asm wasm conversion result to")
+DEFINE_BOOL(dump_wasm_module, false, "dump WASM module bytes")
+DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
 
 DEFINE_INT(typed_array_max_size_in_heap, 64,
            "threshold for in-heap typed array")
@@ -592,6 +578,8 @@
             "trace deoptimization of generated code stubs")
 
 DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
+DEFINE_BOOL(serialize_eager, false, "compile eagerly when caching scripts")
+DEFINE_BOOL(serialize_age_code, false, "pre age code in the code cache")
 DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
 
 // compiler.cc
@@ -620,8 +608,6 @@
 DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
 
 // debugger
-DEFINE_BOOL(debug_eval_readonly_locals, true,
-            "do not update locals after debug-evaluate")
 DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
 DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
 DEFINE_BOOL(hard_abort, true, "abort by crashing")
@@ -682,6 +668,7 @@
 DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
 DEFINE_BOOL(trace_fragmentation_verbose, false,
             "report fragmentation for old space (detailed)")
+DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
 DEFINE_BOOL(trace_mutator_utilization, false,
             "print mutator utilization, allocation speed, gc speed")
 DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
@@ -699,8 +686,11 @@
            "least this many unmarked objects")
 DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
            "at most try this many times to finalize incremental marking")
+DEFINE_BOOL(black_allocation, true, "use black allocation")
 DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
 DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
+DEFINE_BOOL(parallel_pointer_update, true,
+            "use parallel pointer update during compaction")
 DEFINE_BOOL(trace_incremental_marking, false,
             "trace progress of the incremental marking")
 DEFINE_BOOL(track_gc_object_stats, false,
@@ -718,7 +708,7 @@
 #endif
 DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
 DEFINE_BOOL(memory_reducer, true, "use memory reducer")
-DEFINE_BOOL(scavenge_reclaim_unmodified_objects, false,
+DEFINE_BOOL(scavenge_reclaim_unmodified_objects, true,
             "remove unmodified and unreferenced objects")
 DEFINE_INT(heap_growing_percent, 0,
            "specifies heap growing factor as (1 + heap_growing_percent/100)")
@@ -727,6 +717,9 @@
 DEFINE_INT(histogram_interval, 600000,
            "time interval in ms for aggregating memory histograms")
 
+// global-handles.cc
+DEFINE_BOOL(trace_object_groups, false,
+            "print object groups detected during each garbage collection")
 
 // heap-snapshot-generator.cc
 DEFINE_BOOL(heap_profiler_trace_objects, false,
@@ -776,6 +769,7 @@
             "Collapse prototype chain checks into single-cell checks")
 DEFINE_IMPLICATION(eliminate_prototype_chain_checks, track_prototype_users)
 DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
+DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
 #if TRACE_MAPS
 DEFINE_BOOL(trace_maps, false, "trace map creation")
 #endif
@@ -862,7 +856,6 @@
 
 DEFINE_BOOL(predictable, false, "enable predictable mode")
 DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
 DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
 DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
 DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
@@ -991,6 +984,9 @@
             "trace regexp macro assembler calls.")
 DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
 
+// Debugger
+DEFINE_BOOL(print_break_location, false, "print source location on debug break")
+
 //
 // Logging and profiling flags
 //
@@ -1007,8 +1003,6 @@
 DEFINE_BOOL(log_gc, false,
             "Log heap samples on garbage collection for the hp2ps tool.")
 DEFINE_BOOL(log_handles, false, "Log global handle events.")
-DEFINE_BOOL(log_snapshot_positions, false,
-            "log positions of (de)serialized objects in the snapshot.")
 DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
 DEFINE_BOOL(prof, false,
             "Log statistical profiling information (implies --log-code).")
@@ -1026,6 +1020,11 @@
 DEFINE_BOOL(perf_basic_prof_only_functions, false,
             "Only report function code ranges to perf (i.e. no stubs).")
 DEFINE_IMPLICATION(perf_basic_prof_only_functions, perf_basic_prof)
+DEFINE_BOOL(perf_prof, false,
+            "Enable perf linux profiler (experimental annotate support).")
+DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
+DEFINE_BOOL(perf_prof_debug_info, false,
+            "Enable debug info for perf linux profiler (experimental).")
 DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
               "Specify the name of the file for fake gc mmap used in ll_prof")
 DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 4013601..5ecbd45 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -24,6 +24,8 @@
 #include "src/mips/frames-mips.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/mips64/frames-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/frames-s390.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/frames-x87.h"  // NOLINT
 #else
@@ -114,7 +116,9 @@
 
 inline Object* StandardFrame::context() const {
   const int offset = StandardFrameConstants::kContextOffset;
-  return Memory::Object_at(fp() + offset);
+  Object* maybe_result = Memory::Object_at(fp() + offset);
+  DCHECK(!maybe_result->IsSmi());
+  return maybe_result;
 }
 
 
@@ -139,23 +143,20 @@
 
 
 inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
-  Object* marker =
-      Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
-  return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
+  Object* frame_type =
+      Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
+  return frame_type == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
 }
 
 
 inline bool StandardFrame::IsConstructFrame(Address fp) {
-  Object* marker =
-      Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
-  return marker == Smi::FromInt(StackFrame::CONSTRUCT);
+  Object* frame_type =
+      Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
+  return frame_type == Smi::FromInt(StackFrame::CONSTRUCT);
 }
 
-
 inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
-    : StandardFrame(iterator) {
-}
-
+    : StandardFrame(iterator) {}
 
 Address JavaScriptFrame::GetParameterSlot(int index) const {
   int param_count = ComputeParametersCount();
@@ -242,6 +243,14 @@
     StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
 }
 
+inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
+    : StandardFrame(iterator) {}
+
+inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
+    : StubFrame(iterator) {}
+
+inline JsToWasmFrame::JsToWasmFrame(StackFrameIteratorBase* iterator)
+    : StubFrame(iterator) {}
 
 inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
     : StandardFrame(iterator) {
@@ -257,21 +266,18 @@
     : InternalFrame(iterator) {
 }
 
-
 inline JavaScriptFrameIterator::JavaScriptFrameIterator(
     Isolate* isolate)
     : iterator_(isolate) {
   if (!done()) Advance();
 }
 
-
 inline JavaScriptFrameIterator::JavaScriptFrameIterator(
     Isolate* isolate, ThreadLocalTop* top)
     : iterator_(isolate, top) {
   if (!done()) Advance();
 }
 
-
 inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
   // TODO(1233797): The frame hierarchy needs to change. It's
   // problematic that we can't use the safe-cast operator to cast to
diff --git a/src/frames.cc b/src/frames.cc
index 50a2e21..0e57429 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -134,12 +134,10 @@
 #undef FRAME_TYPE_CASE
 }
 
-
 // -------------------------------------------------------------------------
 
-
-JavaScriptFrameIterator::JavaScriptFrameIterator(
-    Isolate* isolate, StackFrame::Id id)
+JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate,
+                                                 StackFrame::Id id)
     : iterator_(isolate) {
   while (!done()) {
     Advance();
@@ -216,9 +214,9 @@
     // we check only that kMarkerOffset is within the stack bounds and do
     // a compile-time check that the kContextOffset slot is pushed on the
     // stack before kMarkerOffset.
-    STATIC_ASSERT(StandardFrameConstants::kMarkerOffset <
+    STATIC_ASSERT(StandardFrameConstants::kFunctionOffset <
                   StandardFrameConstants::kContextOffset);
-    Address frame_marker = fp + StandardFrameConstants::kMarkerOffset;
+    Address frame_marker = fp + StandardFrameConstants::kFunctionOffset;
     if (IsValidStackAddress(frame_marker)) {
       type = StackFrame::ComputeType(this, &state);
       top_frame_type_ = type;
@@ -403,11 +401,29 @@
   return_address_location_resolver_ = resolver;
 }
 
+static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
+  Code* interpreter_entry_trampoline =
+      isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+  Code* interpreter_bytecode_dispatch =
+      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+
+  return (pc >= interpreter_entry_trampoline->instruction_start() &&
+          pc < interpreter_entry_trampoline->instruction_end()) ||
+         (pc >= interpreter_bytecode_dispatch->instruction_start() &&
+          pc < interpreter_bytecode_dispatch->instruction_end());
+}
 
 StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
                                          State* state) {
   DCHECK(state->fp != NULL);
 
+#if defined(USE_SIMULATOR)
+  MSAN_MEMORY_IS_INITIALIZED(
+      state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
+      kPointerSize);
+#endif
+  Object* marker = Memory::Object_at(
+      state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
   if (!iterator->can_access_heap_objects_) {
     // TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
     // means that we are being called from the profiler, which can interrupt
@@ -416,66 +432,81 @@
     // reliable.
 #if defined(USE_SIMULATOR)
     MSAN_MEMORY_IS_INITIALIZED(
-        state->fp + StandardFrameConstants::kContextOffset, kPointerSize);
-    MSAN_MEMORY_IS_INITIALIZED(
-        state->fp + StandardFrameConstants::kMarkerOffset, kPointerSize);
+        state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
 #endif
-    if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
-      // An adapter frame has a special SMI constant for the context and
-      // is not distinguished through the marker.
-      return ARGUMENTS_ADAPTOR;
-    }
-    Object* marker =
-        Memory::Object_at(state->fp + StandardFrameConstants::kMarkerOffset);
-    if (marker->IsSmi()) {
-      return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-    } else {
-      return JAVA_SCRIPT;
-    }
-  }
-
-  // Look up the code object to figure out the type of the stack frame.
-  Code* code_obj = GetContainingCode(iterator->isolate(), *(state->pc_address));
-
-  Object* marker =
-      Memory::Object_at(state->fp + StandardFrameConstants::kMarkerOffset);
-  if (code_obj != nullptr) {
-    switch (code_obj->kind()) {
-      case Code::FUNCTION:
+    Object* maybe_function =
+        Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
+    if (!marker->IsSmi()) {
+      if (maybe_function->IsSmi()) {
+        return NONE;
+      } else if (FLAG_ignition && IsInterpreterFramePc(iterator->isolate(),
+                                                       *(state->pc_address))) {
+        return INTERPRETED;
+      } else {
         return JAVA_SCRIPT;
-      case Code::OPTIMIZED_FUNCTION:
-        return OPTIMIZED;
-      case Code::WASM_FUNCTION:
-        return STUB;
-      case Code::BUILTIN:
-        if (!marker->IsSmi()) {
-          if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
-            // An adapter frame has a special SMI constant for the context and
-            // is not distinguished through the marker.
-            return ARGUMENTS_ADAPTOR;
-          } else {
-            // The interpreter entry trampoline has a non-SMI marker.
-            DCHECK(code_obj->is_interpreter_entry_trampoline() ||
-                   code_obj->is_interpreter_enter_bytecode_dispatch());
-            return INTERPRETED;
-          }
-        }
-        break;  // Marker encodes the frame type.
-      case Code::HANDLER:
-        if (!marker->IsSmi()) {
-          // Only hydrogen code stub handlers can have a non-SMI marker.
-          DCHECK(code_obj->is_hydrogen_stub());
+      }
+    }
+  } else {
+    // Look up the code object to figure out the type of the stack frame.
+    Code* code_obj =
+        GetContainingCode(iterator->isolate(), *(state->pc_address));
+    if (code_obj != nullptr) {
+      if (code_obj->is_interpreter_entry_trampoline() ||
+          code_obj->is_interpreter_enter_bytecode_dispatch()) {
+        return INTERPRETED;
+      }
+      switch (code_obj->kind()) {
+        case Code::BUILTIN:
+          if (marker->IsSmi()) break;
+          // We treat frames for BUILTIN Code objects as OptimizedFrame for now
+          // (all the builtins with JavaScript linkage are actually generated
+          // with TurboFan currently, so this is sound).
           return OPTIMIZED;
-        }
-        break;  // Marker encodes the frame type.
-      default:
-        break;  // Marker encodes the frame type.
+        case Code::FUNCTION:
+          return JAVA_SCRIPT;
+        case Code::OPTIMIZED_FUNCTION:
+          return OPTIMIZED;
+        case Code::WASM_FUNCTION:
+          return WASM;
+        case Code::WASM_TO_JS_FUNCTION:
+          return WASM_TO_JS;
+        case Code::JS_TO_WASM_FUNCTION:
+          return JS_TO_WASM;
+        default:
+          // All other types should have an explicit marker
+          break;
+      }
+    } else {
+      return NONE;
     }
   }
 
-  // Didn't find a code object, or the code kind wasn't specific enough.
-  // The marker should encode the frame type.
-  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+  DCHECK(marker->IsSmi());
+  StackFrame::Type candidate =
+      static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+  switch (candidate) {
+    case ENTRY:
+    case ENTRY_CONSTRUCT:
+    case EXIT:
+    case STUB:
+    case STUB_FAILURE_TRAMPOLINE:
+    case INTERNAL:
+    case CONSTRUCT:
+    case ARGUMENTS_ADAPTOR:
+      return candidate;
+    case JS_TO_WASM:
+    case WASM_TO_JS:
+    case WASM:
+    case JAVA_SCRIPT:
+    case OPTIMIZED:
+    case INTERPRETED:
+    default:
+      // Unoptimized and optimized JavaScript frames, including
+      // interpreted frames, should never have a StackFrame::Type
+      // marker. If we find one, we're likely being called from the
+      // profiler in a bogus stack frame.
+      return NONE;
+  }
 }
 
 
@@ -493,16 +524,7 @@
 
 
 Address StackFrame::UnpaddedFP() const {
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
-  if (!is_optimized()) return fp();
-  int32_t alignment_state = Memory::int32_at(
-    fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
-
-  return (alignment_state == kAlignmentPaddingPushed) ?
-    (fp() + kPointerSize) : fp();
-#else
   return fp();
-#endif
 }
 
 
@@ -572,7 +594,7 @@
 
 
 Address ExitFrame::GetCallerStackPointer() const {
-  return fp() + ExitFrameConstants::kCallerSPDisplacement;
+  return fp() + ExitFrameConstants::kCallerSPOffset;
 }
 
 
@@ -648,13 +670,50 @@
   SafepointEntry safepoint_entry;
   Code* code = StackFrame::GetSafepointData(
       isolate(), pc(), &safepoint_entry, &stack_slots);
-  unsigned slot_space =
-      stack_slots * kPointerSize - StandardFrameConstants::kFixedFrameSize;
+  unsigned slot_space = stack_slots * kPointerSize;
 
-  // Visit the outgoing parameters.
+  // Determine the fixed header and spill slot area size.
+  int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
+  Object* marker =
+      Memory::Object_at(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
+  if (marker->IsSmi()) {
+    StackFrame::Type candidate =
+        static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+    switch (candidate) {
+      case ENTRY:
+      case ENTRY_CONSTRUCT:
+      case EXIT:
+      case STUB_FAILURE_TRAMPOLINE:
+      case ARGUMENTS_ADAPTOR:
+      case STUB:
+      case INTERNAL:
+      case CONSTRUCT:
+      case JS_TO_WASM:
+      case WASM_TO_JS:
+      case WASM:
+        frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
+        break;
+      case JAVA_SCRIPT:
+      case OPTIMIZED:
+      case INTERPRETED:
+        // These frame types have a context, but it is actually stored in
+        // the stack slot where one would otherwise find the frame type.
+        UNREACHABLE();
+        break;
+      case NONE:
+      case NUMBER_OF_TYPES:
+      case MANUAL:
+        UNREACHABLE();
+        break;
+    }
+  }
+  slot_space -=
+      (frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
+
+  Object** frame_header_base = &Memory::Object_at(fp() - frame_header_size);
+  Object** frame_header_limit = &Memory::Object_at(fp());
   Object** parameters_base = &Memory::Object_at(sp());
-  Object** parameters_limit = &Memory::Object_at(
-      fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
+  Object** parameters_limit = frame_header_base - slot_space / kPointerSize;
 
   // Visit the parameters that may be on top of the saved registers.
   if (safepoint_entry.argument_count() > 0) {
@@ -690,7 +749,10 @@
   safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
 
   // Visit the rest of the parameters.
-  v->VisitPointers(parameters_base, parameters_limit);
+  if (!is_js_to_wasm() && !is_wasm()) {
+    // Non-WASM frames have tagged values as parameters.
+    v->VisitPointers(parameters_base, parameters_limit);
+  }
 
   // Visit pointer spill slots and locals.
   for (unsigned index = 0; index < stack_slots; index++) {
@@ -704,12 +766,11 @@
   // Visit the return address in the callee and incoming arguments.
   IteratePc(v, pc_address(), constant_pool_address(), code);
 
-  // Visit the context in stub frame and JavaScript frame.
-  // Visit the function in JavaScript frame.
-  Object** fixed_base = &Memory::Object_at(
-      fp() + StandardFrameConstants::kMarkerOffset);
-  Object** fixed_limit = &Memory::Object_at(fp());
-  v->VisitPointers(fixed_base, fixed_limit);
+  if (!is_wasm() && !is_wasm_to_js()) {
+    // Visit the context in stub frame and JavaScript frame.
+    // Visit the function in JavaScript frame.
+    v->VisitPointers(frame_header_base, frame_header_limit);
+  }
 }
 
 
@@ -724,7 +785,7 @@
 
 
 Address StubFrame::GetCallerStackPointer() const {
-  return fp() + ExitFrameConstants::kCallerSPDisplacement;
+  return fp() + ExitFrameConstants::kCallerSPOffset;
 }
 
 
@@ -893,6 +954,15 @@
   }
 }
 
+namespace {
+
+bool CannotDeoptFromAsmCode(Code* code, JSFunction* function) {
+  return code->is_turbofanned() && function->shared()->asm_function() &&
+         !FLAG_turbo_asm_deoptimization;
+}
+
+}  // namespace
+
 FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
                            AbstractCode* abstract_code, int code_offset,
                            bool is_constructor)
@@ -900,7 +970,11 @@
       function_(function),
       abstract_code_(abstract_code),
       code_offset_(code_offset),
-      is_constructor_(is_constructor) {}
+      is_constructor_(is_constructor) {
+  DCHECK(abstract_code->IsBytecodeArray() ||
+         Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION ||
+         CannotDeoptFromAsmCode(Code::cast(abstract_code), function));
+}
 
 void FrameSummary::Print() {
   PrintF("receiver: ");
@@ -912,7 +986,10 @@
   if (abstract_code_->IsCode()) {
     Code* code = abstract_code_->GetCode();
     if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
-    if (code->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT ");
+    if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+      DCHECK(CannotDeoptFromAsmCode(code, *function()));
+      PrintF(" ASM ");
+    }
   } else {
     PrintF(" BYTECODE ");
   }
@@ -926,8 +1003,9 @@
 
   // Delegate to JS frame in absence of turbofan deoptimization.
   // TODO(turbofan): Revisit once we support deoptimization across the board.
-  if (LookupCode()->is_turbofanned() && function()->shared()->asm_function() &&
-      !FLAG_turbo_asm_deoptimization) {
+  Code* code = LookupCode();
+  if (code->kind() == Code::BUILTIN ||
+      CannotDeoptFromAsmCode(code, function())) {
     return JavaScriptFrame::Summarize(frames);
   }
 
@@ -952,7 +1030,7 @@
     if (frame_opcode == Translation::JS_FRAME ||
         frame_opcode == Translation::INTERPRETED_FRAME) {
       jsframe_count--;
-      BailoutId const ast_id = BailoutId(it.Next());
+      BailoutId const bailout_id = BailoutId(it.Next());
       SharedFunctionInfo* const shared_info =
           SharedFunctionInfo::cast(literal_array->get(it.Next()));
       it.Next();  // Skip height.
@@ -999,14 +1077,14 @@
         DeoptimizationOutputData* const output_data =
             DeoptimizationOutputData::cast(code->deoptimization_data());
         unsigned const entry =
-            Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
+            Deoptimizer::GetOutputInfo(output_data, bailout_id, shared_info);
         code_offset = FullCodeGenerator::PcField::decode(entry);
         abstract_code = AbstractCode::cast(code);
       } else {
-        // TODO(rmcilroy): Modify FrameSummary to enable us to summarize
-        // based on the BytecodeArray and bytecode offset.
         DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
-        code_offset = 0;
+        // BailoutId points to the next bytecode in the bytecode array. Subtract
+        // 1 to get the end of current bytecode.
+        code_offset = bailout_id.ToInt() - 1;
         abstract_code = AbstractCode::cast(shared_info->bytecode_array());
       }
       FrameSummary summary(receiver, function, abstract_code, code_offset,
@@ -1030,7 +1108,6 @@
 int OptimizedFrame::LookupExceptionHandlerInTable(
     int* stack_slots, HandlerTable::CatchPrediction* prediction) {
   Code* code = LookupCode();
-  DCHECK(code->is_optimized_code());
   HandlerTable* table = HandlerTable::cast(code->handler_table());
   int pc_offset = static_cast<int>(pc() - code->entry());
   if (stack_slots) *stack_slots = code->stack_slots();
@@ -1069,8 +1146,9 @@
 
   // Delegate to JS frame in absence of turbofan deoptimization.
   // TODO(turbofan): Revisit once we support deoptimization across the board.
-  if (LookupCode()->is_turbofanned() && function()->shared()->asm_function() &&
-      !FLAG_turbo_asm_deoptimization) {
+  Code* code = LookupCode();
+  if (code->kind() == Code::BUILTIN ||
+      CannotDeoptFromAsmCode(code, function())) {
     return JavaScriptFrame::GetFunctions(functions);
   }
 
@@ -1222,6 +1300,20 @@
   accumulator->Add((mode == OVERVIEW) ? "%5d: " : "[%d]: ", index);
 }
 
+void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
+                      int index) const {
+  accumulator->Add("wasm frame");
+}
+
+Code* WasmFrame::unchecked_code() const {
+  return static_cast<Code*>(isolate()->FindCodeObject(pc()));
+}
+
+void WasmFrame::Iterate(ObjectVisitor* v) const { IterateCompiledFrame(v); }
+
+Address WasmFrame::GetCallerStackPointer() const {
+  return fp() + ExitFrameConstants::kCallerSPOffset;
+}
 
 namespace {
 
@@ -1437,10 +1529,10 @@
 
 void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
   Object** base = &Memory::Object_at(sp());
-  Object** limit = &Memory::Object_at(fp() +
-                                      kFirstRegisterParameterFrameOffset);
+  Object** limit = &Memory::Object_at(
+      fp() + StubFailureTrampolineFrameConstants::kFixedHeaderBottomOffset);
   v->VisitPointers(base, limit);
-  base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
+  base = &Memory::Object_at(fp() + StandardFrameConstants::kFunctionOffset);
   const int offset = StandardFrameConstants::kLastObjectOffset;
   limit = &Memory::Object_at(fp() + offset) + 1;
   v->VisitPointers(base, limit);
diff --git a/src/frames.h b/src/frames.h
index f33eb16..f6806d7 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -97,13 +97,15 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
 };
 
-
 #define STACK_FRAME_TYPE_LIST(V)                         \
   V(ENTRY, EntryFrame)                                   \
   V(ENTRY_CONSTRUCT, EntryConstructFrame)                \
   V(EXIT, ExitFrame)                                     \
   V(JAVA_SCRIPT, JavaScriptFrame)                        \
   V(OPTIMIZED, OptimizedFrame)                           \
+  V(WASM, WasmFrame)                                     \
+  V(WASM_TO_JS, WasmToJsFrame)                           \
+  V(JS_TO_WASM, JsToWasmFrame)                           \
   V(INTERPRETED, InterpretedFrame)                       \
   V(STUB, StubFrame)                                     \
   V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
@@ -137,9 +139,62 @@
 //       |- - - - - - - - -| Header <-- frame ptr       |
 //   2   | [Constant Pool] |   |                        |
 //       |- - - - - - - - -|   |                        |
+// 2+cp  |Context/Frm. Type|   v   if a constant pool   |
+//       |-----------------+----    is used, cp = 1,    |
+// 3+cp  |                 |   ^   otherwise, cp = 0    |
+//       |- - - - - - - - -|   |                        |
+// 4+cp  |                 |   |                      Callee
+//       |- - - - - - - - -|   |                   frame slots
+//  ...  |                 | Frame slots           (slot >= 0)
+//       |- - - - - - - - -|   |                        |
+//       |                 |   v                        |
+//  -----+-----------------+----- <-- stack ptr -------------
+//
+class CommonFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset = 0 * kPointerSize;
+  static const int kCallerPCOffset = kCallerFPOffset + 1 * kFPOnStackSize;
+  static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+
+  // Fixed part of the frame consists of return address, caller fp,
+  // constant pool (if FLAG_enable_embedded_constant_pool), context, and
+  // function. StandardFrame::IterateExpressions assumes that kLastObjectOffset
+  // is the last object pointer.
+  static const int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
+  static const int kFixedSlotCountAboveFp =
+      kFixedFrameSizeAboveFp / kPointerSize;
+  static const int kCPSlotSize =
+      FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
+  static const int kCPSlotCount = kCPSlotSize / kPointerSize;
+  static const int kConstantPoolOffset = kCPSlotSize ? -1 * kPointerSize : 0;
+  static const int kContextOrFrameTypeSize = kPointerSize;
+  static const int kContextOrFrameTypeOffset =
+      -(kCPSlotSize + kContextOrFrameTypeSize);
+};
+
+// StandardFrames are used for interpreted, full-codegen and optimized
+// JavaScript frames. They always have a context below the saved fp/constant
+// pool and below that the JSFunction of the executing function.
+//
+//  slot      JS frame
+//       +-----------------+--------------------------------
+//  -n-1 |   parameter 0   |                            ^
+//       |- - - - - - - - -|                            |
+//  -n   |                 |                          Caller
+//  ...  |       ...       |                       frame slots
+//  -2   |  parameter n-1  |                       (slot < 0)
+//       |- - - - - - - - -|                            |
+//  -1   |   parameter n   |                            v
+//  -----+-----------------+--------------------------------
+//   0   |   return addr   |   ^                        ^
+//       |- - - - - - - - -|   |                        |
+//   1   | saved frame ptr | Fixed                      |
+//       |- - - - - - - - -| Header <-- frame ptr       |
+//   2   | [Constant Pool] |   |                        |
+//       |- - - - - - - - -|   |                        |
 // 2+cp  |     Context     |   |   if a constant pool   |
 //       |- - - - - - - - -|   |    is used, cp = 1,    |
-// 3+cp  |JSFunction/Marker|   v   otherwise, cp = 0    |
+// 3+cp  |    JSFunction   |   v   otherwise, cp = 0    |
 //       +-----------------+----                        |
 // 4+cp  |                 |   ^                      Callee
 //       |- - - - - - - - -|   |                   frame slots
@@ -148,66 +203,115 @@
 //       |                 |   v                        |
 //  -----+-----------------+----- <-- stack ptr -------------
 //
-
-class StandardFrameConstants : public AllStatic {
+class StandardFrameConstants : public CommonFrameConstants {
  public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // constant pool (if FLAG_enable_embedded_constant_pool), context, and
-  // function. StandardFrame::IterateExpressions assumes that kLastObjectOffset
-  // is the last object pointer.
-  static const int kCPSlotSize =
-      FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
-  static const int kFixedFrameSizeFromFp =  2 * kPointerSize + kCPSlotSize;
-  static const int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
+  static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
   static const int kFixedFrameSize =
       kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
-  static const int kFixedSlotCountAboveFp =
-      kFixedFrameSizeAboveFp / kPointerSize;
+  static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize;
   static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
-  static const int kCPSlotCount = kCPSlotSize / kPointerSize;
+  static const int kContextOffset = kContextOrFrameTypeOffset;
+  static const int kFunctionOffset = -2 * kPointerSize - kCPSlotSize;
   static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
-  static const int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
-  static const int kContextOffset = -1 * kPointerSize - kCPSlotSize;
-  static const int kConstantPoolOffset = kCPSlotSize ? -1 * kPointerSize : 0;
-  static const int kCallerFPOffset = 0 * kPointerSize;
-  static const int kCallerPCOffset = +1 * kFPOnStackSize;
-  static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
-
   static const int kLastObjectOffset = kContextOffset;
 };
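For orientation, here is an editorial sketch (not part of the patch) of the concrete values the CommonFrameConstants/StandardFrameConstants members above take on a 64-bit target; the assumption is x64 with FLAG_enable_embedded_constant_pool disabled, so kPointerSize is 8 and the constant-pool slot is absent.

// Editorial compile-time check, not part of the patch: StandardFrame offsets
// on x64 without an embedded constant pool.
constexpr int kPointerSize = 8;
constexpr int kCPSlotSize = 0;                                            // no constant pool slot
constexpr int kContextOrFrameTypeOffset = -(kCPSlotSize + kPointerSize);  // -8
constexpr int kContextOffset = kContextOrFrameTypeOffset;                 // -8
constexpr int kFunctionOffset = -2 * kPointerSize - kCPSlotSize;          // -16
constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;     // 16
constexpr int kFixedFrameSizeAboveFp = 2 * kPointerSize;                  // return addr + saved fp
constexpr int kFixedFrameSize = kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;

static_assert(kContextOffset == -8, "context sits directly below the saved fp");
static_assert(kFunctionOffset == -16, "JSFunction sits below the context");
static_assert(kFixedFrameSize == 32, "four fixed slots in a standard JS frame");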
 
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
+// TypedFrames have a Smi type marker value below the saved FP/constant pool to
+// distinguish them from StandardFrames, which have a context in that position
+// instead.
+//
+//  slot      JS frame
+//       +-----------------+--------------------------------
+//  -n-1 |   parameter 0   |                            ^
+//       |- - - - - - - - -|                            |
+//  -n   |                 |                          Caller
+//  ...  |       ...       |                       frame slots
+//  -2   |  parameter n-1  |                       (slot < 0)
+//       |- - - - - - - - -|                            |
+//  -1   |   parameter n   |                            v
+//  -----+-----------------+--------------------------------
+//   0   |   return addr   |   ^                        ^
+//       |- - - - - - - - -|   |                        |
+//   1   | saved frame ptr | Fixed                      |
+//       |- - - - - - - - -| Header <-- frame ptr       |
+//   2   | [Constant Pool] |   |                        |
+//       |- - - - - - - - -|   |                        |
+// 2+cp  |Frame Type Marker|   v   if a constant pool   |
+//       |-----------------+----    is used, cp = 1,    |
+// 3+cp  |                 |   ^   otherwise, cp = 0    |
+//       |- - - - - - - - -|   |                        |
+// 4+cp  |                 |   |                      Callee
+//       |- - - - - - - - -|   |                   frame slots
+//  ...  |                 | Frame slots           (slot >= 0)
+//       |- - - - - - - - -|   |                        |
+//       |                 |   v                        |
+//  -----+-----------------+----- <-- stack ptr -------------
+//
+class TypedFrameConstants : public CommonFrameConstants {
  public:
-  // FP-relative.
-  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + kPointerSize;
+  static const int kFrameTypeSize = kContextOrFrameTypeSize;
+  static const int kFrameTypeOffset = kContextOrFrameTypeOffset;
+  static const int kFixedFrameSizeFromFp = kCPSlotSize + kFrameTypeSize;
+  static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize;
+  static const int kFixedFrameSize =
+      StandardFrameConstants::kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
+  static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+  static const int kFirstPushedFrameValueOffset =
+      -StandardFrameConstants::kCPSlotSize - kFrameTypeSize - kPointerSize;
 };
 
+#define TYPED_FRAME_PUSHED_VALUE_OFFSET(x) \
+  (TypedFrameConstants::kFirstPushedFrameValueOffset - (x)*kPointerSize)
+#define TYPED_FRAME_SIZE(count) \
+  (TypedFrameConstants::kFixedFrameSize + (count)*kPointerSize)
+#define TYPED_FRAME_SIZE_FROM_SP(count) \
+  (TypedFrameConstants::kFixedFrameSizeFromFp + (count)*kPointerSize)
+#define DEFINE_TYPED_FRAME_SIZES(count)                                     \
+  static const int kFixedFrameSize = TYPED_FRAME_SIZE(count);               \
+  static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;        \
+  static const int kFixedFrameSizeFromFp = TYPED_FRAME_SIZE_FROM_SP(count); \
+  static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize
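TYPED_FRAME_PUSHED_VALUE_OFFSET gives each typed-frame class an fp-relative slot directly below the frame-type marker. A short editorial check of the arithmetic, not part of the patch and again assuming x64 without an embedded constant pool:

// Editorial sketch, not part of the patch: what TYPED_FRAME_PUSHED_VALUE_OFFSET
// expands to on x64 with no embedded constant pool (kPointerSize == 8).
constexpr int kPointerSize = 8;
constexpr int kCPSlotSize = 0;
constexpr int kFrameTypeSize = kPointerSize;
constexpr int kFirstPushedFrameValueOffset =
    -kCPSlotSize - kFrameTypeSize - kPointerSize;  // -16, first slot below the marker

constexpr int TypedFramePushedValueOffset(int x) {
  return kFirstPushedFrameValueOffset - x * kPointerSize;
}

// ArgumentsAdaptorFrameConstants from the patch, spelled out numerically.
static_assert(TypedFramePushedValueOffset(0) == -16, "kFunctionOffset");
static_assert(TypedFramePushedValueOffset(1) == -24, "kLengthOffset");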
 
-class InternalFrameConstants : public AllStatic {
+class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
  public:
   // FP-relative.
-  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+  static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 };
 
-
-class ConstructFrameConstants : public AllStatic {
+class InternalFrameConstants : public TypedFrameConstants {
  public:
   // FP-relative.
-  static const int kImplicitReceiverOffset =
-      StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize;
-  static const int kLengthOffset =
-      StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
-  static const int kAllocationSiteOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  static const int kCodeOffset =
-      StandardFrameConstants::kExpressionsOffset - 0 * kPointerSize;
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  DEFINE_TYPED_FRAME_SIZES(1);
+};
 
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+class FrameDropperFrameConstants : public InternalFrameConstants {
+ public:
+  // FP-relative.
+  static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
+};
+
+class ConstructFrameConstants : public TypedFrameConstants {
+ public:
+  // FP-relative.
+  static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kAllocationSiteOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+  static const int kImplicitReceiverOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+  DEFINE_TYPED_FRAME_SIZES(4);
+};
+
+class StubFailureTrampolineFrameConstants : public InternalFrameConstants {
+ public:
+  static const int kArgumentsArgumentsOffset =
+      TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kArgumentsLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  static const int kArgumentsPointerOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+  static const int kFixedHeaderBottomOffset = kArgumentsPointerOffset;
+  DEFINE_TYPED_FRAME_SIZES(3);
 };
 
 
@@ -310,6 +414,9 @@
   bool is_exit() const { return type() == EXIT; }
   bool is_optimized() const { return type() == OPTIMIZED; }
   bool is_interpreted() const { return type() == INTERPRETED; }
+  bool is_wasm() const { return type() == WASM; }
+  bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
+  bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
   bool is_internal() const { return type() == INTERNAL; }
   bool is_stub_failure_trampoline() const {
@@ -617,8 +724,7 @@
   bool is_constructor_;
 };
 
-
-class JavaScriptFrame: public StandardFrame {
+class JavaScriptFrame : public StandardFrame {
  public:
   Type type() const override { return JAVA_SCRIPT; }
 
@@ -841,6 +947,55 @@
   friend class StackFrameIteratorBase;
 };
 
+class WasmFrame : public StandardFrame {
+ public:
+  Type type() const override { return WASM; }
+
+  // GC support.
+  void Iterate(ObjectVisitor* v) const override;
+
+  // Printing support.
+  void Print(StringStream* accumulator, PrintMode mode,
+             int index) const override;
+
+  // Determine the code for the frame.
+  Code* unchecked_code() const override;
+
+  static WasmFrame* cast(StackFrame* frame) {
+    DCHECK(frame->is_wasm());
+    return static_cast<WasmFrame*>(frame);
+  }
+
+ protected:
+  inline explicit WasmFrame(StackFrameIteratorBase* iterator);
+
+  Address GetCallerStackPointer() const override;
+
+ private:
+  friend class StackFrameIteratorBase;
+};
+
+class WasmToJsFrame : public StubFrame {
+ public:
+  Type type() const override { return WASM_TO_JS; }
+
+ protected:
+  inline explicit WasmToJsFrame(StackFrameIteratorBase* iterator);
+
+ private:
+  friend class StackFrameIteratorBase;
+};
+
+class JsToWasmFrame : public StubFrame {
+ public:
+  Type type() const override { return JS_TO_WASM; }
+
+ protected:
+  inline explicit JsToWasmFrame(StackFrameIteratorBase* iterator);
+
+ private:
+  friend class StackFrameIteratorBase;
+};
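With WASM, WASM_TO_JS and JS_TO_WASM added to STACK_FRAME_TYPE_LIST and matching is_wasm()/is_wasm_to_js()/is_js_to_wasm() predicates on StackFrame, callers can filter these frames while walking a stack. The sketch below is an editorial illustration only, not part of the patch; it assumes the existing V8-internal StackFrameIterator API and a caller-supplied visitor.

// Editorial sketch, not part of the patch: skipping WASM-related frames during
// a stack walk, since their parameter area holds untagged values that a
// tagged-pointer visitor must not touch.
void VisitTaggedFrames(v8::internal::Isolate* isolate,
                       v8::internal::ObjectVisitor* visitor) {
  for (v8::internal::StackFrameIterator it(isolate); !it.done(); it.Advance()) {
    v8::internal::StackFrame* frame = it.frame();
    if (frame->is_wasm() || frame->is_wasm_to_js() || frame->is_js_to_wasm()) {
      continue;  // leave untagged WASM slots alone
    }
    frame->Iterate(visitor);  // visit tagged header and parameter slots
  }
}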
 
 class InternalFrame: public StandardFrame {
  public:
@@ -869,14 +1024,6 @@
 
 class StubFailureTrampolineFrame: public StandardFrame {
  public:
-  // sizeof(Arguments) - sizeof(Arguments*) is 3 * kPointerSize), but the
-  // presubmit script complains about using sizeof() on a type.
-  static const int kFirstRegisterParameterFrameOffset =
-      StandardFrameConstants::kMarkerOffset - 3 * kPointerSize;
-
-  static const int kCallerStackParameterCountFrameOffset =
-      StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
-
   Type type() const override { return STUB_FAILURE_TRAMPOLINE; }
 
   // Get the code associated with this frame.
@@ -974,7 +1121,6 @@
   DISALLOW_COPY_AND_ASSIGN(StackFrameIterator);
 };
 
-
 // Iterator that supports iterating through all JavaScript frames.
 class JavaScriptFrameIterator BASE_EMBEDDED {
  public:
@@ -997,7 +1143,6 @@
   StackFrameIterator iterator_;
 };
 
-
 // NOTE: The stack trace frame iterator is an iterator that only
 // traverse proper JavaScript frames; that is JavaScript frames that
 // have proper JavaScript functions. This excludes the problematic
diff --git a/src/full-codegen/arm/full-codegen-arm.cc b/src/full-codegen/arm/full-codegen-arm.cc
index 6e6a655..81c5ff2 100644
--- a/src/full-codegen/arm/full-codegen-arm.cc
+++ b/src/full-codegen/arm/full-codegen-arm.cc
@@ -293,42 +293,38 @@
     __ CallRuntime(Runtime::kTraceEnter);
   }
 
-  // Visit the declarations and body unless there is an illegal
-  // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  // Visit the declarations and body.
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(hs, &ok);
+    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+    PredictableCodeSizeScope predictable(masm_);
+    predictable.ExpectSize(
+        masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+    __ Call(stack_check, RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
 
-    { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-      __ cmp(sp, Operand(ip));
-      __ b(hs, &ok);
-      Handle<Code> stack_check = isolate()->builtins()->StackCheck();
-      PredictableCodeSizeScope predictable(masm_);
-      predictable.ExpectSize(
-          masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
-      __ Call(stack_check, RelocInfo::CODE_TARGET);
-      __ bind(&ok);
-    }
-
-    { Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(literal()->body());
-      DCHECK(loop_depth() == 0);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -537,7 +533,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -643,7 +639,7 @@
                                Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
   Split(eq, if_true, if_false, fall_through);
@@ -994,14 +990,14 @@
 
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
@@ -1076,10 +1072,6 @@
   // We got a fixed array in register r0. Iterate through that.
   __ bind(&fixed_array);
 
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(r1);
-  __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
   __ mov(r1, Operand(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ Push(r1, r0);  // Smi and array
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
@@ -1114,12 +1106,8 @@
   __ cmp(r4, Operand(r2));
   __ b(eq, &update_each);
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key, so record the slow path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(r0);
   __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ str(r2, FieldMemOperand(r0, FixedArray::OffsetOfElementAt(vector_index)));
@@ -1172,31 +1160,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ mov(r2, Operand(info));
-    __ CallStub(&stub);
-  } else {
-    __ Push(info);
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1631,13 +1594,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ ldr(r0, MemOperand(sp));
-    __ push(r0);
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1884,63 +1840,43 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ jmp(&suspend);
-      __ bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ pop(r1);
-      __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
-      __ b(ne, &resume);
-      __ push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ jmp(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ pop(r1);
+  __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+  __ b(ne, &resume);
+  __ push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
-      __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
-      __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
-      __ mov(r1, cp);
-      __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
-                          kLRHasBeenSaved, kDontSaveFPRegs);
-      __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-      __ cmp(sp, r1);
-      __ b(eq, &post_runtime);
-      __ push(r0);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
-      PopOperand(result_register());
-      EmitReturnSequence();
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
+  __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
+  __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
+  __ mov(r1, cp);
+  __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
+  __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+  __ cmp(sp, r1);
+  __ b(eq, &post_runtime);
+  __ push(r0);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ bind(&resume);
+  context()->Plug(result_register());
 }
 
 
@@ -1970,7 +1906,10 @@
   __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
   __ push(r2);
 
-  // Push holes for the rest of the arguments to the generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   __ ldr(r3,
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1994,9 +1933,7 @@
   // pp = caller's constant pool (if FLAG_enable_embedded_constant_pool),
   // cp = callee's context,
   // r4 = callee's JS function.
-  __ PushFixedFrame(r4);
-  // Adjust FP to point to saved FP.
-  __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(r4);
 
   // Load the operand stack size.
   __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
@@ -2081,7 +2018,7 @@
 
   __ bind(&done_allocate);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
-  __ pop(r2);
+  PopOperand(r2);
   __ LoadRoot(r3,
               done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
   __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
@@ -2093,18 +2030,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!prop->IsSuperAccess());
-
-  __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
-  __ mov(LoadDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
 void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               Expression* left_expr,
@@ -2713,7 +2638,7 @@
   }
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -3075,9 +3000,10 @@
   // Map is now in r0.
   __ b(lt, &null);
 
-  // Return 'Function' for JSFunction objects.
-  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
-  __ b(eq, &function);
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  __ cmp(r1, Operand(FIRST_FUNCTION_TYPE));
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ b(hs, &function);
 
   // Check if the constructor in the map is a JS function.
   Register instance_type = r2;
@@ -3196,23 +3122,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into r0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(r0, &done_convert);
-  __ Push(r0);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3389,6 +3298,11 @@
   context()->Plug(r0);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r0);
+  context()->Plug(r0);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3431,11 +3345,13 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadNativeContextSlot(expr->context_index(), r0);
+  PushOperand(r0);
+
   // Push undefined as the receiver.
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   PushOperand(r0);
-
-  __ LoadNativeContextSlot(expr->context_index(), r0);
 }
 
 
@@ -3449,60 +3365,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRuntime");
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    __ ldr(ip, MemOperand(sp, 0));
-    PushOperand(ip);
-    __ str(r0, MemOperand(sp, kPointerSize));
-
-    // Push the arguments ("left-to-right").
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-    context()->DropAndPlug(1, r0);
-
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(expr->function(), arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(r0);
-      }
-    }
-  }
+  // Restore context register.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3740,11 +3605,11 @@
     __ jmp(&stub_call);
     __ bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3784,9 +3649,6 @@
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in r0.
   switch (assign_type) {
     case VARIABLE:
@@ -4039,21 +3901,16 @@
     __ cmp(r0, r1);
     Split(eq, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ CompareRoot(r0, Heap::kTrueValueRootIndex);
-    Split(eq, if_true, if_false, fall_through);
+    __ JumpIfSmi(r0, if_false);
+    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+    __ tst(r1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(r0);
-}
-
-
 Register FullCodeGenerator::result_register() {
   return r0;
 }
@@ -4063,6 +3920,10 @@
   return cp;
 }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ ldr(value, MemOperand(fp, frame_offset));
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4133,12 +3994,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(slot)));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   DCHECK(!result_register().is(r1));
   __ Pop(result_register());  // Restore the accumulator.
@@ -4249,7 +4104,6 @@
       break;
     }
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       //  <decrement profiling counter>
       //   mov r0, r0 (NOP)
       //   ; load on-stack replacement address into ip - either of (for ARMv7):
@@ -4287,8 +4141,10 @@
 
   Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
   Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
+#ifdef DEBUG
   Address interrupt_address = Assembler::target_address_at(
       pc_immediate_load_address, unoptimized_code);
+#endif
 
   if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
     DCHECK(interrupt_address ==
@@ -4298,14 +4154,9 @@
 
   DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address)));
 
-  if (interrupt_address ==
-      isolate->builtins()->OnStackReplacement()->entry()) {
-    return ON_STACK_REPLACEMENT;
-  }
-
   DCHECK(interrupt_address ==
-         isolate->builtins()->OsrAfterStackCheck()->entry());
-  return OSR_AFTER_STACK_CHECK;
+         isolate->builtins()->OnStackReplacement()->entry());
+  return ON_STACK_REPLACEMENT;
 }
 
 
diff --git a/src/full-codegen/arm64/full-codegen-arm64.cc b/src/full-codegen/arm64/full-codegen-arm64.cc
index d0278e7..aa67117 100644
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -296,42 +296,36 @@
     __ CallRuntime(Runtime::kTraceEnter);
   }
 
-  // Visit the declarations and body unless there is an illegal
-  // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  // Visit the declarations and body.
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    DCHECK(jssp.Is(__ StackPointer()));
+    __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+    __ B(hs, &ok);
+    PredictableCodeSizeScope predictable(masm_,
+                                         Assembler::kCallSizeWithRelocation);
+    __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+    __ Bind(&ok);
+  }
 
-    {
-      Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      DCHECK(jssp.Is(__ StackPointer()));
-      __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
-      __ B(hs, &ok);
-      PredictableCodeSizeScope predictable(masm_,
-                                           Assembler::kCallSizeWithRelocation);
-      __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
-      __ Bind(&ok);
-    }
-
-    {
-      Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(literal()->body());
-      DCHECK(loop_depth() == 0);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -530,7 +524,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ B(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -640,7 +634,7 @@
                                Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
   Split(eq, if_true, if_false, fall_through);
@@ -997,14 +991,14 @@
 
   // TODO(all): This visitor probably needs better comments and a revisit.
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
@@ -1072,10 +1066,6 @@
   // We got a fixed array in register x0. Iterate through that.
   __ Bind(&fixed_array);
 
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(x1);
-  __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
   __ Mov(x1, Smi::FromInt(1));  // Smi(1) indicates slow check.
   __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
   __ Push(x1, x0, x2);  // Smi and array, fixed array length (as smi).
@@ -1108,12 +1098,8 @@
   __ Cmp(x11, x2);
   __ B(eq, &update_each);
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key, so record the slow path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(x0);
   __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ Str(x10, FieldMemOperand(x0, FixedArray::OffsetOfElementAt(vector_index)));
@@ -1165,31 +1151,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new space for
-  // nested functions that don't need literals cloning. If we're running with
-  // the --always-opt or the --prepare-always-opt flag, we need to use the
-  // runtime function so that the new function we are creating here gets a
-  // chance to have its code optimized and doesn't just get a copy of the
-  // existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ Mov(x2, Operand(info));
-    __ CallStub(&stub);
-  } else {
-    __ Push(info);
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1618,13 +1579,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ Peek(x0, 0);
-    __ Push(x0);
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1858,18 +1812,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!prop->IsSuperAccess());
-
-  __ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
-  __ Mov(LoadDescriptor::SlotRegister(),
-         SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
 void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               Expression* left_expr,
@@ -2494,7 +2436,7 @@
   }
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -2863,8 +2805,9 @@
   __ B(lt, &null);
 
   // Return 'Function' for JSFunction objects.
-  __ Cmp(x11, JS_FUNCTION_TYPE);
-  __ B(eq, &function);
+  __ Cmp(x11, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ B(hs, &function);
 
   // Check if the constructor in the map is a JS function.
   Register instance_type = x14;
@@ -2978,23 +2921,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into x0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(x0, &done_convert);
-  __ Push(x0);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3175,6 +3101,11 @@
   context()->Plug(x0);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, x0);
+  context()->Plug(x0);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3228,11 +3159,13 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadNativeContextSlot(expr->context_index(), x0);
+  PushOperand(x0);
+
   // Push undefined as the receiver.
   __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
   PushOperand(x0);
-
-  __ LoadNativeContextSlot(expr->context_index(), x0);
 }
 
 
@@ -3246,58 +3179,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRunTime");
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    PopOperand(x10);
-    PushOperands(x0, x10);
-
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-    context()->DropAndPlug(1, x0);
-
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(expr->function(), arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(x0);
-      }
-    }
-  }
+  // Restore context register.
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3531,11 +3415,11 @@
     __ B(&stub_call);
     __ Bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3577,9 +3461,6 @@
   }
   __ Bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in x0.
   switch (assign_type) {
     case VARIABLE:
@@ -3841,22 +3722,17 @@
     __ CompareRoot(x0, nil_value);
     Split(eq, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ CompareRoot(x0, Heap::kTrueValueRootIndex);
-    Split(eq, if_true, if_false, fall_through);
+    __ JumpIfSmi(x0, if_false);
+    __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+    __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+    __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
+                    fall_through);
   }
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::VisitYield(Yield* expr) {
   Comment cmnt(masm_, "[ Yield");
   SetExpressionPosition(expr);
@@ -3869,66 +3745,46 @@
   // and suchlike. The implementation changes a little by bleeding_edge so I
   // don't want to spend too much time on it now.
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ Push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ B(&suspend);
-      // TODO(jbramley): This label is bound here because the following code
-      // looks at its pos(). Is it possible to do something more efficient here,
-      // perhaps using Adr?
-      __ Bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ Pop(x1);
-      __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
-      __ B(ne, &resume);
-      __ Push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ B(&suspend);
+  // TODO(jbramley): This label is bound here because the following code
+  // looks at its pos(). Is it possible to do something more efficient here,
+  // perhaps using Adr?
+  __ Bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ Pop(x1);
+  __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
+  __ B(ne, &resume);
+  __ Push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ Bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
-      __ Mov(x1, Smi::FromInt(continuation.pos()));
-      __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
-      __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
-      __ Mov(x1, cp);
-      __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
-                          kLRHasBeenSaved, kDontSaveFPRegs);
-      __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
-      __ Cmp(__ StackPointer(), x1);
-      __ B(eq, &post_runtime);
-      __ Push(x0);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ Bind(&post_runtime);
-      PopOperand(result_register());
-      EmitReturnSequence();
+  __ Bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+  __ Mov(x1, Smi::FromInt(continuation.pos()));
+  __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+  __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+  __ Mov(x1, cp);
+  __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
+  __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
+  __ Cmp(__ StackPointer(), x1);
+  __ B(eq, &post_runtime);
+  __ Push(x0);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ Bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ Bind(&resume);
+  context()->Plug(result_register());
 }
 
 
@@ -3967,7 +3823,10 @@
                               JSGeneratorObject::kReceiverOffset));
   __ Push(x10);
 
-  // Push holes for the rest of the arguments to the generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
 
   // The number of arguments is stored as an int32_t, and -1 is a marker
@@ -4078,7 +3937,7 @@
   Register empty_fixed_array = x4;
   Register untagged_result = x5;
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
-  __ Pop(result_value);
+  PopOperand(result_value);
   __ LoadRoot(boolean_done,
               done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
   __ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
@@ -4114,6 +3973,10 @@
   return cp;
 }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+  __ Ldr(value, MemOperand(fp, frame_offset));
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
@@ -4185,11 +4048,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   __ Pop(result_register(), x1);  // Restore the accumulator and get the token.
   for (DeferredCommand cmd : commands_) {
@@ -4245,7 +4103,6 @@
       patcher.b(6, pl);
       break;
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       //  <decrement profiling counter>
       //  .. .. .. ..       mov x0, x0 (NOP)
       //  .. .. .. ..       ldr x16, pc+<on-stack replacement address>
@@ -4266,9 +4123,6 @@
               isolate->builtins()->InterruptCheck()->entry())) ||
          (Memory::uint64_at(interrupt_address_pointer) ==
           reinterpret_cast<uint64_t>(
-              isolate->builtins()->OsrAfterStackCheck()->entry())) ||
-         (Memory::uint64_at(interrupt_address_pointer) ==
-          reinterpret_cast<uint64_t>(
               isolate->builtins()->OnStackReplacement()->entry())));
   Memory::uint64_at(interrupt_address_pointer) =
       reinterpret_cast<uint64_t>(replacement_code->entry());
@@ -4294,9 +4148,6 @@
     if (entry == reinterpret_cast<uint64_t>(
         isolate->builtins()->OnStackReplacement()->entry())) {
       return ON_STACK_REPLACEMENT;
-    } else if (entry == reinterpret_cast<uint64_t>(
-        isolate->builtins()->OsrAfterStackCheck()->entry())) {
-      return OSR_AFTER_STACK_CHECK;
     } else {
       UNREACHABLE();
     }
diff --git a/src/full-codegen/full-codegen.cc b/src/full-codegen/full-codegen.cc
index 8255089..af5dd41 100644
--- a/src/full-codegen/full-codegen.cc
+++ b/src/full-codegen/full-codegen.cc
@@ -4,8 +4,8 @@
 
 #include "src/full-codegen/full-codegen.h"
 
-#include "src/ast/ast.h"
 #include "src/ast/ast-numbering.h"
+#include "src/ast/ast.h"
 #include "src/ast/prettyprinter.h"
 #include "src/ast/scopeinfo.h"
 #include "src/ast/scopes.h"
@@ -14,6 +14,7 @@
 #include "src/compiler.h"
 #include "src/debug/debug.h"
 #include "src/debug/liveedit.h"
+#include "src/frames-inl.h"
 #include "src/isolate-inl.h"
 #include "src/macro-assembler.h"
 #include "src/snapshot/snapshot.h"
@@ -30,9 +31,6 @@
   TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
   TRACE_EVENT0("v8", "V8.CompileFullCode");
 
-  // Ensure that the feedback vector is large enough.
-  info->EnsureFeedbackVector();
-
   Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
@@ -67,7 +65,8 @@
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);
   void* line_info = masm.positions_recorder()->DetachJITHandlerData();
-  LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
+  LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(
+                              AbstractCode::cast(*code), line_info));
 
 #ifdef DEBUG
   // Check that no context-specific object has been embedded.
@@ -157,8 +156,7 @@
 
 bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
     ArrayLiteral* expr) const {
-  // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
-  return expr->depth() > 1 || expr->is_strong() ||
+  return expr->depth() > 1 ||
          expr->values()->length() > JSArray::kInitialMaxFastElementArray;
 }
 
@@ -486,14 +484,14 @@
 }
 
 void FullCodeGenerator::OperandStackDepthIncrement(int count) {
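+  // If a stack overflow was flagged, code generation bails out early and the
+  // tracked operand stack depth can be stale, hence the guarded check below.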
+  DCHECK_IMPLIES(!HasStackOverflow(), operand_stack_depth_ >= 0);
   DCHECK_GE(count, 0);
-  DCHECK_GE(operand_stack_depth_, 0);
   operand_stack_depth_ += count;
 }
 
 void FullCodeGenerator::OperandStackDepthDecrement(int count) {
+  DCHECK_IMPLIES(!HasStackOverflow(), operand_stack_depth_ >= count);
   DCHECK_GE(count, 0);
-  DCHECK_GE(operand_stack_depth_, count);
   operand_stack_depth_ -= count;
 }
 
@@ -562,9 +560,17 @@
     }
   }
   __ Call(callable.code(), RelocInfo::CODE_TARGET);
+
+  // Reload the context register after the call, since TurboFan code stubs,
+  // for example, won't preserve the context register.
+  LoadFromFrameField(StandardFrameConstants::kContextOffset,
+                     context_register());
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::EmitNewObject(CallRuntime* expr) {
+  EmitIntrinsicAsStubCall(expr, CodeFactory::FastNewObject(isolate()));
+}
 
 void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   EmitIntrinsicAsStubCall(expr, CodeFactory::NumberToString(isolate()));
@@ -585,6 +591,9 @@
   EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
 }
 
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+  EmitIntrinsicAsStubCall(expr, CodeFactory::ToInteger(isolate()));
+}
 
 void FullCodeGenerator::EmitToNumber(CallRuntime* expr) {
   EmitIntrinsicAsStubCall(expr, CodeFactory::ToNumber(isolate()));
@@ -662,13 +671,16 @@
   }
 }
 
-
-void FullCodeGenerator::SetCallPosition(Expression* expr) {
+void FullCodeGenerator::SetCallPosition(Expression* expr,
+                                        TailCallMode tail_call_mode) {
   if (expr->position() == RelocInfo::kNoPosition) return;
   RecordPosition(masm_, expr->position());
   if (info_->is_debug()) {
+    RelocInfo::Mode mode = (tail_call_mode == TailCallMode::kAllow)
+                               ? RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL
+                               : RelocInfo::DEBUG_BREAK_SLOT_AT_CALL;
     // Always emit a debug break slot before a call.
-    DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
+    DebugCodegen::GenerateSlot(masm_, mode);
   }
 }
 
@@ -851,7 +863,6 @@
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   NestedBlock nested_block(this, stmt);
-  SetStatementPosition(stmt);
 
   {
     EnterBlockScopeIfNeeded block_scope_state(
@@ -880,7 +891,6 @@
 
 void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
   Comment cmnt(masm_, "[ EmptyStatement");
-  SetStatementPosition(stmt);
 }
 
 
@@ -913,7 +923,6 @@
 
 void FullCodeGenerator::EmitContinue(Statement* target) {
   NestedStatement* current = nesting_stack_;
-  int stack_depth = 0;
   int context_length = 0;
   // When continuing, we clobber the unpredictable value in the accumulator
   // with one that's safe for GC.  If we hit an exit from the try block of
@@ -923,15 +932,17 @@
   while (!current->IsContinueTarget(target)) {
     if (current->IsTryFinally()) {
       Comment cmnt(masm(), "[ Deferred continue through finally");
-      current->Exit(&stack_depth, &context_length);
-      DCHECK_EQ(0, stack_depth);
-      DCHECK_EQ(0, context_length);
+      current->Exit(&context_length);
+      DCHECK_EQ(-1, context_length);
       current->AsTryFinally()->deferred_commands()->RecordContinue(target);
       return;
     }
-    current = current->Exit(&stack_depth, &context_length);
+    current = current->Exit(&context_length);
   }
-  __ Drop(stack_depth);
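+  // Unwind the operand stack to the depth recorded when the target statement
+  // was entered (see NestedStatement::GetStackDepthAtTarget).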
+  int stack_depth = current->GetStackDepthAtTarget();
+  int stack_drop = operand_stack_depth_ - stack_depth;
+  DCHECK_GE(stack_drop, 0);
+  __ Drop(stack_drop);
   if (context_length > 0) {
     while (context_length > 0) {
       LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -952,7 +963,6 @@
 
 void FullCodeGenerator::EmitBreak(Statement* target) {
   NestedStatement* current = nesting_stack_;
-  int stack_depth = 0;
   int context_length = 0;
   // When breaking, we clobber the unpredictable value in the accumulator
   // with one that's safe for GC.  If we hit an exit from the try block of
@@ -962,15 +972,17 @@
   while (!current->IsBreakTarget(target)) {
     if (current->IsTryFinally()) {
       Comment cmnt(masm(), "[ Deferred break through finally");
-      current->Exit(&stack_depth, &context_length);
-      DCHECK_EQ(0, stack_depth);
-      DCHECK_EQ(0, context_length);
+      current->Exit(&context_length);
+      DCHECK_EQ(-1, context_length);
       current->AsTryFinally()->deferred_commands()->RecordBreak(target);
       return;
     }
-    current = current->Exit(&stack_depth, &context_length);
+    current = current->Exit(&context_length);
   }
-  __ Drop(stack_depth);
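+  // As in EmitContinue above, drop down to the operand stack depth recorded
+  // at the break target.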
+  int stack_depth = current->GetStackDepthAtTarget();
+  int stack_drop = operand_stack_depth_ - stack_depth;
+  DCHECK_GE(stack_drop, 0);
+  __ Drop(stack_drop);
   if (context_length > 0) {
     while (context_length > 0) {
       LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -991,23 +1003,56 @@
 
 void FullCodeGenerator::EmitUnwindAndReturn() {
   NestedStatement* current = nesting_stack_;
-  int stack_depth = 0;
   int context_length = 0;
   while (current != NULL) {
     if (current->IsTryFinally()) {
       Comment cmnt(masm(), "[ Deferred return through finally");
-      current->Exit(&stack_depth, &context_length);
-      DCHECK_EQ(0, stack_depth);
-      DCHECK_EQ(0, context_length);
+      current->Exit(&context_length);
+      DCHECK_EQ(-1, context_length);
       current->AsTryFinally()->deferred_commands()->RecordReturn();
       return;
     }
-    current = current->Exit(&stack_depth, &context_length);
+    current = current->Exit(&context_length);
   }
-  __ Drop(stack_depth);
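+  // No explicit drop is needed here: EmitReturnSequence tears down the whole
+  // frame, discarding any remaining operands along with it.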
   EmitReturnSequence();
 }
 
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+                                       bool pretenure) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      !pretenure &&
+      scope()->is_function_scope() &&
+      info->num_literals() == 0) {
+    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
+    __ Move(stub.GetCallInterfaceDescriptor().GetRegisterParameter(0), info);
+    __ CallStub(&stub);
+  } else {
+    __ Push(info);
+    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+                             : Runtime::kNewClosure);
+  }
+  context()->Plug(result_register());
+}
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+  SetExpressionPosition(prop);
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(!prop->IsSuperAccess());
+
+  __ Move(LoadDescriptor::NameRegister(), key->value());
+  __ Move(LoadDescriptor::SlotRegister(),
+          SmiFromSlot(prop->PropertyFeedbackSlot()));
+  CallLoadIC(NOT_INSIDE_TYPEOF);
+}
+
 void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
   // Stack: receiver, home_object
   SetExpressionPosition(prop);
@@ -1041,6 +1086,10 @@
   PushOperand(result_register());
 }
 
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+  DCHECK(!slot.IsInvalid());
+  __ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
+}
 
 void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   Comment cmnt(masm_, "[ ReturnStatement");
@@ -1158,7 +1207,6 @@
   Iteration loop_statement(this, stmt);
 
   if (stmt->init() != NULL) {
-    SetStatementPosition(stmt->init());
     Visit(stmt->init());
   }
 
@@ -1236,6 +1284,11 @@
   decrement_loop_depth();
 }
 
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  LoadFromFrameField(JavaScriptFrameConstants::kFunctionOffset,
+                     result_register());
+  context()->Plug(result_register());
+}
 
 void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   Comment cmnt(masm_, "[ TryCatchStatement");
@@ -1250,7 +1303,7 @@
   Label try_entry, handler_entry, exit;
   __ jmp(&try_entry);
   __ bind(&handler_entry);
-  ClearPendingMessage();
+  if (stmt->clear_pending_message()) ClearPendingMessage();
 
   // Exception handler code, the exception is in the result register.
   // Extend the context before executing the catch block.
@@ -1281,7 +1334,8 @@
   try_catch_depth_++;
   int handler_index = NewHandlerTableEntry();
   EnterTryBlock(handler_index, &handler_entry);
-  { TryCatch try_body(this);
+  {
+    Comment cmnt_try(masm(), "[ Try block");
     Visit(stmt->try_block());
   }
   ExitTryBlock(handler_index);
@@ -1322,7 +1376,7 @@
   // Exception handler code.  This code is only executed when an exception
   // is thrown.  Record the continuation and jump to the finally block.
   {
-    Comment cmt_handler(masm(), "[ Finally handler");
+    Comment cmnt_handler(masm(), "[ Finally handler");
     deferred.RecordThrow();
   }
 
@@ -1331,6 +1385,7 @@
   int handler_index = NewHandlerTableEntry();
   EnterTryBlock(handler_index, &handler_entry);
   {
+    Comment cmnt_try(masm(), "[ Try block");
     TryFinally try_body(this, &deferred);
     Visit(stmt->try_block());
   }
@@ -1345,15 +1400,14 @@
 
   // Finally block implementation.
   __ bind(&finally_entry);
-  Comment cmnt_finally(masm(), "[ Finally block");
-  OperandStackDepthIncrement(2);  // Token and accumulator are on stack.
-  EnterFinallyBlock();
   {
-    Finally finally_body(this);
+    Comment cmnt_finally(masm(), "[ Finally block");
+    OperandStackDepthIncrement(2);  // Token and accumulator are on stack.
+    EnterFinallyBlock();
     Visit(stmt->finally_block());
+    ExitFinallyBlock();
+    OperandStackDepthDecrement(2);  // Token and accumulator were on stack.
   }
-  ExitFinallyBlock();
-  OperandStackDepthDecrement(2);  // Token and accumulator were on stack.
 
   {
     Comment cmnt_deferred(masm(), "[ Post-finally dispatch");
@@ -1434,6 +1488,7 @@
   Comment cmnt(masm_, "[ ClassLiteral");
 
   {
+    NestedClassLiteral nested_class_literal(this, lit);
     EnterBlockScopeIfNeeded block_scope_state(
         this, lit->scope(), lit->EntryId(), lit->DeclsId(), lit->ExitId());
 
@@ -1463,8 +1518,7 @@
 
     EmitClassDefineProperties(lit);
 
-    // Set both the prototype and constructor to have fast properties, and also
-    // freeze them in strong mode.
+    // Set both the prototype and constructor to have fast properties.
     CallRuntimeWithOperands(Runtime::kFinalizeClassDefinition);
 
     if (lit->class_variable_proxy() != nullptr) {
@@ -1586,6 +1640,49 @@
 #endif
 }
 
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  if (expr->is_jsruntime()) {
+    Comment cmnt(masm_, "[ CallRuntime");
+    EmitLoadJSRuntimeFunction(expr);
+
+    // Push the arguments ("left-to-right").
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+
+    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+    EmitCallJSRuntimeFunction(expr);
+    context()->DropAndPlug(1, result_register());
+
+  } else {
+    const Runtime::Function* function = expr->function();
+    switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name)     \
+  case Runtime::kInline##Name: {           \
+    Comment cmnt(masm_, "[ Inline" #Name); \
+    return Emit##Name(expr);               \
+  }
+      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+      default: {
+        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+        // Push the arguments ("left-to-right").
+        for (int i = 0; i < arg_count; i++) {
+          VisitForStackValue(args->at(i));
+        }
+
+        // Call the C runtime function.
+        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+        __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
+        context()->Plug(result_register());
+      }
+    }
+  }
+}
 
 void FullCodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
 
@@ -1599,28 +1696,32 @@
   Visit(expr->expression());
 }
 
-
 FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
-    int* stack_depth, int* context_length) {
+    int* context_length) {
   // The macros used here must preserve the result register.
 
+  // Calculate how many operands to drop to get down to handler block.
+  int stack_drop = codegen_->operand_stack_depth_ - GetStackDepthAtTarget();
+  DCHECK_GE(stack_drop, 0);
+
   // Because the handler block contains the context of the finally
   // code, we can restore it directly from there for the finally code
   // rather than iteratively unwinding contexts via their previous
   // links.
   if (*context_length > 0) {
-    __ Drop(*stack_depth);  // Down to the handler block.
+    __ Drop(stack_drop);  // Down to the handler block.
     // Restore the context to its dedicated register and the stack.
-    STATIC_ASSERT(TryFinally::kElementCount == 1);
+    STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
     __ Pop(codegen_->context_register());
     codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
                                 codegen_->context_register());
   } else {
     // Down to the handler block and also drop context.
-    __ Drop(*stack_depth + kElementCount);
+    __ Drop(stack_drop + TryBlockConstant::kElementCount);
   }
-  *stack_depth = 0;
-  *context_length = 0;
+
+  // The caller will ignore outputs.
+  *context_length = -1;
   return previous_;
 }
 
@@ -1671,7 +1772,7 @@
     return true;
   }
 
-  if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
     EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
     return true;
   }
@@ -1733,27 +1834,6 @@
 }
 
 
-void BackEdgeTable::AddStackCheck(Handle<Code> code, uint32_t pc_offset) {
-  DisallowHeapAllocation no_gc;
-  Isolate* isolate = code->GetIsolate();
-  Address pc = code->instruction_start() + pc_offset;
-  Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
-  PatchAt(*code, pc, OSR_AFTER_STACK_CHECK, patch);
-}
-
-
-void BackEdgeTable::RemoveStackCheck(Handle<Code> code, uint32_t pc_offset) {
-  DisallowHeapAllocation no_gc;
-  Isolate* isolate = code->GetIsolate();
-  Address pc = code->instruction_start() + pc_offset;
-
-  if (OSR_AFTER_STACK_CHECK == GetBackEdgeState(isolate, *code, pc)) {
-    Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    PatchAt(*code, pc, ON_STACK_REPLACEMENT, patch);
-  }
-}
-
-
 #ifdef DEBUG
 bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
diff --git a/src/full-codegen/full-codegen.h b/src/full-codegen/full-codegen.h
index 6ab0231..0c12937 100644
--- a/src/full-codegen/full-codegen.h
+++ b/src/full-codegen/full-codegen.h
@@ -93,6 +93,12 @@
   static const int kCodeSizeMultiplier = 149;
 #elif V8_TARGET_ARCH_MIPS64
   static const int kCodeSizeMultiplier = 149;
+#elif V8_TARGET_ARCH_S390
+// TODO(joransiu): Copied PPC value. Check this is sensible for S390.
+  static const int kCodeSizeMultiplier = 200;
+#elif V8_TARGET_ARCH_S390X
+// TODO(joransiu): Copied PPC value. Check this is sensible for S390X.
+  static const int kCodeSizeMultiplier = 200;
 #else
 #error Unsupported target architecture.
 #endif
@@ -108,7 +114,9 @@
 
   class NestedStatement BASE_EMBEDDED {
    public:
-    explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
+    explicit NestedStatement(FullCodeGenerator* codegen)
+        : codegen_(codegen),
+          stack_depth_at_target_(codegen->operand_stack_depth_) {
       // Link into codegen's nesting stack.
       previous_ = codegen->nesting_stack_;
       codegen->nesting_stack_ = this;
@@ -130,18 +138,20 @@
     // Notify the statement that we are exiting it via break, continue, or
     // return and give it a chance to generate cleanup code.  Return the
     // next outer statement in the nesting stack.  We accumulate in
-    // *stack_depth the amount to drop the stack and in *context_length the
-    // number of context chain links to unwind as we traverse the nesting
-    // stack from an exit to its target.
-    virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
-      return previous_;
-    }
+    // {*context_length} the number of context chain links to unwind as we
+    // traverse the nesting stack from an exit to its target.
+    virtual NestedStatement* Exit(int* context_length) { return previous_; }
+
+    // Determine the expected operand stack depth when this statement is being
+    // used as the target of an exit. The caller will drop to this depth.
+    int GetStackDepthAtTarget() { return stack_depth_at_target_; }
 
    protected:
     MacroAssembler* masm() { return codegen_->masm(); }
 
     FullCodeGenerator* codegen_;
     NestedStatement* previous_;
+    int stack_depth_at_target_;
 
    private:
     DISALLOW_COPY_AND_ASSIGN(NestedStatement);
@@ -192,7 +202,7 @@
         : Breakable(codegen, block) {
     }
 
-    NestedStatement* Exit(int* stack_depth, int* context_length) override {
+    NestedStatement* Exit(int* context_length) override {
       auto block_scope = statement()->AsBlock()->scope();
       if (block_scope != nullptr) {
         if (block_scope->ContextLocalCount() > 0) ++(*context_length);
@@ -201,17 +211,21 @@
     }
   };
 
-  // The try block of a try/catch statement.
-  class TryCatch : public NestedStatement {
+  // A class literal expression; its scope may allocate a context that has to
+  // be unwound when exiting across this expression.
+  class NestedClassLiteral : public NestedStatement {
    public:
-    static const int kElementCount = TryBlockConstant::kElementCount;
+    NestedClassLiteral(FullCodeGenerator* codegen, ClassLiteral* lit)
+        : NestedStatement(codegen),
+          needs_context_(lit->scope() != nullptr &&
+                         lit->scope()->NeedsContext()) {}
 
-    explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
-
-    NestedStatement* Exit(int* stack_depth, int* context_length) override {
-      *stack_depth += kElementCount;
+    NestedStatement* Exit(int* context_length) override {
+      if (needs_context_) ++(*context_length);
       return previous_;
     }
+
+   private:
+    const bool needs_context_;
   };
 
   class DeferredCommands {
@@ -254,12 +268,10 @@
   // The try block of a try/finally statement.
   class TryFinally : public NestedStatement {
    public:
-    static const int kElementCount = TryBlockConstant::kElementCount;
-
     TryFinally(FullCodeGenerator* codegen, DeferredCommands* commands)
         : NestedStatement(codegen), deferred_commands_(commands) {}
 
-    NestedStatement* Exit(int* stack_depth, int* context_length) override;
+    NestedStatement* Exit(int* context_length) override;
 
     bool IsTryFinally() override { return true; }
     TryFinally* AsTryFinally() override { return this; }
@@ -270,35 +282,6 @@
     DeferredCommands* deferred_commands_;
   };
 
-  // The finally block of a try/finally statement.
-  class Finally : public NestedStatement {
-   public:
-    static const int kElementCount = 3;
-
-    explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
-
-    NestedStatement* Exit(int* stack_depth, int* context_length) override {
-      *stack_depth += kElementCount;
-      return previous_;
-    }
-  };
-
-  // The body of a for/in loop.
-  class ForIn : public Iteration {
-   public:
-    static const int kElementCount = 5;
-
-    ForIn(FullCodeGenerator* codegen, ForInStatement* statement)
-        : Iteration(codegen, statement) {
-    }
-
-    NestedStatement* Exit(int* stack_depth, int* context_length) override {
-      *stack_depth += kElementCount;
-      return previous_;
-    }
-  };
-
-
   // The body of a with or catch.
   class WithOrCatch : public NestedStatement {
    public:
@@ -306,7 +289,7 @@
         : NestedStatement(codegen) {
     }
 
-    NestedStatement* Exit(int* stack_depth, int* context_length) override {
+    NestedStatement* Exit(int* context_length) override {
       ++(*context_length);
       return previous_;
     }
@@ -537,6 +520,7 @@
   F(IsRegExp)                           \
   F(IsJSProxy)                          \
   F(Call)                               \
+  F(NewObject)                          \
   F(ValueOf)                            \
   F(StringCharFromCode)                 \
   F(StringCharAt)                       \
@@ -564,6 +548,7 @@
   F(ToName)                             \
   F(ToObject)                           \
   F(DebugIsActive)                      \
+  F(GetOrdinaryHasInstance)             \
   F(CreateIterResultObject)
 
 #define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
@@ -681,6 +666,8 @@
   void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
                                     FeedbackVectorSlot slot);
 
+  void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
+
   void CallIC(Handle<Code> code,
               TypeFeedbackId id = TypeFeedbackId::None());
 
@@ -707,7 +694,8 @@
   // This is used in loop headers where we want to break for each iteration.
   void SetExpressionAsStatementPosition(Expression* expr);
 
-  void SetCallPosition(Expression* expr);
+  void SetCallPosition(Expression* expr,
+                       TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
   void SetConstructCallPosition(Expression* expr) {
     // Currently call and construct calls are treated the same wrt debugging.
@@ -743,13 +731,16 @@
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   bool is_native() { return info_->is_native(); }
-  LanguageMode language_mode() { return literal()->language_mode(); }
+  LanguageMode language_mode() { return scope()->language_mode(); }
   bool has_simple_parameters() { return info_->has_simple_parameters(); }
   FunctionLiteral* literal() const { return info_->literal(); }
   Scope* scope() { return scope_; }
 
   static Register context_register();
 
+  // Get fields from the stack frame. Offsets are the frame pointer relative
+  // offsets defined in, e.g., StandardFrameConstants.
+  void LoadFromFrameField(int frame_offset, Register value);
   // Set fields in the stack frame. Offsets are the frame pointer relative
   // offsets defined in, e.g., StandardFrameConstants.
   void StoreToFrameField(int frame_offset, Register value);
@@ -783,8 +774,6 @@
   bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
   bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
 
-  void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
-
   int NewHandlerTableEntry();
 
   struct BailoutEntry {
@@ -1046,11 +1035,7 @@
     return instruction_start_ + pc_offset(index);
   }
 
-  enum BackEdgeState {
-    INTERRUPT,
-    ON_STACK_REPLACEMENT,
-    OSR_AFTER_STACK_CHECK
-  };
+  enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT };
 
   // Increase allowed loop nesting level by one and patch those matching loops.
   static void Patch(Isolate* isolate, Code* unoptimized_code);
@@ -1065,13 +1050,6 @@
   static void Revert(Isolate* isolate,
                      Code* unoptimized_code);
 
-  // Change a back edge patched for on-stack replacement to perform a
-  // stack check first.
-  static void AddStackCheck(Handle<Code> code, uint32_t pc_offset);
-
-  // Revert the patch by AddStackCheck.
-  static void RemoveStackCheck(Handle<Code> code, uint32_t pc_offset);
-
   // Return the current patch state of the back edge.
   static BackEdgeState GetBackEdgeState(Isolate* isolate,
                                         Code* unoptimized_code,
diff --git a/src/full-codegen/ia32/full-codegen-ia32.cc b/src/full-codegen/ia32/full-codegen-ia32.cc
index fadcd7c..f1945c8 100644
--- a/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -288,39 +288,35 @@
     __ CallRuntime(Runtime::kTraceEnter);
   }
 
-  // Visit the declarations and body unless there is an illegal
-  // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  // Visit the declarations and body.
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &ok, Label::kNear);
+    __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
 
-    { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      ExternalReference stack_limit
-          = ExternalReference::address_of_stack_limit(isolate());
-      __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(above_equal, &ok, Label::kNear);
-      __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
-      __ bind(&ok);
-    }
-
-    { Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(literal()->body());
-      DCHECK(loop_depth() == 0);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -487,7 +483,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -595,7 +591,7 @@
                                Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
   Split(equal, if_true, if_false, fall_through);
@@ -934,14 +930,14 @@
 
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
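+  // The for-in state occupies five operand stack slots: the object being
+  // enumerated, a map or Smi(1) marker, the enum cache or fixed array, its
+  // length, and the running index.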
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
@@ -1008,11 +1004,6 @@
   // We got a fixed array in register eax. Iterate through that.
   __ bind(&fixed_array);
 
-  // No need for a write barrier, we are storing a Smi in the feedback vector.
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(ebx);
-  __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
-         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ push(Immediate(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
@@ -1043,12 +1034,8 @@
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key; record the slow path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(edx);
   __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
          Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
@@ -1089,8 +1076,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ add(esp, Immediate(5 * kPointerSize));
-  OperandStackDepthDecrement(ForIn::kElementCount);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1099,31 +1085,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ mov(ebx, Immediate(info));
-    __ CallStub(&stub);
-  } else {
-    __ push(Immediate(info));
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1554,12 +1515,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ push(Operand(esp, 0));
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1801,64 +1756,44 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ jmp(&suspend);
-      __ bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ pop(ebx);
-      __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
-      __ j(not_equal, &resume);
-      __ push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ jmp(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ pop(ebx);
+  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+  __ j(not_equal, &resume);
+  __ push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(continuation.pos())));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-      __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                          kDontSaveFPRegs);
-      __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
-      __ cmp(esp, ebx);
-      __ j(equal, &post_runtime);
-      __ push(eax);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ mov(context_register(),
-             Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
-      PopOperand(result_register());
-      EmitReturnSequence();
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+         Immediate(Smi::FromInt(continuation.pos())));
+  __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+  __ mov(ecx, esi);
+  __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                      kDontSaveFPRegs);
+  __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+  __ cmp(esp, ebx);
+  __ j(equal, &post_runtime);
+  __ push(eax);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ mov(context_register(),
+         Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ bind(&resume);
+  context()->Plug(result_register());
 }
 
 
@@ -1886,7 +1821,10 @@
   // Push receiver.
   __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
 
-  // Push holes for arguments to generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(edx,
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1991,19 +1929,7 @@
   __ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
          isolate()->factory()->ToBoolean(done));
   STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(!prop->IsSuperAccess());
-
-  __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
-  __ mov(LoadDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
+  OperandStackDepthDecrement(1);
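+  // Account for the value that was popped off the operand stack above.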
 }
 
 
@@ -2597,7 +2523,7 @@
   }
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -2953,9 +2879,10 @@
   __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, eax);
   __ j(below, &null, Label::kNear);
 
-  // Return 'Function' for JSFunction objects.
-  __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-  __ j(equal, &function, Label::kNear);
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  __ CmpInstanceType(eax, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ j(above_equal, &function, Label::kNear);
 
   // Check if the constructor in the map is a JS function.
   __ GetMapConstructor(eax, eax, ebx);
@@ -3077,23 +3004,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into eax and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(eax, &done_convert, Label::kNear);
-  __ Push(eax);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3277,6 +3187,12 @@
   context()->Plug(eax);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ mov(eax, NativeContextOperand());
+  __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
+  context()->Plug(eax);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3318,10 +3234,12 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadGlobalFunction(expr->context_index(), eax);
+  PushOperand(eax);
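+  // With the callee pushed here, below the receiver and arguments,
+  // VisitCallRuntime no longer needs to shuffle it into place afterwards.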
+
   // Push undefined as receiver.
   PushOperand(isolate()->factory()->undefined_value());
-
-  __ LoadGlobalFunction(expr->context_index(), eax);
 }
 
 
@@ -3335,58 +3253,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRuntime");
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    PushOperand(Operand(esp, 0));
-    __ mov(Operand(esp, kPointerSize), eax);
-
-    // Push the arguments ("left-to-right").
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    context()->DropAndPlug(1, eax);
-
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(expr->function(), arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(eax);
-      }
-    }
-  }
+  // Restore context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3631,11 +3500,11 @@
     __ jmp(&stub_call, Label::kNear);
     __ bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3675,9 +3544,6 @@
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in eax.
   switch (assign_type) {
     case VARIABLE:
@@ -3803,7 +3669,7 @@
     // Check for undetectable objects => true.
     __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     Split(not_zero, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->function_string())) {
     __ JumpIfSmi(eax, if_false);
@@ -3822,7 +3688,7 @@
     __ j(below, if_false);
     // Check for callable or undetectable objects => false.
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
-              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(zero, if_true, if_false, fall_through);
 // clang-format off
 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
@@ -3933,21 +3799,16 @@
     __ cmp(eax, nil_value);
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ cmp(eax, isolate()->factory()->true_value());
-    Split(equal, if_true, if_false, fall_through);
+    __ JumpIfSmi(eax, if_false);
+    __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(eax, Map::kBitFieldOffset),
+              Immediate(1 << Map::kIsUndetectable));
+    Split(not_zero, if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(eax);
-}
-
-
 Register FullCodeGenerator::result_register() {
   return eax;
 }
@@ -3957,6 +3818,10 @@
   return esi;
 }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ mov(value, Operand(ebp, frame_offset));
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4023,12 +3888,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(slot)));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   DCHECK(!result_register().is(edx));
   __ Pop(result_register());  // Restore the accumulator.
@@ -4086,7 +3945,6 @@
       *jns_offset_address = kJnsOffset;
       break;
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       //     sub <profiling_counter>, <delta>  ;; Not changed
       //     nop
       //     nop
@@ -4124,15 +3982,10 @@
   DCHECK_EQ(kNopByteOne, *jns_instr_address);
   DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
 
-  if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
-      isolate->builtins()->OnStackReplacement()->entry()) {
-    return ON_STACK_REPLACEMENT;
-  }
-
-  DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
-            Assembler::target_address_at(call_target_address,
-                                         unoptimized_code));
-  return OSR_AFTER_STACK_CHECK;
+  DCHECK_EQ(
+      isolate->builtins()->OnStackReplacement()->entry(),
+      Assembler::target_address_at(call_target_address, unoptimized_code));
+  return ON_STACK_REPLACEMENT;
 }
 
 
diff --git a/src/full-codegen/mips/full-codegen-mips.cc b/src/full-codegen/mips/full-codegen-mips.cc
index c8ce204..f329a23 100644
--- a/src/full-codegen/mips/full-codegen-mips.cc
+++ b/src/full-codegen/mips/full-codegen-mips.cc
@@ -305,38 +305,35 @@
 
   // Visit the declarations and body unless there is an illegal
   // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&ok, hs, sp, Operand(at));
+    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+    PredictableCodeSizeScope predictable(
+        masm_, masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+    __ Call(stack_check, RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
 
-    { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      __ LoadRoot(at, Heap::kStackLimitRootIndex);
-      __ Branch(&ok, hs, sp, Operand(at));
-      Handle<Code> stack_check = isolate()->builtins()->StackCheck();
-      PredictableCodeSizeScope predictable(masm_,
-          masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
-      __ Call(stack_check, RelocInfo::CODE_TARGET);
-      __ bind(&ok);
-    }
-
-    { Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(literal()->body());
-      DCHECK(loop_depth() == 0);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -527,7 +524,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -637,7 +634,7 @@
                                Label* if_false,
                                Label* fall_through) {
   __ mov(a0, result_register());
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ LoadRoot(at, Heap::kTrueValueRootIndex);
   Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
@@ -990,15 +987,15 @@
 
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ mov(a0, result_register());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
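
The for-in prologue now bumps the tracked operand-stack depth by a literal 5 before the Iteration helper is constructed (the old code used ForIn::kElementCount for the same value); the five slots roughly correspond to the enumerable object, the cache type, the cache array, its length and the running index kept on the stack across iterations. A toy model of that bookkeeping, with illustrative names only:

```cpp
#include <cassert>

class OperandStackTracker {
 public:
  void Increment(int n) { depth_ += n; }
  void Decrement(int n) { assert(depth_ >= n); depth_ -= n; }
  int depth() const { return depth_; }

 private:
  int depth_ = 0;
};

int main() {
  OperandStackTracker tracker;
  tracker.Increment(5);    // for-in loop state pushed up front
  // ... loop body runs with those five slots live ...
  tracker.Decrement(5);    // dropped again when the loop exits
  return tracker.depth();  // 0: the operand stack is balanced
}
```
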
@@ -1072,11 +1069,6 @@
   // We got a fixed array in register v0. Iterate through that.
   __ bind(&fixed_array);
 
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(a1);
-  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
-
   __ li(a1, Operand(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ Push(a1, v0);  // Smi and array
   __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
@@ -1111,12 +1103,8 @@
   __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
   __ Branch(&update_each, eq, t0, Operand(a2));
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key, so record the slow path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(a0);
   __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ sw(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
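
The megamorphic marker is now written into the for-in feedback slot only on this key-filtering slow path instead of unconditionally when the fixed-array case is entered. A schematic model of that recording step (types and names are illustrative, not the V8 TypeFeedbackVector API):

```cpp
#include <cstddef>
#include <vector>

// When the slow, key-filtering path is taken, the loop's feedback slot is
// overwritten with a "megamorphic" marker so later optimized code will not
// assume the fast enum-cache path still holds.
enum class ForInFeedback { kUninitialized, kFastEnumCache, kMegamorphic };

void RecordSlowForIn(std::vector<ForInFeedback>& feedback, size_t slot) {
  feedback[slot] = ForInFeedback::kMegamorphic;
}
```
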
@@ -1167,31 +1155,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ li(a2, Operand(info));
-    __ CallStub(&stub);
-  } else {
-    __ Push(info);
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1628,13 +1591,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ lw(a0, MemOperand(sp));
-    __ push(a0);
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1882,62 +1838,41 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ jmp(&suspend);
-      __ bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ pop(a1);
-      __ Branch(&resume, ne, a1,
-                Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
-      __ push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ jmp(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ pop(a1);
+  __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+  __ push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ li(a1, Operand(Smi::FromInt(continuation.pos())));
-      __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
-      __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
-      __ mov(a1, cp);
-      __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
-                          kRAHasBeenSaved, kDontSaveFPRegs);
-      __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-      __ Branch(&post_runtime, eq, sp, Operand(a1));
-      __ push(v0);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
-      PopOperand(result_register());
-      EmitReturnSequence();
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+  __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+  __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+  __ mov(a1, cp);
+  __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+                      kRAHasBeenSaved, kDontSaveFPRegs);
+  __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+  __ Branch(&post_runtime, eq, sp, Operand(a1));
+  __ push(v0);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ bind(&resume);
+  context()->Plug(result_register());
 }
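
VisitYield no longer switches on a yield kind; only the ordinary suspend/resume path is emitted here, with the extra OperandStackDepthIncrement(1) accounting for the value that stays on the stack when the generator suspends (the other yield kinds are presumably desugared or handled elsewhere in this version). What the suspend path records before returning, modelled with plain structs rather than V8 heap objects:

```cpp
// Illustrative model only: the suspend path stores the resume position and
// the current context into the generator object, matching the stores to
// kContinuationOffset and kContextOffset above (the real code stores the
// position as a Smi and uses a write barrier for the context field).
struct Context;

struct GeneratorObjectModel {
  int continuation;  // where to resume
  Context* context;  // saved cp
};

void Suspend(GeneratorObjectModel* gen, int resume_pos, Context* cp) {
  gen->continuation = resume_pos;
  gen->context = cp;
}
```
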
 
 
@@ -1967,7 +1902,10 @@
   __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
   __ push(a2);
 
-  // Push holes for the rest of the arguments to the generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
   __ lw(a3,
         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1990,9 +1928,7 @@
   // fp = caller's frame pointer.
   // cp = callee's context,
   // t0 = callee's JS function.
-  __ Push(ra, fp, cp, t0);
-  // Adjust FP to point to saved FP.
-  __ Addu(fp, sp, 2 * kPointerSize);
+  __ PushStandardFrame(t0);
 
   // Load the operand stack size.
   __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
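
The hand-written frame setup (push ra, fp, cp and the function, then point fp two slots above the stack pointer) is folded into the PushStandardFrame macro. A layout sketch, assuming the macro builds the same four-slot standard frame as the removed sequence:

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kPointerSize = sizeof(void*);

// Offsets are from the new stack pointer; fp ends up pointing at the saved
// caller fp slot, i.e. fp = sp + 2 * kPointerSize.
struct StandardFrame {
  uintptr_t function;        // sp + 0 * kPointerSize
  uintptr_t context;         // sp + 1 * kPointerSize
  uintptr_t caller_fp;       // sp + 2 * kPointerSize  <- new fp
  uintptr_t return_address;  // sp + 3 * kPointerSize
};

static_assert(offsetof(StandardFrame, caller_fp) == 2 * kPointerSize,
              "fp must point at the saved caller fp");
```
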
@@ -2079,7 +2015,7 @@
 
   __ bind(&done_allocate);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
-  __ pop(a2);
+  PopOperand(a2);
   __ LoadRoot(a3,
               done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
   __ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
@@ -2092,18 +2028,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!prop->IsSuperAccess());
-
-  __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
-  __ li(LoadDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
 void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               Expression* left_expr,
@@ -2711,7 +2635,7 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   // Record source position of the IC call.
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -3073,8 +2997,9 @@
   __ GetObjectType(v0, v0, a1);  // Map is now in v0.
   __ Branch(&null, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
 
-  // Return 'Function' for JSFunction objects.
-  __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ Branch(&function, hs, a1, Operand(FIRST_FUNCTION_TYPE));
 
   // Check if the constructor in the map is a JS function.
   Register instance_type = a2;
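
Because the function instance types now sit at the end of the instance-type enumeration, a single unsigned comparison against FIRST_FUNCTION_TYPE classifies both JSFunction and JSBoundFunction, replacing the old equality test against JS_FUNCTION_TYPE. A standalone model of that check (enum values here are made up; only the ordering matters):

```cpp
enum InstanceType : unsigned {
  FIRST_JS_RECEIVER_TYPE = 0x80,
  JS_OBJECT_TYPE,
  FIRST_FUNCTION_TYPE,
  JS_BOUND_FUNCTION_TYPE = FIRST_FUNCTION_TYPE,
  JS_FUNCTION_TYPE,
  LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
  LAST_TYPE = LAST_FUNCTION_TYPE
};

static_assert(LAST_FUNCTION_TYPE == LAST_TYPE,
              "function types must come last for the range check");

// 'hs' in the branch above is the unsigned "higher or same" condition.
bool IsFunctionClass(InstanceType type) { return type >= FIRST_FUNCTION_TYPE; }
```
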
@@ -3203,23 +3128,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into v0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(v0, &done_convert);
-  __ Push(v0);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3402,6 +3310,11 @@
   context()->Plug(v0);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
+  context()->Plug(v0);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3443,11 +3356,13 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadNativeContextSlot(expr->context_index(), v0);
+  PushOperand(v0);
+
   // Push undefined as the receiver.
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
   PushOperand(v0);
-
-  __ LoadNativeContextSlot(expr->context_index(), v0);
 }
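
EmitLoadJSRuntimeFunction now pushes the target function first and the undefined receiver on top of it, so the caller no longer has to slide the function underneath the receiver afterwards (the lw/sw shuffle in the removed VisitCallRuntime below). A model of the resulting push order, not V8 code:

```cpp
#include <cstdint>
#include <vector>

using TaggedValue = uintptr_t;
constexpr TaggedValue kUndefined = 0;  // stand-in for the undefined oddball

void LoadJSRuntimeFunction(std::vector<TaggedValue>& stack,
                           TaggedValue function) {
  stack.push_back(function);    // target ends up under the receiver
  stack.push_back(kUndefined);  // receiver
  // the shared caller then pushes the arguments left-to-right
}
```
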
 
 
@@ -3461,60 +3376,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRuntime");
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    __ lw(at, MemOperand(sp, 0));
-    PushOperand(at);
-    __ sw(v0, MemOperand(sp, kPointerSize));
-
-    // Push the arguments ("left-to-right").
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-    context()->DropAndPlug(1, v0);
-
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(expr->function(), arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(v0);
-      }
-    }
-  }
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3752,11 +3616,11 @@
     __ jmp(&stub_call);
     __ bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
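
With strong mode removed, the old value of a ++/-- expression is always run through ToNumberStub once the inline Smi fast path bails out, and the separate strong-mode bailout after the patch site below is dropped. An illustrative stand-in for that conversion (not V8's ToNumber, which also converts objects via their valueOf):

```cpp
#include <cmath>
#include <string>

// Rough string-only sketch: an empty string converts to 0, a fully numeric
// string converts to its value, anything else becomes NaN.
double ToNumberModel(const std::string& value) {
  if (value.empty()) return 0.0;
  try {
    size_t pos = 0;
    double d = std::stod(value, &pos);
    return pos == value.size() ? d : std::nan("");
  } catch (...) {
    return std::nan("");
  }
}
```
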
@@ -3795,9 +3659,6 @@
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in v0.
   switch (assign_type) {
     case VARIABLE:
@@ -4044,29 +3905,23 @@
 
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ mov(a0, result_register());
   if (expr->op() == Token::EQ_STRICT) {
     Heap::RootListIndex nil_value = nil == kNullValue ?
         Heap::kNullValueRootIndex :
         Heap::kUndefinedValueRootIndex;
     __ LoadRoot(a1, nil_value);
-    Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
-  } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ LoadRoot(a1, Heap::kTrueValueRootIndex);
     Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+  } else {
+    __ JumpIfSmi(v0, if_false);
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }
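
The CompareNilIC is replaced by an inline check: a non-strict comparison against null or undefined is true exactly when the value is null, undefined, or an "undetectable" object, and all of those are heap objects whose maps carry the kIsUndetectable bit (which is presumably why the single bit test suffices). A standalone model of the test, with an illustrative bit position:

```cpp
#include <cstdint>

constexpr uint8_t kIsUndetectableBit = 1 << 4;  // bit number is made up

// Mirrors the emitted sequence: Smis are never == null/undefined; otherwise
// load the map's bit field and test the undetectable bit.
bool AbstractEqualsNil(bool is_smi, uint8_t map_bit_field) {
  if (is_smi) return false;
  return (map_bit_field & kIsUndetectableBit) != 0;
}
```
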
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(v0);
-}
-
-
 Register FullCodeGenerator::result_register() {
   return v0;
 }
@@ -4076,6 +3931,10 @@
   return cp;
 }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ lw(value, MemOperand(fp, frame_offset));
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4146,12 +4005,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(slot)));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   DCHECK(!result_register().is(a1));
   __ Pop(result_register());  // Restore the accumulator.
@@ -4187,7 +4040,9 @@
                             BackEdgeState target_state,
                             Code* replacement_code) {
   static const int kInstrSize = Assembler::kInstrSize;
-  Address branch_address = pc - 6 * kInstrSize;
+  Address pc_immediate_load_address =
+      Assembler::target_address_from_return_address(pc);
+  Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
   Isolate* isolate = unoptimized_code->GetIsolate();
   CodePatcher patcher(isolate, branch_address, 1);
 
@@ -4203,7 +4058,6 @@
       patcher.masm()->slt(at, a3, zero_reg);
       break;
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       // addiu at, zero_reg, 1
       // beq at, zero_reg, ok  ;; Not changed
       // lui t9, <on-stack replacement address> upper
@@ -4214,7 +4068,6 @@
       patcher.masm()->addiu(at, zero_reg, 1);
       break;
   }
-  Address pc_immediate_load_address = pc - 4 * kInstrSize;
   // Replace the stack check address in the load-immediate (lui/ori pair)
   // with the entry address of the replacement code.
   Assembler::set_target_address_at(isolate, pc_immediate_load_address,
@@ -4230,10 +4083,11 @@
     Code* unoptimized_code,
     Address pc) {
   static const int kInstrSize = Assembler::kInstrSize;
-  Address branch_address = pc - 6 * kInstrSize;
-  Address pc_immediate_load_address = pc - 4 * kInstrSize;
+  Address pc_immediate_load_address =
+      Assembler::target_address_from_return_address(pc);
+  Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
 
-  DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+  DCHECK(Assembler::IsBeq(Assembler::instr_at(branch_address + kInstrSize)));
   if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
     DCHECK(reinterpret_cast<uint32_t>(
         Assembler::target_address_at(pc_immediate_load_address)) ==
@@ -4244,18 +4098,11 @@
 
   DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
 
-  if (reinterpret_cast<uint32_t>(
-      Assembler::target_address_at(pc_immediate_load_address)) ==
-          reinterpret_cast<uint32_t>(
-              isolate->builtins()->OnStackReplacement()->entry())) {
-    return ON_STACK_REPLACEMENT;
-  }
-
   DCHECK(reinterpret_cast<uint32_t>(
-      Assembler::target_address_at(pc_immediate_load_address)) ==
+             Assembler::target_address_at(pc_immediate_load_address)) ==
          reinterpret_cast<uint32_t>(
-             isolate->builtins()->OsrAfterStackCheck()->entry()));
-  return OSR_AFTER_STACK_CHECK;
+             isolate->builtins()->OnStackReplacement()->entry()));
+  return ON_STACK_REPLACEMENT;
 }
 
 
diff --git a/src/full-codegen/mips64/full-codegen-mips64.cc b/src/full-codegen/mips64/full-codegen-mips64.cc
index c85dee4..681abd1 100644
--- a/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -301,42 +301,38 @@
     __ CallRuntime(Runtime::kTraceEnter);
   }
 
-  // Visit the declarations and body unless there is an illegal
-  // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  // Visit the declarations and body.
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&ok, hs, sp, Operand(at));
+    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+    PredictableCodeSizeScope predictable(
+        masm_, masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+    __ Call(stack_check, RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
 
-    { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      __ LoadRoot(at, Heap::kStackLimitRootIndex);
-      __ Branch(&ok, hs, sp, Operand(at));
-      Handle<Code> stack_check = isolate()->builtins()->StackCheck();
-      PredictableCodeSizeScope predictable(masm_,
-          masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
-      __ Call(stack_check, RelocInfo::CODE_TARGET);
-      __ bind(&ok);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
 
-    { Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
 
-      VisitStatements(literal()->body());
-
-      DCHECK(loop_depth() == 0);
-    }
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -527,7 +523,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -637,7 +633,7 @@
                                Label* if_false,
                                Label* fall_through) {
   __ mov(a0, result_register());
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ LoadRoot(at, Heap::kTrueValueRootIndex);
   Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
@@ -990,16 +986,16 @@
 
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ mov(a0, result_register());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
@@ -1073,11 +1069,6 @@
   // We got a fixed array in register v0. Iterate through that.
   __ bind(&fixed_array);
 
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(a1);
-  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  __ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
-
   __ li(a1, Operand(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ Push(a1, v0);  // Smi and array
   __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
@@ -1113,12 +1104,8 @@
   __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
   __ Branch(&update_each, eq, a4, Operand(a2));
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key, so record the slow path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(a0);
   __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ sd(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
@@ -1169,31 +1156,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ li(a2, Operand(info));
-    __ CallStub(&stub);
-  } else {
-    __ Push(info);
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1630,13 +1592,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ ld(a0, MemOperand(sp));
-    __ push(a0);
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1884,62 +1839,41 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ jmp(&suspend);
-      __ bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ pop(a1);
-      __ Branch(&resume, ne, a1,
-                Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
-      __ push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ jmp(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ pop(a1);
+  __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+  __ push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ li(a1, Operand(Smi::FromInt(continuation.pos())));
-      __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
-      __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
-      __ mov(a1, cp);
-      __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
-                          kRAHasBeenSaved, kDontSaveFPRegs);
-      __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-      __ Branch(&post_runtime, eq, sp, Operand(a1));
-      __ push(v0);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
-      PopOperand(result_register());
-      EmitReturnSequence();
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+  __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+  __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+  __ mov(a1, cp);
+  __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+                      kRAHasBeenSaved, kDontSaveFPRegs);
+  __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+  __ Branch(&post_runtime, eq, sp, Operand(a1));
+  __ push(v0);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ bind(&resume);
+  context()->Plug(result_register());
 }
 
 
@@ -1969,7 +1903,10 @@
   __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
   __ push(a2);
 
-  // Push holes for the rest of the arguments to the generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
   // The argument count is stored as int32_t on 64-bit platforms.
   // TODO(plind): Smi on 32-bit platforms.
@@ -1994,9 +1931,7 @@
   // fp = caller's frame pointer.
   // cp = callee's context,
   // a4 = callee's JS function.
-  __ Push(ra, fp, cp, a4);
-  // Adjust FP to point to saved FP.
-  __ Daddu(fp, sp, 2 * kPointerSize);
+  __ PushStandardFrame(a4);
 
   // Load the operand stack size.
   __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
@@ -2083,7 +2018,7 @@
 
   __ bind(&done_allocate);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
-  __ pop(a2);
+  PopOperand(a2);
   __ LoadRoot(a3,
               done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
   __ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
@@ -2096,18 +2031,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!prop->IsSuperAccess());
-
-  __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
-  __ li(LoadDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
 void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               Expression* left_expr,
@@ -2161,12 +2084,10 @@
       break;
     }
     case Token::ADD:
-      __ DadduAndCheckForOverflow(v0, left, right, scratch1);
-      __ BranchOnOverflow(&stub_call, scratch1);
+      __ DaddBranchOvf(v0, left, Operand(right), &stub_call);
       break;
     case Token::SUB:
-      __ DsubuAndCheckForOverflow(v0, left, right, scratch1);
-      __ BranchOnOverflow(&stub_call, scratch1);
+      __ DsubBranchOvf(v0, left, Operand(right), &stub_call);
       break;
     case Token::MUL: {
       __ Dmulh(v0, left, right);
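
The add/subtract-with-overflow idiom is condensed: instead of DadduAndCheckForOverflow/DsubuAndCheckForOverflow followed by BranchOnOverflow with an explicit scratch register, the fused DaddBranchOvf/DsubBranchOvf macros both compute the result and branch to &stub_call on overflow. A portable model of the condition they branch on (using the GCC/Clang overflow builtins, which are unrelated to the MIPS macros themselves):

```cpp
#include <cstdint>

// Returns true when the signed 64-bit add/sub overflows; the generated code
// branches to the stub call in exactly that case, otherwise v0 holds *result.
bool SignedAddOverflows(int64_t a, int64_t b, int64_t* result) {
  return __builtin_add_overflow(a, b, result);
}

bool SignedSubOverflows(int64_t a, int64_t b, int64_t* result) {
  return __builtin_sub_overflow(a, b, result);
}
```
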
@@ -2715,7 +2636,7 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   // Record source position of the IC call.
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -3077,8 +2998,9 @@
   __ GetObjectType(v0, v0, a1);  // Map is now in v0.
   __ Branch(&null, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
 
-  // Return 'Function' for JSFunction objects.
-  __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ Branch(&function, hs, a1, Operand(FIRST_FUNCTION_TYPE));
 
   // Check if the constructor in the map is a JS function.
   Register instance_type = a2;
@@ -3208,23 +3130,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into v0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(v0, &done_convert);
-  __ Push(v0);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3407,6 +3312,11 @@
   context()->Plug(v0);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
+  context()->Plug(v0);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3448,11 +3358,13 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadNativeContextSlot(expr->context_index(), v0);
+  PushOperand(v0);
+
   // Push undefined as the receiver.
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
   PushOperand(v0);
-
-  __ LoadNativeContextSlot(expr->context_index(), v0);
 }
 
 
@@ -3466,59 +3378,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRuntime");
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    __ ld(at, MemOperand(sp, 0));
-    PushOperand(at);
-    __ sd(v0, MemOperand(sp, kPointerSize));
-
-    // Push the arguments ("left-to-right").
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-    context()->DropAndPlug(1, v0);
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(expr->function(), arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(v0);
-      }
-    }
-  }
+  // Restore context register.
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3750,20 +3612,18 @@
     }
 
     Register scratch1 = a1;
-    Register scratch2 = a4;
     __ li(scratch1, Operand(Smi::FromInt(count_value)));
-    __ DadduAndCheckForOverflow(v0, v0, scratch1, scratch2);
-    __ BranchOnNoOverflow(&done, scratch2);
+    __ DaddBranchNoOvf(v0, v0, Operand(scratch1), &done);
     // Call stub. Undo operation first.
     __ Move(v0, a0);
     __ jmp(&stub_call);
     __ bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3802,9 +3662,6 @@
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in v0.
   switch (assign_type) {
     case VARIABLE:
@@ -4051,29 +3908,23 @@
 
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ mov(a0, result_register());
   if (expr->op() == Token::EQ_STRICT) {
     Heap::RootListIndex nil_value = nil == kNullValue ?
         Heap::kNullValueRootIndex :
         Heap::kUndefinedValueRootIndex;
     __ LoadRoot(a1, nil_value);
-    Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
-  } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ LoadRoot(a1, Heap::kTrueValueRootIndex);
     Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+  } else {
+    __ JumpIfSmi(v0, if_false);
+    __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ ld(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(v0);
-}
-
-
 Register FullCodeGenerator::result_register() {
   return v0;
 }
@@ -4083,6 +3934,12 @@
   return cp;
 }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  // DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  DCHECK(IsAligned(frame_offset, kPointerSize));
+  // Frame fields are pointer-sized on mips64, so load the full word with ld.
+  __ ld(value, MemOperand(fp, frame_offset));
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   // DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4155,12 +4012,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(slot)));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   __ Pop(result_register());  // Restore the accumulator.
   __ Pop(a1);                 // Get the token.
@@ -4195,7 +4046,9 @@
                             BackEdgeState target_state,
                             Code* replacement_code) {
   static const int kInstrSize = Assembler::kInstrSize;
-  Address branch_address = pc - 8 * kInstrSize;
+  Address pc_immediate_load_address =
+      Assembler::target_address_from_return_address(pc);
+  Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
   Isolate* isolate = unoptimized_code->GetIsolate();
   CodePatcher patcher(isolate, branch_address, 1);
 
@@ -4213,7 +4066,6 @@
       patcher.masm()->slt(at, a3, zero_reg);
       break;
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       // addiu at, zero_reg, 1
       // beq  at, zero_reg, ok  ;; Not changed
       // lui  t9, <on-stack replacement address> upper
@@ -4226,7 +4078,6 @@
       patcher.masm()->daddiu(at, zero_reg, 1);
       break;
   }
-  Address pc_immediate_load_address = pc - 6 * kInstrSize;
   // Replace the stack check address in the load-immediate (6-instr sequence)
   // with the entry address of the replacement code.
   Assembler::set_target_address_at(isolate, pc_immediate_load_address,
@@ -4242,10 +4093,11 @@
     Code* unoptimized_code,
     Address pc) {
   static const int kInstrSize = Assembler::kInstrSize;
-  Address branch_address = pc - 8 * kInstrSize;
-  Address pc_immediate_load_address = pc - 6 * kInstrSize;
+  Address pc_immediate_load_address =
+      Assembler::target_address_from_return_address(pc);
+  Address branch_address = pc_immediate_load_address - 2 * kInstrSize;
 
-  DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 7 * kInstrSize)));
+  DCHECK(Assembler::IsBeq(Assembler::instr_at(branch_address + kInstrSize)));
   if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
     DCHECK(reinterpret_cast<uint64_t>(
         Assembler::target_address_at(pc_immediate_load_address)) ==
@@ -4256,18 +4108,11 @@
 
   DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
 
-  if (reinterpret_cast<uint64_t>(
-      Assembler::target_address_at(pc_immediate_load_address)) ==
-          reinterpret_cast<uint64_t>(
-              isolate->builtins()->OnStackReplacement()->entry())) {
-    return ON_STACK_REPLACEMENT;
-  }
-
   DCHECK(reinterpret_cast<uint64_t>(
-      Assembler::target_address_at(pc_immediate_load_address)) ==
+             Assembler::target_address_at(pc_immediate_load_address)) ==
          reinterpret_cast<uint64_t>(
-             isolate->builtins()->OsrAfterStackCheck()->entry()));
-  return OSR_AFTER_STACK_CHECK;
+             isolate->builtins()->OnStackReplacement()->entry()));
+  return ON_STACK_REPLACEMENT;
 }
 
 
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/ppc/full-codegen-ppc.cc
index 24a2a38..301ccf5 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -299,41 +299,34 @@
     __ CallRuntime(Runtime::kTraceEnter);
   }
 
-  // Visit the declarations and body unless there is an illegal
-  // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  // Visit the declarations and body.
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    {
-      Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmpl(sp, ip);
+    __ bc_short(ge, &ok);
+    __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
 
-    {
-      Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-      __ cmpl(sp, ip);
-      __ bc_short(ge, &ok);
-      __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
-      __ bind(&ok);
-    }
-
-    {
-      Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(literal()->body());
-      DCHECK(loop_depth() == 0);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -516,7 +509,7 @@
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
   codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -616,7 +609,7 @@
 
 void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
                                Label* if_false, Label* fall_through) {
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
   Split(eq, if_true, if_false, fall_through);
@@ -955,14 +948,14 @@
 
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
@@ -1038,11 +1031,6 @@
   // We got a fixed array in register r3. Iterate through that.
   __ bind(&fixed_array);
 
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(r4);
-  __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  __ StoreP(
-      r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
   __ LoadSmiLiteral(r4, Smi::FromInt(1));  // Smi(1) indicates slow check
   __ Push(r4, r3);  // Smi and array
   __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
@@ -1079,12 +1067,8 @@
   __ cmp(r7, r5);
   __ beq(&update_each);
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key, so record the slow path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(r3);
   __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ StoreP(
@@ -1138,28 +1122,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
-      scope()->is_function_scope() && info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ mov(r5, Operand(info));
-    __ CallStub(&stub);
-  } else {
-    __ Push(info);
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1592,13 +1554,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ LoadP(r3, MemOperand(sp));
-    __ push(r3);
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1843,64 +1798,44 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ push(result_register());
-    // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ b(&suspend);
-      __ bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ pop(r4);
-      __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
-      __ bne(&resume);
-      __ push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ b(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ pop(r4);
+  __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+  __ bne(&resume);
+  __ push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
-      __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
-                r0);
-      __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
-      __ mr(r4, cp);
-      __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
-                          kLRHasBeenSaved, kDontSaveFPRegs);
-      __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-      __ cmp(sp, r4);
-      __ beq(&post_runtime);
-      __ push(r3);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
-      PopOperand(result_register());
-      EmitReturnSequence();
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
+  __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+            r0);
+  __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+  __ mr(r4, cp);
+  __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
+  __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+  __ cmp(sp, r4);
+  __ beq(&post_runtime);
+  __ push(r3);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ bind(&resume);
+  context()->Plug(result_register());
 }
 
 
@@ -1930,7 +1865,10 @@
   __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
   __ push(r5);
 
-  // Push holes for the rest of the arguments to the generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
   __ LoadWordArith(
       r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1959,9 +1897,7 @@
   // fp = caller's frame pointer.
   // cp = callee's context,
   // r7 = callee's JS function.
-  __ PushFixedFrame(r7);
-  // Adjust FP to point to saved FP.
-  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(r7);
 
   // Load the operand stack size.
   __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
@@ -2060,7 +1996,7 @@
 
   __ bind(&done_allocate);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
-  __ pop(r5);
+  PopOperand(r5);
   __ LoadRoot(r6,
               done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
   __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
@@ -2072,18 +2008,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!prop->IsSuperAccess());
-
-  __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
-  __ mov(LoadDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
-}
-
-
 void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               Expression* left_expr,
@@ -2715,7 +2639,7 @@
   }
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -3077,9 +3001,10 @@
   // Map is now in r3.
   __ blt(&null);
 
-  // Return 'Function' for JSFunction objects.
-  __ cmpi(r4, Operand(JS_FUNCTION_TYPE));
-  __ beq(&function);
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  __ cmpli(r4, Operand(FIRST_FUNCTION_TYPE));
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ bge(&function);
 
   // Check if the constructor in the map is a JS function.
   Register instance_type = r5;
@@ -3197,23 +3122,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into r3 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(r3, &done_convert);
-  __ Push(r3);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3383,6 +3291,11 @@
   context()->Plug(r3);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r3);
+  context()->Plug(r3);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3424,11 +3337,13 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadNativeContextSlot(expr->context_index(), r3);
+  PushOperand(r3);
+
   // Push undefined as the receiver.
   __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
   PushOperand(r3);
-
-  __ LoadNativeContextSlot(expr->context_index(), r3);
 }
 
 
@@ -3442,60 +3357,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRuntime");
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    __ LoadP(ip, MemOperand(sp, 0));
-    PushOperand(ip);
-    __ StoreP(r3, MemOperand(sp, kPointerSize));
-
-    // Push the arguments ("left-to-right").
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-    context()->DropAndPlug(1, r3);
-
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(expr->function(), arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(r3);
-      }
-    }
-  }
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3730,11 +3594,11 @@
     __ b(&stub_call);
     __ bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3773,9 +3637,6 @@
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in r3.
   switch (assign_type) {
     case VARIABLE:
@@ -4031,26 +3892,25 @@
     __ cmp(r3, r4);
     Split(eq, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ CompareRoot(r3, Heap::kTrueValueRootIndex);
-    Split(eq, if_true, if_false, fall_through);
+    __ JumpIfSmi(r3, if_false);
+    __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+    __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+    __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+    Split(ne, if_true, if_false, fall_through, cr0);
   }
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(r3);
-}
-
-
 Register FullCodeGenerator::result_register() { return r3; }
 
 
 Register FullCodeGenerator::context_register() { return cp; }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+  __ LoadP(value, MemOperand(fp, frame_offset), r0);
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
@@ -4121,12 +3981,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(slot)));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   DCHECK(!result_register().is(r4));
   // Restore the accumulator (r3) and token (r4).
@@ -4179,7 +4033,6 @@
       break;
     }
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       //  <decrement profiling counter>
       //         crset
       //         bge     <ok>            ;; not changed
@@ -4208,8 +4061,10 @@
     Isolate* isolate, Code* unoptimized_code, Address pc) {
   Address mov_address = Assembler::target_address_from_return_address(pc);
   Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+#ifdef DEBUG
   Address interrupt_address =
       Assembler::target_address_at(mov_address, unoptimized_code);
+#endif
 
   if (Assembler::IsCmpImmediate(Assembler::instr_at(cmp_address))) {
     DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
@@ -4218,13 +4073,9 @@
 
   DCHECK(Assembler::IsCrSet(Assembler::instr_at(cmp_address)));
 
-  if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) {
-    return ON_STACK_REPLACEMENT;
-  }
-
   DCHECK(interrupt_address ==
-         isolate->builtins()->OsrAfterStackCheck()->entry());
-  return OSR_AFTER_STACK_CHECK;
+         isolate->builtins()->OnStackReplacement()->entry());
+  return ON_STACK_REPLACEMENT;
 }
 }  // namespace internal
 }  // namespace v8
diff --git a/src/full-codegen/s390/OWNERS b/src/full-codegen/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/full-codegen/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/full-codegen/s390/full-codegen-s390.cc b/src/full-codegen/s390/full-codegen-s390.cc
new file mode 100644
index 0000000..88bec4c
--- /dev/null
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -0,0 +1,3981 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ast/scopes.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ic/ic.h"
+#include "src/parsing/parser.h"
+
+#include "src/s390/code-stubs-s390.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// A patch site is a location in the code that can be patched. This class has
+// a number of methods to emit the code which is patchable and the method
+// EmitPatchInfo to record a marker back to the patchable code. This marker is
+// a chi rx, #yyy instruction, where x * 0x0000ffff + yyy (the raw 16-bit
+// immediate value) is the delta from the pc to the first instruction of the
+// patchable code.
+// See PatchInlinedSmiCode in ic-s390.cc for the code that patches it.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() { DCHECK(patch_site_.is_bound() == info_emitted_); }
+
+  // When initially emitting this, ensure that a jump is always generated to
+  // skip the inlined smi code.
+  void EmitJumpIfNotSmi(Register reg, Label* target) {
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    __ bind(&patch_site_);
+    __ CmpP(reg, reg);
+// Emit a Nop to leave more room for patching on 31-bit, since the TestIfSmi
+// sequence uses a 4-byte TMLL.
+#ifndef V8_TARGET_ARCH_S390X
+    __ nop();
+#endif
+    __ beq(target);  // Always taken before patched.
+  }
+
+  // When initially emitting this, ensure that a jump is never generated to
+  // skip the inlined smi code.
+  void EmitJumpIfSmi(Register reg, Label* target) {
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    __ bind(&patch_site_);
+    __ CmpP(reg, reg);
+// Emit a Nop to leave more room for patching on 31-bit, since the TestIfSmi
+// sequence uses a 4-byte TMLL.
+#ifndef V8_TARGET_ARCH_S390X
+    __ nop();
+#endif
+    __ bne(target);  // Never taken before patched.
+  }
+
+  void EmitPatchInfo() {
+    if (patch_site_.is_bound()) {
+      int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+      DCHECK(is_int16(delta_to_patch_site));
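+      // Record the delta in the immediate field of a chi so that
+      // PatchInlinedSmiCode can later locate the inlined smi sequence.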
+      __ chi(r0, Operand(delta_to_patch_site));
+#ifdef DEBUG
+      info_emitted_ = true;
+#endif
+    } else {
+      __ nop();
+      __ nop();
+    }
+  }
+
+ private:
+  MacroAssembler* masm() { return masm_; }
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right.  The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+//   o r3: the JS function object being called (i.e., ourselves)
+//   o r5: the new target value
+//   o cp: our context
+//   o fp: our caller's frame pointer
+//   o sp: stack pointer
+//   o lr: return address
+//   o ip: our own function entry (required by the prologue)
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-s390.h for its layout.
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  profiling_counter_ = isolate()->factory()->NewCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+  SetFunctionPosition(literal());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+  if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
+    int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+    __ LoadP(r4, MemOperand(sp, receiver_offset), r0);
+    __ AssertNotSmi(r4);
+    __ CompareObjectType(r4, r4, no_reg, FIRST_JS_RECEIVER_TYPE);
+    __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
+  }
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+  int prologue_offset = masm_->pc_offset();
+
+  info->set_prologue_offset(prologue_offset);
+  __ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
+
+  {
+    Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = info->scope()->num_stack_slots();
+    // Generators allocate locals, if any, in context slots.
+    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
+    if (locals_count > 0) {
+      if (locals_count >= 128) {
+        Label ok;
+        __ AddP(ip, sp, Operand(-(locals_count * kPointerSize)));
+        __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+        __ CmpLogicalP(ip, r5);
+        __ bge(&ok, Label::kNear);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+        __ bind(&ok);
+      }
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
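+      // Fill the local slots with undefined in batches of kMaxPushes to
+      // bound the amount of unrolled store code.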
+      if (locals_count >= kMaxPushes) {
+        int loop_iterations = locals_count / kMaxPushes;
+        __ mov(r4, Operand(loop_iterations));
+        Label loop_header;
+        __ bind(&loop_header);
+        // Do pushes.
+        // TODO(joransiu): Use MVC for better performance
+        __ lay(sp, MemOperand(sp, -kMaxPushes * kPointerSize));
+        for (int i = 0; i < kMaxPushes; i++) {
+          __ StoreP(ip, MemOperand(sp, i * kPointerSize));
+        }
+        // Continue loop if not done.
+        __ BranchOnCount(r4, &loop_header);
+      }
+      int remaining = locals_count % kMaxPushes;
+      // Emit the remaining pushes.
+      // TODO(joransiu): Use MVC for better performance
+      if (remaining > 0) {
+        __ lay(sp, MemOperand(sp, -remaining * kPointerSize));
+        for (int i = 0; i < remaining; i++) {
+          __ StoreP(ip, MemOperand(sp, i * kPointerSize));
+        }
+      }
+    }
+  }
+
+  bool function_in_register_r3 = true;
+
+  // Possibly allocate a local context.
+  if (info->scope()->num_heap_slots() > 0) {
+    // Argument to NewContext is the function, which is still in r3.
+    Comment cmnt(masm_, "[ Allocate context");
+    bool need_write_barrier = true;
+    int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    if (info->scope()->is_script_scope()) {
+      __ push(r3);
+      __ Push(info->scope()->GetScopeInfo(info->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      // The new target value is not used, so clobbering is safe.
+      DCHECK_NULL(info->scope()->new_target_var());
+    } else {
+      if (info->scope()->new_target_var() != nullptr) {
+        __ push(r5);  // Preserve new target.
+      }
+      if (slots <= FastNewContextStub::kMaximumSlots) {
+        FastNewContextStub stub(isolate(), slots);
+        __ CallStub(&stub);
+        // Result of FastNewContextStub is always in new space.
+        need_write_barrier = false;
+      } else {
+        __ push(r3);
+        __ CallRuntime(Runtime::kNewFunctionContext);
+      }
+      if (info->scope()->new_target_var() != nullptr) {
+        __ pop(r5);  // Restore new target.
+      }
+    }
+    function_in_register_r3 = false;
+    // Context is returned in r2.  It replaces the context passed to us.
+    // It's saved on the stack and kept live in cp.
+    __ LoadRR(cp, r2);
+    __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = info->scope()->num_parameters();
+    int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                               (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ LoadP(r2, MemOperand(fp, parameter_offset), r0);
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ StoreP(r2, target);
+
+        // Update the write barrier.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(cp, target.offset(), r2, r4,
+                                    kLRHasBeenSaved, kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r2, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+  }
+
+  // The registers holding this function and the new target are both trashed
+  // if we bail out here. But since that can happen only when the new target
+  // is not used and we allocate a context, the value of
+  // |function_in_register_r3| is correct.
+  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+
+  // Possibly set up a local binding to the this function which is used in
+  // derived constructors with super calls.
+  Variable* this_function_var = scope()->this_function_var();
+  if (this_function_var != nullptr) {
+    Comment cmnt(masm_, "[ This function");
+    if (!function_in_register_r3) {
+      __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+      // The write barrier clobbers the register again; keep it marked as such.
+    }
+    SetVar(this_function_var, r3, r2, r4);
+  }
+
+  // Possibly set up a local binding to the new target value.
+  Variable* new_target_var = scope()->new_target_var();
+  if (new_target_var != nullptr) {
+    Comment cmnt(masm_, "[ new.target");
+    SetVar(new_target_var, r5, r2, r4);
+  }
+
+  // Possibly allocate RestParameters
+  int rest_index;
+  Variable* rest_param = scope()->rest_parameter(&rest_index);
+  if (rest_param) {
+    Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+    if (!function_in_register_r3) {
+      __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
+    __ CallStub(&stub);
+
+    function_in_register_r3 = false;
+    SetVar(rest_param, r2, r3, r4);
+  }
+
+  Variable* arguments = scope()->arguments();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (!function_in_register_r3) {
+      // Load this again, if it's used by the local context below.
+      __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(r3);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
+
+    SetVar(arguments, r2, r3, r4);
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter);
+  }
+
+  // Visit the declarations and body.
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
+    Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(scope()->declarations());
+  }
+
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
+
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ CmpLogicalP(sp, ip);
+    __ bge(&ok, Label::kNear);
+    __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
+
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
+  }
+
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
+  {
+    Comment cmnt(masm_, "[ return <undefined>;");
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  }
+  EmitReturnSequence();
+}
+
+void FullCodeGenerator::ClearAccumulator() {
+  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+}
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+  __ mov(r4, Operand(profiling_counter_));
+  intptr_t smi_delta = reinterpret_cast<intptr_t>(Smi::FromInt(delta));
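+  // With the general-instructions extension, add the negated Smi delta
+  // directly to the counter cell in memory; otherwise load, subtract, store.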
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(-smi_delta)) {
+    __ AddP(FieldMemOperand(r4, Cell::kValueOffset), Operand(-smi_delta));
+    __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+  } else {
+    __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+    __ SubSmiLiteral(r5, r5, Smi::FromInt(delta), r0);
+    __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+  }
+}
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  __ mov(r4, Operand(profiling_counter_));
+  __ LoadSmiLiteral(r5, Smi::FromInt(reset_value));
+  __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+}
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+                                                Label* back_edge_target) {
+  Comment cmnt(masm_, "[ Back edge bookkeeping");
+  Label ok;
+
+  DCHECK(back_edge_target->is_bound());
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target) +
+                 kCodeSizeMultiplier / 2;
+  int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  EmitProfilingCounterDecrement(weight);
+  {
+    // BackEdgeTable::PatchAt manipulates this sequence.
+    __ bge(&ok, Label::kNear);
+    __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+    // Record a mapping of this PC offset to the OSR id.  This is used to find
+    // the AST id from the unoptimized code in order to use it as a key into
+    // the deoptimization input data found in the optimized code.
+    RecordBackEdge(stmt->OsrEntryId());
+  }
+  EmitProfilingCounterReset();
+
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  // Record a mapping of the OSR id to this PC.  This is used if the OSR
+  // entry becomes the target of a bailout.  We don't expect it to be, but
+  // we want it to work if it is.
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
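+  // r5 holds the decremented counter value left by
+  // EmitProfilingCounterDecrement above.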
+  __ CmpP(r5, Operand::Zero());
+  __ bge(&ok);
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ push(r2);
+  }
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ pop(r2);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
+
+void FullCodeGenerator::EmitReturnSequence() {
+  Comment cmnt(masm_, "[ Return sequence");
+  if (return_label_.is_bound()) {
+    __ b(&return_label_);
+  } else {
+    __ bind(&return_label_);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in r2
+      __ push(r2);
+      __ CallRuntime(Runtime::kTraceExit);
+    }
+    EmitProfilingCounterHandlingForReturnSequence(false);
+
+    // Make sure that the constant pool is not emitted inside of the return
+    // sequence.
+    {
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting, as we rely on the code size here.
+      int32_t arg_count = info_->scope()->num_parameters() + 1;
+      int32_t sp_delta = arg_count * kPointerSize;
+      SetReturnPosition(literal());
+      __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+
+      __ Ret();
+    }
+  }
+}
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+  codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+  codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+                                          false_label_);
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    if (false_label_ != fall_through_) __ b(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    if (true_label_ != fall_through_) __ b(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(this);
+  }
+}
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  __ mov(result_register(), Operand(lit));
+}
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  // Immediates cannot be pushed directly.
+  __ mov(result_register(), Operand(lit));
+  codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+                                          false_label_);
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    if (false_label_ != fall_through_) __ b(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    if (true_label_ != fall_through_) __ b(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      if (false_label_ != fall_through_) __ b(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ b(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      if (false_label_ != fall_through_) __ b(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ b(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ mov(result_register(), Operand(lit));
+    codegen()->DoTest(this);
+  }
+}
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  DCHECK(count > 0);
+  if (count > 1) codegen()->DropOperands(count - 1);
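+  // Overwrite the single remaining stack slot with the new value.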
+  __ StoreP(reg, MemOperand(sp, 0));
+}
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  DCHECK(materialize_true == materialize_false);
+  __ bind(materialize_true);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true, Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ b(&done, Label::kNear);
+  __ bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true, Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ b(&done, Label::kNear);
+  __ bind(materialize_false);
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+  codegen()->PushOperand(ip);
+}
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(ip, value_root_index);
+  codegen()->PushOperand(ip);
+}
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+                                          false_label_);
+  if (flag) {
+    if (true_label_ != fall_through_) __ b(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ b(false_label_);
+  }
+}
+
+void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
+                               Label* if_false, Label* fall_through) {
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
+  CallIC(ic, condition->test_id());
+  __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+  Split(eq, if_true, if_false, fall_through);
+}
+
+void FullCodeGenerator::Split(Condition cond, Label* if_true, Label* if_false,
+                              Label* fall_through) {
+  if (if_false == fall_through) {
+    __ b(cond, if_true);
+  } else if (if_true == fall_through) {
+    __ b(NegateCondition(cond), if_false);
+  } else {
+    __ b(cond, if_true);
+    __ b(if_false);
+  }
+}
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  DCHECK(var->IsStackAllocated());
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
+  }
+  return MemOperand(fp, offset);
+}
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextMemOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
+}
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+  // Use destination as scratch.
+  MemOperand location = VarOperand(var, dest);
+  __ LoadP(dest, location, r0);
+}
+
+void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
+                               Register scratch1) {
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
+  MemOperand location = VarOperand(var, scratch0);
+  __ StoreP(src, location);
+
+  // Emit the write barrier code if the location is in the heap.
+  if (var->IsContextSlot()) {
+    __ RecordWriteContextSlot(scratch0, location.offset(), src, scratch1,
+                              kLRHasBeenSaved, kDontSaveFPRegs);
+  }
+}
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest()) return;
+
+  Label skip;
+  if (should_normalize) __ b(&skip);
+  PrepareForBailout(expr, TOS_REG);
+  if (should_normalize) {
+    __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+    Split(eq, if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ LoadP(r3, FieldMemOperand(cp, HeapObject::kMapOffset));
+    __ CompareRoot(r3, Heap::kWithContextMapRootIndex);
+    __ Check(ne, kDeclarationInWithContext);
+    __ CompareRoot(r3, Heap::kCatchContextMapRootIndex);
+    __ Check(ne, kDeclarationInCatchContext);
+  }
+}
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
+  Variable* variable = proxy->var();
+  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  switch (variable->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED:
+      globals_->Add(variable->name(), zone());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value(),
+                    zone());
+      break;
+
+    case VariableLocation::PARAMETER:
+    case VariableLocation::LOCAL:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ StoreP(ip, StackOperand(variable));
+      }
+      break;
+
+    case VariableLocation::CONTEXT:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ StoreP(ip, ContextMemOperand(cp, variable->index()));
+        // No write barrier since the_hole_value is in old space.
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      }
+      break;
+
+    case VariableLocation::LOOKUP: {
+      Comment cmnt(masm_, "[ VariableDeclaration");
+      __ mov(r4, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of four modes.
+      DCHECK(IsDeclaredVariableMode(mode));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (hole_init) {
+        __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+      } else {
+        __ LoadSmiLiteral(r2, Smi::FromInt(0));  // Indicates no initial value.
+      }
+      __ Push(r4, r2);
+      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      break;
+    }
+  }
+}
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      globals_->Add(variable->name(), zone());
+      Handle<SharedFunctionInfo> function =
+          Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function, zone());
+      break;
+    }
+
+    case VariableLocation::PARAMETER:
+    case VariableLocation::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ StoreP(result_register(), StackOperand(variable));
+      break;
+    }
+
+    case VariableLocation::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ StoreP(result_register(), ContextMemOperand(cp, variable->index()));
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(cp, offset, result_register(), r4,
+                                kLRHasBeenSaved, kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case VariableLocation::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ mov(r4, Operand(variable->name()));
+      PushOperand(r4);
+      // Push initial value for function declaration.
+      VisitForStackValue(declaration->fun());
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      break;
+    }
+  }
+}
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  __ mov(r3, Operand(pairs));
+  __ LoadSmiLiteral(r2, Smi::FromInt(DeclareGlobalsFlags()));
+  __ Push(r3, r2);
+  __ CallRuntime(Runtime::kDeclareGlobals);
+  // Return value is ignored.
+}
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+  // Call the runtime to declare the modules.
+  __ Push(descriptions);
+  __ CallRuntime(Runtime::kDeclareModules);
+  // Return value is ignored.
+}
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForAccumulatorValue(clause->label());
+
+    // Perform the comparison as if via '==='.
+    __ LoadP(r3, MemOperand(sp, 0));  // Switch value.
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
+    if (inline_smi_code) {
+      Label slow_case;
+      __ LoadRR(r4, r2);
+      __ OrP(r4, r3);
+      patch_site.EmitJumpIfNotSmi(r4, &slow_case);
+
+      __ CmpP(r3, r2);
+      __ bne(&next_test);
+      __ Drop(1);  // Switch value is no longer needed.
+      __ b(clause->body_target());
+      __ bind(&slow_case);
+    }
+
+    // Record position before stub call for type feedback.
+    SetExpressionPosition(clause);
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+    CallIC(ic, clause->CompareId());
+    patch_site.EmitPatchInfo();
+
+    Label skip;
+    __ b(&skip);
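+    // Not reached in normal execution; this block is the continuation used
+    // when optimized code deoptimizes at the comparison above, with the
+    // boolean result in r2.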
+    PrepareForBailout(clause, TOS_REG);
+    __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+    __ bne(&next_test);
+    __ Drop(1);
+    __ b(clause->body_target());
+    __ bind(&skip);
+
+    __ CmpP(r2, Operand::Zero());
+    __ bne(&next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  DropOperands(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ b(nested_statement.break_label());
+  } else {
+    __ b(default_clause->body_target());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target());
+    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_label());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt, SKIP_BREAK);
+
+  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+
+  // Get the object to enumerate over.
+  SetExpressionAsStatementPosition(stmt->enumerable());
+  VisitForAccumulatorValue(stmt->enumerable());
+  OperandStackDepthIncrement(5);
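+  // The for-in loop keeps five values on the stack: the enumerable object,
+  // its map (or Smi(1) on the slow path), the enum cache (or the fixed array
+  // of keys), the length, and the current index.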
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // If the object is null or undefined, skip over the loop; otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
+  Label convert, done_convert;
+  __ JumpIfSmi(r2, &convert);
+  __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
+  __ bge(&done_convert);
+  __ CompareRoot(r2, Heap::kNullValueRootIndex);
+  __ beq(&exit);
+  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+  __ beq(&exit);
+  __ bind(&convert);
+  ToObjectStub stub(isolate());
+  __ CallStub(&stub);
+  __ bind(&done_convert);
+  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  __ push(r2);
+
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  // Note: Proxies never have an enum cache, so will always take the
+  // slow path.
+  Label call_runtime;
+  __ CheckEnumCache(&call_runtime);
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
+  __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ b(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(r2);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kForInEnumerate);
+  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ CompareRoot(r4, Heap::kMetaMapRootIndex);
+  __ bne(&fixed_array);
+
+  // We got a map in register r2. Get the enumeration cache from it.
+  Label no_descriptors;
+  __ bind(&use_cache);
+
+  __ EnumLength(r3, r2);
+  __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+  __ beq(&no_descriptors, Label::kNear);
+
+  __ LoadInstanceDescriptors(r2, r4);
+  __ LoadP(r4, FieldMemOperand(r4, DescriptorArray::kEnumCacheOffset));
+  __ LoadP(r4,
+           FieldMemOperand(r4, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(r2);  // Map.
+  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(r4, r3, r2);
+  __ b(&loop);
+
+  __ bind(&no_descriptors);
+  __ Drop(1);
+  __ b(&exit);
+
+  // We got a fixed array in register r2. Iterate through that.
+  __ bind(&fixed_array);
+
+  __ LoadSmiLiteral(r3, Smi::FromInt(1));  // Smi(1) indicates slow check
+  __ Push(r3, r2);                         // Smi and array
+  __ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ Push(r3);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  __ Push(r2);  // Initial index.
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  SetExpressionAsStatementPosition(stmt->each());
+
+  // Load the current count to r2, load the length to r3.
+  __ LoadP(r2, MemOperand(sp, 0 * kPointerSize));
+  __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+  __ CmpLogicalP(r2, r3);  // Compare to the array length.
+  __ bge(loop_statement.break_label());
+
+  // Get the current entry of the array into register r5.
+  __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+  __ AddP(r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(r5, r2);
+  __ LoadP(r5, MemOperand(r5, r4));
+
+  // Get the expected map from the stack, or a smi (in the permanent slow
+  // case), into register r4.
+  __ LoadP(r4, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we may have to filter the key.
+  Label update_each;
+  __ LoadP(r3, MemOperand(sp, 4 * kPointerSize));
+  __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ CmpP(r6, r4);
+  __ beq(&update_each);
+
+  // We need to filter the key; record the slow path here.
+  int const vector_index = SmiFromSlot(slot)->value();
+  __ EmitLoadTypeFeedbackVector(r2);
+  __ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ StoreP(
+      r4, FieldMemOperand(r2, FixedArray::OffsetOfElementAt(vector_index)), r0);
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property
+  // any more. If the property has been removed while iterating, we
+  // just skip it.
+  __ Push(r3, r5);  // Enumerable and current entry.
+  __ CallRuntime(Runtime::kForInFilter);
+  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  __ LoadRR(r5, r2);
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ CmpP(r2, r0);
+  __ beq(loop_statement.continue_label());
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register r5.
+  __ bind(&update_each);
+  __ LoadRR(result_register(), r5);
+  // Perform the assignment as if via '='.
+  {
+    EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
+    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+  }
+
+  // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing the index
+  // (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_label());
+  __ pop(r2);
+  __ AddSmiLiteral(r2, r2, Smi::FromInt(1), r0);
+  __ push(r2);
+
+  EmitBackEdgeBookkeeping(stmt, &loop);
+  __ b(&loop);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_label());
+  DropOperands(5);
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(&exit);
+  decrement_loop_depth();
+}
+
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+                                          FeedbackVectorSlot slot) {
+  DCHECK(NeedsHomeObject(initializer));
+  __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+  __ mov(StoreDescriptor::NameRegister(),
+         Operand(isolate()->factory()->home_object_symbol()));
+  __ LoadP(StoreDescriptor::ValueRegister(),
+           MemOperand(sp, offset * kPointerSize));
+  EmitLoadStoreICSlot(slot);
+  CallStoreIC();
+}
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+                                                     int offset,
+                                                     FeedbackVectorSlot slot) {
+  DCHECK(NeedsHomeObject(initializer));
+  __ Move(StoreDescriptor::ReceiverRegister(), r2);
+  __ mov(StoreDescriptor::NameRegister(),
+         Operand(isolate()->factory()->home_object_symbol()));
+  __ LoadP(StoreDescriptor::ValueRegister(),
+           MemOperand(sp, offset * kPointerSize));
+  EmitLoadStoreICSlot(slot);
+  CallStoreIC();
+}
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
+                                                      TypeofMode typeof_mode,
+                                                      Label* slow) {
+  Register current = cp;
+  Register next = r3;
+  Register temp = r4;
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_sloppy_eval()) {
+        // Check that extension is "the hole".
+        __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+      }
+      // Load next context in chain.
+      __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      current = next;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
+    Label loop, fast;
+    if (!current.is(next)) {
+      __ Move(next, current);
+    }
+    __ bind(&loop);
+    // Terminate at native context.
+    __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+    __ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+    __ beq(&fast, Label::kNear);
+    // Check that extension is "the hole".
+    __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+    __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+    // Load next context in chain.
+    __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+    __ b(&loop);
+    __ bind(&fast);
+  }
+
+  // All extension objects were empty and it is safe to use a normal global
+  // load machinery.
+  EmitGlobalVariableLoad(proxy, typeof_mode);
+}
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+                                                                Label* slow) {
+  DCHECK(var->IsContextSlot());
+  Register context = cp;
+  Register next = r5;
+  Register temp = r6;
+
+  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_sloppy_eval()) {
+        // Check that extension is "the hole".
+        __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+      }
+      __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      context = next;
+    }
+  }
+  // Check that last extension is "the hole".
+  __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+  __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+  return ContextMemOperand(context, var->index());
+}
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
+                                                  TypeofMode typeof_mode,
+                                                  Label* slow, Label* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  Variable* var = proxy->var();
+  if (var->mode() == DYNAMIC_GLOBAL) {
+    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
+    __ b(done);
+  } else if (var->mode() == DYNAMIC_LOCAL) {
+    Variable* local = var->local_if_not_shadowed();
+    __ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
+    if (local->mode() == LET || local->mode() == CONST ||
+        local->mode() == CONST_LEGACY) {
+      __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+      __ bne(done);
+      if (local->mode() == CONST_LEGACY) {
+        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+      } else {  // LET || CONST
+        __ mov(r2, Operand(var->name()));
+        __ push(r2);
+        __ CallRuntime(Runtime::kThrowReferenceError);
+      }
+    }
+    __ b(done);
+  }
+}
+
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+                                               TypeofMode typeof_mode) {
+  Variable* var = proxy->var();
+  DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+  __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+  __ mov(LoadDescriptor::SlotRegister(),
+         Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+  CallLoadIC(typeof_mode);
+}
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+                                         TypeofMode typeof_mode) {
+  // Record position before possible IC call.
+  SetExpressionPosition(proxy);
+  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  Variable* var = proxy->var();
+
+  // Three cases: global variables, lookup variables, and all other types of
+  // variables.
+  switch (var->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      Comment cmnt(masm_, "[ Global variable");
+      EmitGlobalVariableLoad(proxy, typeof_mode);
+      context()->Plug(r2);
+      break;
+    }
+
+    case VariableLocation::PARAMETER:
+    case VariableLocation::LOCAL:
+    case VariableLocation::CONTEXT: {
+      DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
+      Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+                                               : "[ Stack variable");
+      if (NeedsHoleCheckForLoad(proxy)) {
+        Label done;
+        // Let and const need a read barrier.
+        GetVar(r2, var);
+        __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+        __ bne(&done);
+        if (var->mode() == LET || var->mode() == CONST) {
+          // Throw a reference error when using an uninitialized let/const
+          // binding in harmony mode.
+          __ mov(r2, Operand(var->name()));
+          __ push(r2);
+          __ CallRuntime(Runtime::kThrowReferenceError);
+        } else {
+          // Uninitialized legacy const bindings are unholed.
+          DCHECK(var->mode() == CONST_LEGACY);
+          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+        }
+        __ bind(&done);
+        context()->Plug(r2);
+        break;
+      }
+      context()->Plug(var);
+      break;
+    }
+
+    case VariableLocation::LOOKUP: {
+      Comment cmnt(masm_, "[ Lookup variable");
+      Label done, slow;
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
+      __ bind(&slow);
+      __ Push(var->name());
+      Runtime::FunctionId function_id =
+          typeof_mode == NOT_INSIDE_TYPEOF
+              ? Runtime::kLoadLookupSlot
+              : Runtime::kLoadLookupSlotInsideTypeof;
+      __ CallRuntime(function_id);
+      __ bind(&done);
+      context()->Plug(r2);
+    }
+  }
+}
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExpLiteral");
+  __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+  __ mov(r3, Operand(expr->pattern()));
+  __ LoadSmiLiteral(r2, Smi::FromInt(expr->flags()));
+  FastCloneRegExpStub stub(isolate());
+  __ CallStub(&stub);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+  Expression* expression = (property == NULL) ? NULL : property->value();
+  if (expression == NULL) {
+    __ LoadRoot(r3, Heap::kNullValueRootIndex);
+    PushOperand(r3);
+  } else {
+    VisitForStackValue(expression);
+    if (NeedsHomeObject(expression)) {
+      DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+             property->kind() == ObjectLiteral::Property::SETTER);
+      int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+      EmitSetHomeObject(expression, offset, property->GetSlot());
+    }
+  }
+}
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+
+  Handle<FixedArray> constant_properties = expr->constant_properties();
+  __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+  __ mov(r3, Operand(constant_properties));
+  int flags = expr->ComputeFlags();
+  __ LoadSmiLiteral(r2, Smi::FromInt(flags));
+  if (MustCreateObjectLiteralWithRuntime(expr)) {
+    __ Push(r5, r4, r3, r2);
+    __ CallRuntime(Runtime::kCreateObjectLiteral);
+  } else {
+    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
+    __ CallStub(&stub);
+  }
+  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+  // If result_saved is true the result is on top of the stack.  If
+  // result_saved is false the result is in r2.
+  bool result_saved = false;
+
+  AccessorTable accessor_table(zone());
+  int property_index = 0;
+  for (; property_index < expr->properties()->length(); property_index++) {
+    ObjectLiteral::Property* property = expr->properties()->at(property_index);
+    if (property->is_computed_name()) break;
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key()->AsLiteral();
+    Expression* value = property->value();
+    if (!result_saved) {
+      PushOperand(r2);  // Save result on stack
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        // It is safe to use [[Put]] here because the boilerplate already
+        // contains computed properties with an uninitialized value.
+        if (key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            DCHECK(StoreDescriptor::ValueRegister().is(r2));
+            __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+            __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+            EmitLoadStoreICSlot(property->GetSlot(0));
+            CallStoreIC();
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
+            }
+          } else {
+            VisitForEffect(value);
+          }
+          break;
+        }
+        // Duplicate receiver on stack.
+        __ LoadP(r2, MemOperand(sp));
+        PushOperand(r2);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          if (NeedsHomeObject(value)) {
+            EmitSetHomeObject(value, 2, property->GetSlot());
+          }
+          __ LoadSmiLiteral(r2, Smi::FromInt(SLOPPY));  // PropertyAttributes
+          PushOperand(r2);
+          CallRuntimeWithOperands(Runtime::kSetProperty);
+        } else {
+          DropOperands(3);
+        }
+        break;
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Duplicate receiver on stack.
+        __ LoadP(r2, MemOperand(sp));
+        PushOperand(r2);
+        VisitForStackValue(value);
+        DCHECK(property->emit_store());
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
+        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                               NO_REGISTERS);
+        break;
+      case ObjectLiteral::Property::GETTER:
+        if (property->emit_store()) {
+          accessor_table.lookup(key)->second->getter = property;
+        }
+        break;
+      case ObjectLiteral::Property::SETTER:
+        if (property->emit_store()) {
+          accessor_table.lookup(key)->second->setter = property;
+        }
+        break;
+    }
+  }
+
+  // Emit code to define accessors, using only a single call to the runtime for
+  // each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end(); ++it) {
+    __ LoadP(r2, MemOperand(sp));  // Duplicate receiver.
+    PushOperand(r2);
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ LoadSmiLiteral(r2, Smi::FromInt(NONE));
+    PushOperand(r2);
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+  }
+
+  // Object literals have two parts. The "static" part on the left contains no
+  // computed property names, and so we can compute its map ahead of time; see
+  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+  // starts with the first computed property name, and continues with all
+  // properties to its right.  All the code from above initializes the static
+  // component of the object literal, and arranges for the map of the result to
+  // reflect the static order in which the keys appear. For the dynamic
+  // properties, we compile them into a series of "SetOwnProperty" runtime
+  // calls. This will preserve insertion order.
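+  // As an illustrative (non-normative) example of this split, for
+  //
+  //   var o = { a: 1, [key]: 2, b: 3 };
+  //
+  // 'a' belongs to the static part handled above, while '[key]' and every
+  // property after it ('b' included) belong to the dynamic part handled by
+  // the loop below.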
+  for (; property_index < expr->properties()->length(); property_index++) {
+    ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+    Expression* value = property->value();
+    if (!result_saved) {
+      PushOperand(r2);  // Save result on the stack
+      result_saved = true;
+    }
+
+    __ LoadP(r2, MemOperand(sp));  // Duplicate receiver.
+    PushOperand(r2);
+
+    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+      DCHECK(!property->is_computed_name());
+      VisitForStackValue(value);
+      DCHECK(property->emit_store());
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
+      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+                             NO_REGISTERS);
+    } else {
+      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
+      VisitForStackValue(value);
+      if (NeedsHomeObject(value)) {
+        EmitSetHomeObject(value, 2, property->GetSlot());
+      }
+
+      switch (property->kind()) {
+        case ObjectLiteral::Property::CONSTANT:
+        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        case ObjectLiteral::Property::COMPUTED:
+          if (property->emit_store()) {
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+          } else {
+            DropOperands(3);
+          }
+          break;
+
+        case ObjectLiteral::Property::PROTOTYPE:
+          UNREACHABLE();
+          break;
+
+        case ObjectLiteral::Property::GETTER:
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
+          break;
+
+        case ObjectLiteral::Property::SETTER:
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
+          break;
+      }
+    }
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(r2);
+  }
+}
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  bool has_fast_elements =
+      IsFastObjectElementsKind(expr->constant_elements_kind());
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
+
+  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+    // If the only customer of allocation sites is transitioning, then
+    // we can turn it off if we don't have anywhere else to transition to.
+    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+  }
+
+  __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+  __ mov(r3, Operand(constant_elements));
+  if (MustCreateArrayLiteralWithRuntime(expr)) {
+    __ LoadSmiLiteral(r2, Smi::FromInt(expr->ComputeFlags()));
+    __ Push(r5, r4, r3, r2);
+    __ CallRuntime(Runtime::kCreateArrayLiteral);
+  } else {
+    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+    __ CallStub(&stub);
+  }
+  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+  bool result_saved = false;  // Is the result saved to the stack?
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  int array_index = 0;
+  for (; array_index < length; array_index++) {
+    Expression* subexpr = subexprs->at(array_index);
+    DCHECK(!subexpr->IsSpread());
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    if (!result_saved) {
+      PushOperand(r2);
+      result_saved = true;
+    }
+    VisitForAccumulatorValue(subexpr);
+
+    __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
+                      Smi::FromInt(array_index));
+    __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+    Handle<Code> ic =
+        CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+    CallIC(ic);
+
+    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+  }
+
+  // In case the array literal contains spread expressions it has two parts.
+  // The first part is the "static" array with a literal index and is handled
+  // above. The second part is the part after the first spread expression
+  // (inclusive), and these elements get appended to the array. Note that the
+  // number of elements an iterable produces is unknown ahead of time.
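+  // Illustrative (non-normative) example: for
+  //
+  //   var a = [x, y, ...iter, z];
+  //
+  // 'x' and 'y' are stored by the loop above, while the elements produced by
+  // '...iter' and everything after it ('z' included) are appended one by one
+  // by the loop below via Runtime::kAppendElement.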
+  if (array_index < length && result_saved) {
+    PopOperand(r2);
+    result_saved = false;
+  }
+  for (; array_index < length; array_index++) {
+    Expression* subexpr = subexprs->at(array_index);
+
+    PushOperand(r2);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
+
+    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(r2);
+  }
+}
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
+
+  Comment cmnt(masm_, "[ Assignment");
+  SetExpressionPosition(expr, INSERT_BREAK);
+
+  Property* property = expr->target()->AsProperty();
+  LhsKind assign_type = Property::GetAssignType(property);
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the register.
+        VisitForStackValue(property->obj());
+        __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+      } else {
+        VisitForStackValue(property->obj());
+      }
+      break;
+    case NAMED_SUPER_PROPERTY:
+      VisitForStackValue(
+          property->obj()->AsSuperPropertyReference()->this_var());
+      VisitForAccumulatorValue(
+          property->obj()->AsSuperPropertyReference()->home_object());
+      PushOperand(result_register());
+      if (expr->is_compound()) {
+        const Register scratch = r3;
+        __ LoadP(scratch, MemOperand(sp, kPointerSize));
+        PushOperands(scratch, result_register());
+      }
+      break;
+    case KEYED_SUPER_PROPERTY: {
+      const Register scratch = r3;
+      VisitForStackValue(
+          property->obj()->AsSuperPropertyReference()->this_var());
+      VisitForAccumulatorValue(
+          property->obj()->AsSuperPropertyReference()->home_object());
+      __ LoadRR(scratch, result_register());
+      VisitForAccumulatorValue(property->key());
+      PushOperands(scratch, result_register());
+      if (expr->is_compound()) {
+        const Register scratch1 = r4;
+        __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+        PushOperands(scratch1, scratch, result_register());
+      }
+      break;
+    }
+    case KEYED_PROPERTY:
+      if (expr->is_compound()) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ LoadP(LoadDescriptor::ReceiverRegister(),
+                 MemOperand(sp, 1 * kPointerSize));
+        __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+      } else {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+      }
+      break;
+  }
+
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
+  if (expr->is_compound()) {
+    {
+      AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy());
+          PrepareForBailout(expr->target(), TOS_REG);
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+        case NAMED_SUPER_PROPERTY:
+          EmitNamedSuperPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+        case KEYED_SUPER_PROPERTY:
+          EmitKeyedSuperPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+      }
+    }
+
+    Token::Value op = expr->binary_op();
+    PushOperand(r2);  // Left operand goes on the stack.
+    VisitForAccumulatorValue(expr->value());
+
+    AccumulatorValueContext context(this);
+    if (ShouldInlineSmiCase(op)) {
+      EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
+                            expr->value());
+    } else {
+      EmitBinaryOp(expr->binary_operation(), op);
+    }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
+  } else {
+    VisitForAccumulatorValue(expr->value());
+  }
+
+  SetExpressionPosition(expr);
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op(), expr->AssignmentSlot());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(r2);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case NAMED_SUPER_PROPERTY:
+      EmitNamedSuperPropertyStore(property);
+      context()->Plug(r2);
+      break;
+    case KEYED_SUPER_PROPERTY:
+      EmitKeyedSuperPropertyStore(property);
+      context()->Plug(r2);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+  Comment cmnt(masm_, "[ Yield");
+  SetExpressionPosition(expr);
+
+  // Evaluate yielded value first; the initial iterator definition depends on
+  // this.  It stays on the stack while we update the iterator.
+  VisitForStackValue(expr->expression());
+
+  Label suspend, continuation, post_runtime, resume;
+
+  __ b(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ pop(r3);
+  __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+  __ bne(&resume);
+  __ push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
+
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
+  __ StoreP(r3, FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset),
+            r0);
+  __ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset), r0);
+  __ LoadRR(r3, cp);
+  __ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
+  __ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+  __ CmpP(sp, r3);
+  __ beq(&post_runtime);
+  __ push(r2);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
+
+  __ bind(&resume);
+  context()->Plug(result_register());
+}
+
+void FullCodeGenerator::EmitGeneratorResume(
+    Expression* generator, Expression* value,
+    JSGeneratorObject::ResumeMode resume_mode) {
+  // The value stays in r2, and is ultimately read by the resumed generator, as
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+  // is read to throw the value when the resumed generator is already closed.
+  // r3 will hold the generator object until the activation has been resumed.
+  VisitForStackValue(generator);
+  VisitForAccumulatorValue(value);
+  PopOperand(r3);
+
+  // Store input value into generator object.
+  __ StoreP(result_register(),
+            FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
+  __ LoadRR(r4, result_register());
+  __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r4, r5,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
+
+  // Load suspended function and context.
+  __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
+  __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+
+  // Load receiver and store as the first argument.
+  __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+  __ push(r4);
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
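+  // Illustrative (non-normative) sketch: for 'function* g(a, b) {}' two hole
+  // values are pushed in place of 'a' and 'b'; the actual argument values are
+  // read from the generator's context, not from these slots.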
+  __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadW(
+      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+  Label argument_loop, push_frame;
+#if V8_TARGET_ARCH_S390X
+  __ CmpP(r5, Operand::Zero());
+  __ beq(&push_frame, Label::kNear);
+#else
+  __ SmiUntag(r5);
+  __ beq(&push_frame, Label::kNear);
+#endif
+  __ LoadRR(r0, r5);
+  __ bind(&argument_loop);
+  __ push(r4);
+  __ SubP(r0, Operand(1));
+  __ bne(&argument_loop);
+
+  // Enter a new JavaScript frame, and initialize its slots as they were when
+  // the generator was suspended.
+  Label resume_frame, done;
+  __ bind(&push_frame);
+  __ b(r14, &resume_frame);  // brasl
+  __ b(&done);
+  __ bind(&resume_frame);
+  // lr = return address.
+  // fp = caller's frame pointer.
+  // cp = callee's context.
+  // r6 = callee's JS function.
+  __ PushStandardFrame(r6);
+
+  // Load the operand stack size.
+  __ LoadP(r5, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
+  __ LoadP(r5, FieldMemOperand(r5, FixedArray::kLengthOffset));
+  __ SmiUntag(r5);
+
+  // If we are sending a value and there is no operand stack, we can jump back
+  // in directly.
+  Label call_resume;
+  if (resume_mode == JSGeneratorObject::NEXT) {
+    Label slow_resume;
+    __ bne(&slow_resume, Label::kNear);
+    __ LoadP(ip, FieldMemOperand(r6, JSFunction::kCodeEntryOffset));
+    __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(r4);
+    __ AddP(ip, ip, r4);
+    __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+    __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
+    __ Jump(ip);
+    __ bind(&slow_resume);
+  } else {
+    __ beq(&call_resume);
+  }
+
+  // Otherwise, we push holes for the operand stack and call the runtime to fix
+  // up the stack and the handlers.
+  Label operand_loop;
+  __ LoadRR(r0, r5);
+  __ bind(&operand_loop);
+  __ push(r4);
+  __ SubP(r0, Operand(1));
+  __ bne(&operand_loop);
+
+  __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
+  DCHECK(!result_register().is(r3));
+  __ Push(r3, result_register());
+  __ Push(Smi::FromInt(resume_mode));
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
+  // Not reached: the runtime call returns elsewhere.
+  __ stop("not-reached");
+
+  __ bind(&done);
+  context()->Plug(result_register());
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+  OperandStackDepthIncrement(2);
+  __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3) {
+  OperandStackDepthIncrement(3);
+  __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3, Register reg4) {
+  OperandStackDepthIncrement(4);
+  __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+  OperandStackDepthDecrement(2);
+  __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ SubP(r2, fp, sp);
+    __ CmpP(r2, Operand(expected_diff));
+    __ Assert(eq, kUnexpectedStackDepth);
+  }
+}
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+  Label allocate, done_allocate;
+
+  __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate, TAG_OBJECT);
+  __ b(&done_allocate);
+
+  __ bind(&allocate);
+  __ Push(Smi::FromInt(JSIteratorResult::kSize));
+  __ CallRuntime(Runtime::kAllocateInNewSpace);
+
+  __ bind(&done_allocate);
+  __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+  PopOperand(r4);
+  __ LoadRoot(r5,
+              done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+  __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+  __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
+}
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+                                              Token::Value op,
+                                              Expression* left_expr,
+                                              Expression* right_expr) {
+  Label done, smi_case, stub_call;
+
+  Register scratch1 = r4;
+  Register scratch2 = r5;
+
+  // Get the arguments.
+  Register left = r3;
+  Register right = r2;
+  PopOperand(left);
+
+  // Perform combined smi check on both operands.
+  __ LoadRR(scratch1, right);
+  __ OrP(scratch1, left);
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+  __ bind(&stub_call);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
+  patch_site.EmitPatchInfo();
+  __ b(&done);
+
+  __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the type
+  // recording binary operation stub.
+  switch (op) {
+    case Token::SAR:
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ ShiftRightArithP(right, left, scratch1);
+      __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
+      break;
+    case Token::SHL: {
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+#if V8_TARGET_ARCH_S390X
+      __ ShiftLeftP(right, left, scratch2);
+#else
+      __ SmiUntag(scratch1, left);
+      __ ShiftLeftP(scratch1, scratch1, scratch2);
+      // Check that the *signed* result fits in a smi
+      __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
+      __ SmiTag(right, scratch1);
+#endif
+      break;
+    }
+    case Token::SHR: {
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srl(scratch1, scratch2);
+      // Unsigned shift is not allowed to produce a negative number.
+      __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
+      __ SmiTag(right, scratch1);
+      break;
+    }
+    case Token::ADD: {
+      __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+      __ BranchOnOverflow(&stub_call);
+      __ LoadRR(right, scratch1);
+      break;
+    }
+    case Token::SUB: {
+      __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+      __ BranchOnOverflow(&stub_call);
+      __ LoadRR(right, scratch1);
+      break;
+    }
+    case Token::MUL: {
+      Label mul_zero;
+#if V8_TARGET_ARCH_S390X
+      // Remove tag from both operands.
+      __ SmiUntag(ip, right);
+      __ SmiUntag(scratch2, left);
+      __ mr_z(scratch1, ip);
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ lr(ip, scratch2);  // 32 bit load
+      __ sra(ip, Operand(31));
+      __ cr_z(ip, scratch1);  // 32 bit compare
+      __ bne(&stub_call);
+#else
+      __ SmiUntag(ip, right);
+      __ LoadRR(scratch2, left);  // load into low order of reg pair
+      __ mr_z(scratch1, ip);      // R4:R5 = R5 * ip
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ TestIfInt32(scratch1, scratch2, ip);
+      __ bne(&stub_call);
+#endif
+      // Go slow on zero result to handle -0.
+      __ chi(scratch2, Operand::Zero());
+      __ beq(&mul_zero, Label::kNear);
+#if V8_TARGET_ARCH_S390X
+      __ SmiTag(right, scratch2);
+#else
+      __ LoadRR(right, scratch2);
+#endif
+      __ b(&done);
+      // We need -0 if we were multiplying a negative number with 0 to get 0.
+      // We know one of them was zero.
+      __ bind(&mul_zero);
+      __ AddP(scratch2, right, left);
+      __ CmpP(scratch2, Operand::Zero());
+      __ blt(&stub_call);
+      __ LoadSmiLiteral(right, Smi::FromInt(0));
+      break;
+    }
+    case Token::BIT_OR:
+      __ OrP(right, left);
+      break;
+    case Token::BIT_AND:
+      __ AndP(right, left);
+      break;
+    case Token::BIT_XOR:
+      __ XorP(right, left);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+  for (int i = 0; i < lit->properties()->length(); i++) {
+    ObjectLiteral::Property* property = lit->properties()->at(i);
+    Expression* value = property->value();
+
+    Register scratch = r3;
+    if (property->is_static()) {
+      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // constructor
+    } else {
+      __ LoadP(scratch, MemOperand(sp, 0));  // prototype
+    }
+    PushOperand(scratch);
+    EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+    // The static prototype property is read only. We handle the non-computed
+    // property name case in the parser. Since this is the only case where we
+    // need to check for an own read-only property, we special-case it here so
+    // we do not need to perform the check for every property.
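+    // Illustrative (non-normative) example: a computed static key that
+    // evaluates to "prototype", e.g.
+    //
+    //   class C { static ["proto" + "type"]() {} }
+    //
+    // cannot be rejected by the parser and is instead rejected here at
+    // runtime via Runtime::kThrowIfStaticPrototype.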
+    if (property->is_static() && property->is_computed_name()) {
+      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
+      __ push(r2);
+    }
+
+    VisitForStackValue(value);
+    if (NeedsHomeObject(value)) {
+      EmitSetHomeObject(value, 2, property->GetSlot());
+    }
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+      case ObjectLiteral::Property::PROTOTYPE:
+        UNREACHABLE();
+      case ObjectLiteral::Property::COMPUTED:
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+        break;
+
+      case ObjectLiteral::Property::GETTER:
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
+        break;
+
+      case ObjectLiteral::Property::SETTER:
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
+  PopOperand(r3);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
+  JumpPatchSite patch_site(masm_);  // unbound, signals no inlined smi code.
+  CallIC(code, expr->BinaryOperationFeedbackId());
+  patch_site.EmitPatchInfo();
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+                                       FeedbackVectorSlot slot) {
+  DCHECK(expr->IsValidReferenceExpressionOrThis());
+
+  Property* prop = expr->AsProperty();
+  LhsKind assign_type = Property::GetAssignType(prop);
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN, slot);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      PushOperand(r2);  // Preserve value.
+      VisitForAccumulatorValue(prop->obj());
+      __ Move(StoreDescriptor::ReceiverRegister(), r2);
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
+      __ mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
+      EmitLoadStoreICSlot(slot);
+      CallStoreIC();
+      break;
+    }
+    case NAMED_SUPER_PROPERTY: {
+      PushOperand(r2);
+      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+      VisitForAccumulatorValue(
+          prop->obj()->AsSuperPropertyReference()->home_object());
+      // stack: value, this; r2: home_object
+      Register scratch = r4;
+      Register scratch2 = r5;
+      __ LoadRR(scratch, result_register());              // home_object
+      __ LoadP(r2, MemOperand(sp, kPointerSize));         // value
+      __ LoadP(scratch2, MemOperand(sp, 0));              // this
+      __ StoreP(scratch2, MemOperand(sp, kPointerSize));  // this
+      __ StoreP(scratch, MemOperand(sp, 0));              // home_object
+      // stack: this, home_object; r2: value
+      EmitNamedSuperPropertyStore(prop);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      PushOperand(r2);
+      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+      VisitForStackValue(
+          prop->obj()->AsSuperPropertyReference()->home_object());
+      VisitForAccumulatorValue(prop->key());
+      Register scratch = r4;
+      Register scratch2 = r5;
+      __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize));  // value
+      // stack: value, this, home_object; r3: key, r6: value
+      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // this
+      __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+      __ LoadP(scratch, MemOperand(sp, 0));  // home_object
+      __ StoreP(scratch, MemOperand(sp, kPointerSize));
+      __ StoreP(r2, MemOperand(sp, 0));
+      __ Move(r2, scratch2);
+      // stack: this, home_object, key; r2: value.
+      EmitKeyedSuperPropertyStore(prop);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      PushOperand(r2);  // Preserve value.
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ Move(StoreDescriptor::NameRegister(), r2);
+      PopOperands(StoreDescriptor::ValueRegister(),
+                  StoreDescriptor::ReceiverRegister());
+      EmitLoadStoreICSlot(slot);
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+      CallIC(ic);
+      break;
+    }
+  }
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+    Variable* var, MemOperand location) {
+  __ StoreP(result_register(), location);
+  if (var->IsContextSlot()) {
+    // RecordWrite may destroy all its register arguments.
+    __ LoadRR(r5, result_register());
+    int offset = Context::SlotOffset(var->index());
+    __ RecordWriteContextSlot(r3, offset, r5, r4, kLRHasBeenSaved,
+                              kDontSaveFPRegs);
+  }
+}
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+                                               FeedbackVectorSlot slot) {
+  if (var->IsUnallocated()) {
+    // Global var, const, or let.
+    __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+    __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+    EmitLoadStoreICSlot(slot);
+    CallStoreIC();
+
+  } else if (var->mode() == LET && op != Token::INIT) {
+    // Non-initializing assignment to let variable needs a write barrier.
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, r3);
+    __ LoadP(r5, location);
+    __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+    __ bne(&assign);
+    __ mov(r5, Operand(var->name()));
+    __ push(r5);
+    __ CallRuntime(Runtime::kThrowReferenceError);
+    // Perform the assignment.
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
+
+  } else if (var->mode() == CONST && op != Token::INIT) {
+    // Assignment to const variable needs a write barrier.
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label const_error;
+    MemOperand location = VarOperand(var, r3);
+    __ LoadP(r5, location);
+    __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+    __ bne(&const_error, Label::kNear);
+    __ mov(r5, Operand(var->name()));
+    __ push(r5);
+    __ CallRuntime(Runtime::kThrowReferenceError);
+    __ bind(&const_error);
+    __ CallRuntime(Runtime::kThrowConstAssignError);
+
+  } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
+    // Initializing assignment to const {this} needs a write barrier.
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label uninitialized_this;
+    MemOperand location = VarOperand(var, r3);
+    __ LoadP(r5, location);
+    __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+    __ beq(&uninitialized_this);
+    __ mov(r3, Operand(var->name()));
+    __ push(r3);
+    __ CallRuntime(Runtime::kThrowReferenceError);
+    __ bind(&uninitialized_this);
+    EmitStoreToStackLocalOrContextSlot(var, location);
+
+  } else if (!var->is_const_mode() ||
+             (var->mode() == CONST && op == Token::INIT)) {
+    if (var->IsLookupSlot()) {
+      // Assignment to var.
+      __ Push(var->name());
+      __ Push(r2);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
+    } else {
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+      MemOperand location = VarOperand(var, r3);
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+        // Check for an uninitialized let binding.
+        __ LoadP(r4, location);
+        __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+        __ Check(eq, kLetBindingReInitialization);
+      }
+      EmitStoreToStackLocalOrContextSlot(var, location);
+    }
+  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
+    // Const initializers need a write barrier.
+    DCHECK(!var->IsParameter());  // No const parameters.
+    if (var->IsLookupSlot()) {
+      __ push(r2);
+      __ mov(r2, Operand(var->name()));
+      __ Push(cp, r2);  // Context and name.
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
+    } else {
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+      Label skip;
+      MemOperand location = VarOperand(var, r3);
+      __ LoadP(r4, location);
+      __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+      __ bne(&skip);
+      EmitStoreToStackLocalOrContextSlot(var, location);
+      __ bind(&skip);
+    }
+
+  } else {
+    DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
+    if (is_strict(language_mode())) {
+      __ CallRuntime(Runtime::kThrowConstAssignError);
+    }
+    // Silently ignore store in sloppy mode.
+  }
+}
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a named store IC.
+  Property* prop = expr->target()->AsProperty();
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
+
+  __ mov(StoreDescriptor::NameRegister(),
+         Operand(prop->key()->AsLiteral()->value()));
+  PopOperand(StoreDescriptor::ReceiverRegister());
+  EmitLoadStoreICSlot(expr->AssignmentSlot());
+  CallStoreIC();
+
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
+  // Assignment to named property of super.
+  // r2 : value
+  // stack : receiver ('this'), home_object
+  DCHECK(prop != NULL);
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(key != NULL);
+
+  PushOperand(key->value());
+  PushOperand(r2);
+  CallRuntimeWithOperands((is_strict(language_mode())
+                               ? Runtime::kStoreToSuper_Strict
+                               : Runtime::kStoreToSuper_Sloppy));
+}
+
+void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
+  // Assignment to keyed property of super.
+  // r2 : value
+  // stack : receiver ('this'), home_object, key
+  DCHECK(prop != NULL);
+
+  PushOperand(r2);
+  CallRuntimeWithOperands((is_strict(language_mode())
+                               ? Runtime::kStoreKeyedToSuper_Strict
+                               : Runtime::kStoreKeyedToSuper_Sloppy));
+}
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a keyed store IC.
+  PopOperands(StoreDescriptor::ReceiverRegister(),
+              StoreDescriptor::NameRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(r2));
+
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+  EmitLoadStoreICSlot(expr->AssignmentSlot());
+  CallIC(ic);
+
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  SetExpressionPosition(expr);
+
+  Expression* key = expr->key();
+
+  if (key->IsPropertyName()) {
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), r2);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+      VisitForStackValue(
+          expr->obj()->AsSuperPropertyReference()->home_object());
+      EmitNamedSuperPropertyLoad(expr);
+    }
+  } else {
+    if (!expr->IsSuperAccess()) {
+      VisitForStackValue(expr->obj());
+      VisitForAccumulatorValue(expr->key());
+      __ Move(LoadDescriptor::NameRegister(), r2);
+      PopOperand(LoadDescriptor::ReceiverRegister());
+      EmitKeyedPropertyLoad(expr);
+    } else {
+      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+      VisitForStackValue(
+          expr->obj()->AsSuperPropertyReference()->home_object());
+      VisitForStackValue(expr->key());
+      EmitKeyedSuperPropertyLoad(expr);
+    }
+  }
+  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+  ic_total_count_++;
+  __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+
+  // Get the target function.
+  ConvertReceiverMode convert_mode;
+  if (callee->IsVariableProxy()) {
+    {
+      StackValueContext context(this);
+      EmitVariableLoad(callee->AsVariableProxy());
+      PrepareForBailout(callee, NO_REGISTERS);
+    }
+    // Push undefined as receiver. This is patched in the method prologue if it
+    // is a sloppy mode method.
+    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    PushOperand(r1);
+    convert_mode = ConvertReceiverMode::kNullOrUndefined;
+  } else {
+    // Load the function from the receiver.
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+    EmitNamedPropertyLoad(callee->AsProperty());
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    // Push the target function under the receiver.
+    __ LoadP(r1, MemOperand(sp, 0));
+    PushOperand(r1);
+    __ StoreP(r2, MemOperand(sp, kPointerSize));
+    convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
+  }
+
+  EmitCall(expr, convert_mode);
+}
+
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+  SetExpressionPosition(prop);
+
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  const Register scratch = r3;
+  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+  VisitForAccumulatorValue(super_ref->home_object());
+  __ LoadRR(scratch, r2);
+  VisitForAccumulatorValue(super_ref->this_var());
+  PushOperands(scratch, r2, r2, scratch);
+  PushOperand(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - this (receiver) <-- LoadFromSuper will pop here and below.
+  //  - home_object
+  //  - key
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+
+  // Replace home_object with target function.
+  __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr);
+}
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
+  // Load the key.
+  VisitForAccumulatorValue(key);
+
+  Expression* callee = expr->expression();
+
+  // Load the function from the receiver.
+  DCHECK(callee->IsProperty());
+  __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+  __ Move(LoadDescriptor::NameRegister(), r2);
+  EmitKeyedPropertyLoad(callee->AsProperty());
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+  // Push the target function under the receiver.
+  __ LoadP(ip, MemOperand(sp, 0));
+  PushOperand(ip);
+  __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+  EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
+}
+
+void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetExpressionPosition(prop);
+  // Load the function from the receiver.
+  const Register scratch = r3;
+  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+  VisitForAccumulatorValue(super_ref->home_object());
+  __ LoadRR(scratch, r2);
+  VisitForAccumulatorValue(super_ref->this_var());
+  PushOperands(scratch, r2, r2, scratch);
+  VisitForStackValue(prop->key());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
+  //  - home_object
+  //  - key
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+
+  // Replace home_object with target function.
+  __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr);
+}
+
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
+  // Load the arguments.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  SetCallPosition(expr, expr->tail_call_mode());
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
+  __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
+  __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+  // Don't assign a type feedback id to the IC, since type feedback is provided
+  // by the vector above.
+  CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
+
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+  // r6: copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
+  } else {
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+  }
+
+  // r5: the receiver of the enclosing function.
+  __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  // r4: language mode.
+  __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
+
+  // r3: the start position of the scope the call resides in.
+  __ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
+
+  // Do the runtime call.
+  __ Push(r6, r5, r4, r3);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
+}
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+  VariableProxy* callee = expr->expression()->AsVariableProxy();
+  if (callee->var()->IsLookupSlot()) {
+    Label slow, done;
+    SetExpressionPosition(callee);
+    // Generate code for loading from variables potentially shadowed by
+    // eval-introduced variables.
+    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    // Call the runtime to find the function to call (returned in r2) and
+    // the object holding it (returned in r3).
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperands(r2, r3);  // Function, receiver.
+    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+    // If fast case code has been generated, emit code to push the function
+    // and receiver and have the slow path jump around this code.
+    if (done.is_linked()) {
+      Label call;
+      __ b(&call);
+      __ bind(&done);
+      // Push function.
+      __ push(r2);
+      // Pass undefined as the receiver, which is the WithBaseObject of a
+      // non-object environment record.  If the callee is sloppy, it will patch
+      // it up to be the global receiver.
+      __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+      __ push(r3);
+      __ bind(&call);
+    }
+  } else {
+    VisitForStackValue(callee);
+    // refEnv.WithBaseObject()
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    PushOperand(r4);  // Reserved receiver slot.
+  }
+}
+
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
+  // to resolve the function we need to call.  Then we call the resolved
+  // function using the given arguments.
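+  // Illustrative (non-normative) sketch: for 'eval(src)', the resolve step
+  // either yields a function compiled from 'src' in the current scope (a
+  // direct eval) or leaves the original callee to be called normally.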
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  PushCalleeAndWithBaseObject(expr);
+
+  // Push the arguments.
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Push a copy of the function (found below the arguments) and
+  // resolve eval.
+  __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+  __ push(r3);
+  EmitResolvePossiblyDirectEval(arg_count);
+
+  // Touch up the stack with the resolved function.
+  __ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+
+  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+
+  // Record source position for debugger.
+  SetCallPosition(expr);
+  __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+  __ mov(r2, Operand(arg_count));
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+
+  // Push constructor on the stack.  If it's not a function it's used as
+  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+  // ignored.
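+  // Illustrative (non-normative) example: in 'new f(g())', 'f' is evaluated
+  // and pushed below before 'g()' is evaluated as part of the arguments.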
+  DCHECK(!expr->expression()->IsSuperPropertyReference());
+  VisitForStackValue(expr->expression());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetConstructCallPosition(expr);
+
+  // Load function and argument count into r3 and r2.
+  __ mov(r2, Operand(arg_count));
+  __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize), r0);
+
+  // Record call targets in unoptimized code.
+  __ EmitLoadTypeFeedbackVector(r4);
+  __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
+
+  CallConstructStub stub(isolate());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+  SuperCallReference* super_call_ref =
+      expr->expression()->AsSuperCallReference();
+  DCHECK_NOT_NULL(super_call_ref);
+
+  // Push the super constructor target on the stack (may be null,
+  // but the Construct builtin can deal with that properly).
+  VisitForAccumulatorValue(super_call_ref->this_function_var());
+  __ AssertFunction(result_register());
+  __ LoadP(result_register(),
+           FieldMemOperand(result_register(), HeapObject::kMapOffset));
+  __ LoadP(result_register(),
+           FieldMemOperand(result_register(), Map::kPrototypeOffset));
+  PushOperand(result_register());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetConstructCallPosition(expr);
+
+  // Load new target into r5.
+  VisitForAccumulatorValue(super_call_ref->new_target_var());
+  __ LoadRR(r5, result_register());
+
+  // Load function and argument count into r3 and r2.
+  __ mov(r2, Operand(arg_count));
+  __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize));
+
+  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
+
+  RecordJSReturnSite(expr);
+
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false, skip_lookup;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ TestIfSmi(r2);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r2, if_false);
+  __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(ge, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r2, if_false);
+  __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r2, if_false);
+  __ CompareObjectType(r2, r3, r3, JS_TYPED_ARRAY_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r2, if_false);
+  __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r2, if_false);
+  __ CompareObjectType(r2, r3, r3, JS_PROXY_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForAccumulatorValue(args->at(0));
+
+  // If the object is not a JSReceiver, we return null.
+  __ JumpIfSmi(r2, &null);
+  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+  __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
+  // Map is now in r2.
+  __ blt(&null);
+
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  __ CmpLogicalP(r3, Operand(FIRST_FUNCTION_TYPE));
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ bge(&function);
+
+  // Check if the constructor in the map is a JS function.
+  Register instance_type = r4;
+  __ GetMapConstructor(r2, r2, r3, instance_type);
+  __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
+  __ bne(&non_function_constructor, Label::kNear);
+
+  // r2 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ LoadP(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r2,
+           FieldMemOperand(r2, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ b(&done, Label::kNear);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(r2, Heap::kFunction_stringRootIndex);
+  __ b(&done, Label::kNear);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(r2, Heap::kObject_stringRootIndex);
+  __ b(&done, Label::kNear);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(r2, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ JumpIfSmi(r2, &done);
+  // If the object is not a value type, return the object.
+  __ CompareObjectType(r2, r3, r3, JS_VALUE_TYPE);
+  __ bne(&done, Label::kNear);
+  __ LoadP(r2, FieldMemOperand(r2, JSValue::kValueOffset));
+
+  __ bind(&done);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(3, args->length());
+
+  Register string = r2;
+  Register index = r3;
+  Register value = r4;
+
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
+  PopOperands(index, value);
+
+  if (FLAG_debug_code) {
+    __ TestIfSmi(value);
+    __ Check(eq, kNonSmiValue, cr0);
+    __ TestIfSmi(index);
+    __ Check(eq, kNonSmiIndex, cr0);
+    __ SmiUntag(index);
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+    __ SmiTag(index);
+  }
+
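+  // Untag the character value, point ip at the first character of the string,
+  // and convert the smi index to a byte offset before storing the byte.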
+  __ SmiUntag(value);
+  __ AddP(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ SmiToByteArrayOffset(r1, index);
+  __ StoreByte(value, MemOperand(ip, r1));
+  context()->Plug(string);
+}
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(3, args->length());
+
+  Register string = r2;
+  Register index = r3;
+  Register value = r4;
+
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
+  PopOperands(index, value);
+
+  if (FLAG_debug_code) {
+    __ TestIfSmi(value);
+    __ Check(eq, kNonSmiValue, cr0);
+    __ TestIfSmi(index);
+    __ Check(eq, kNonSmiIndex, cr0);
+    __ SmiUntag(index, index);
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+    __ SmiTag(index, index);
+  }
+
+  __ SmiUntag(value);
+  __ SmiToShortArrayOffset(r1, index);
+  __ StoreHalfWord(value, MemOperand(r1, string, SeqTwoByteString::kHeaderSize -
+                                                     kHeapObjectTag));
+  context()->Plug(string);
+}
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  Label done;
+  StringCharFromCodeGenerator generator(r2, r3);
+  generator.GenerateFast(masm_);
+  __ b(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(r3);
+}
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Register object = r3;
+  Register index = r2;
+  Register result = r5;
+
+  PopOperand(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ b(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // NaN.
+  __ LoadRoot(result, Heap::kNanValueRootIndex);
+  __ b(&done);
+
+  __ bind(&need_conversion);
+  // Load the undefined value into the result register, which will
+  // trigger conversion.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ b(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
+}
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Register object = r3;
+  Register index = r2;
+  Register scratch = r5;
+  Register result = r2;
+
+  PopOperand(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object, index, scratch, result,
+                                  &need_conversion, &need_conversion,
+                                  &index_out_of_range, STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ b(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kempty_stringRootIndex);
+  __ b(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ LoadSmiLiteral(result, Smi::FromInt(0));
+  __ b(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
+}
+
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_LE(2, args->length());
+  // Push target, receiver and arguments onto the stack.
+  for (Expression* const arg : *args) {
+    VisitForStackValue(arg);
+  }
+  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  // Move target to r3.
+  int const argc = args->length() - 2;
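+  // The target was pushed first, so it lies below the receiver and the argc
+  // arguments, (argc + 1) slots from the top of the stack.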
+  __ LoadP(r3, MemOperand(sp, (argc + 1) * kPointerSize));
+  // Call the target.
+  __ mov(r2, Operand(argc));
+  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // Discard the function left on TOS.
+  context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
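+  // The hash field caches an array index when the bits selected by
+  // kContainsCachedArrayIndexMask are all clear.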
+  __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+  __ AndP(r0, r2, Operand(String::kContainsCachedArrayIndexMask));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  __ AssertString(r2);
+
+  __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+  __ IndexFromHash(r2, r2);
+
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(1, args->length());
+  VisitForAccumulatorValue(args->at(0));
+  __ AssertFunction(r2);
+  __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadP(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r2);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
+  __ mov(ip, Operand(debug_is_active));
+  __ LoadlB(r2, MemOperand(ip));
+  __ SmiTag(r2);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(2, args->length());
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  Label runtime, done;
+
+  __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime, TAG_OBJECT);
+  __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+  __ Pop(r4, r5);
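+  // r4 receives the value and r5 the done flag pushed above; they are written
+  // into the freshly allocated JSIteratorResult below.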
+  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+  __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+  __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
+  STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+  __ b(&done);
+
+  __ bind(&runtime);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
+
+  __ bind(&done);
+  context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadNativeContextSlot(expr->context_index(), r2);
+  PushOperand(r2);
+
+  // Push undefined as the receiver.
+  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  PushOperand(r2);
+}
+
+void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  SetCallPosition(expr);
+  __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+  __ mov(r2, Operand(arg_count));
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
+
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+      if (property != NULL) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
+        context()->Plug(r2);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
+        // Delete of an unqualified identifier is disallowed in strict mode but
+        // "delete this" is allowed.
+        bool is_this = var->HasThisName(isolate());
+        DCHECK(is_sloppy(language_mode()) || is_this);
+        if (var->IsUnallocatedOrGlobalSlot()) {
+          __ LoadGlobalObject(r4);
+          __ mov(r3, Operand(var->name()));
+          __ Push(r4, r3);
+          __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+          context()->Plug(r2);
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+          // Result of deleting non-global, non-dynamic variables is false.
+          // The subexpression does not have side effects.
+          context()->Plug(is_this);
+        } else {
+          // Non-global variable.  Call the runtime to try to delete from the
+          // context where the variable was introduced.
+          __ Push(var->name());
+          __ CallRuntime(Runtime::kDeleteLookupSlot);
+          context()->Plug(r2);
+        }
+      } else {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        context()->Plug(true);
+      }
+      break;
+    }
+
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      VisitForEffect(expr->expression());
+      context()->Plug(Heap::kUndefinedValueRootIndex);
+      break;
+    }
+
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      if (context()->IsEffect()) {
+        // Unary NOT has no side effects so it's only necessary to visit the
+        // subexpression.  Match the optimizing compiler by not branching.
+        VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(), test->false_label(),
+                        test->true_label(), test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
+      } else {
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(), &materialize_false,
+                        &materialize_true, &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(r2, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(r2);
+        __ b(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(r2, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(r2);
+        __ bind(&done);
+      }
+      break;
+    }
+
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      {
+        AccumulatorValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
+      __ LoadRR(r5, r2);
+      TypeofStub typeof_stub(isolate());
+      __ CallStub(&typeof_stub);
+      context()->Plug(r2);
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
+
+  Comment cmnt(masm_, "[ CountOperation");
+
+  Property* prop = expr->expression()->AsProperty();
+  LhsKind assign_type = Property::GetAssignType(prop);
+
+  // Evaluate expression and get value.
+  if (assign_type == VARIABLE) {
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
+  } else {
+    // Reserve space for result of postfix operation.
+    if (expr->is_postfix() && !context()->IsEffect()) {
+      __ LoadSmiLiteral(ip, Smi::FromInt(0));
+      PushOperand(ip);
+    }
+    switch (assign_type) {
+      case NAMED_PROPERTY: {
+        // Put the object both on the stack and in the register.
+        VisitForStackValue(prop->obj());
+        __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+        EmitNamedPropertyLoad(prop);
+        break;
+      }
+
+      case NAMED_SUPER_PROPERTY: {
+        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+        VisitForAccumulatorValue(
+            prop->obj()->AsSuperPropertyReference()->home_object());
+        PushOperand(result_register());
+        const Register scratch = r3;
+        __ LoadP(scratch, MemOperand(sp, kPointerSize));
+        PushOperands(scratch, result_register());
+        EmitNamedSuperPropertyLoad(prop);
+        break;
+      }
+
+      case KEYED_SUPER_PROPERTY: {
+        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+        VisitForAccumulatorValue(
+            prop->obj()->AsSuperPropertyReference()->home_object());
+        const Register scratch = r3;
+        const Register scratch1 = r4;
+        __ LoadRR(scratch, result_register());
+        VisitForAccumulatorValue(prop->key());
+        PushOperands(scratch, result_register());
+        __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+        PushOperands(scratch1, scratch, result_register());
+        EmitKeyedSuperPropertyLoad(prop);
+        break;
+      }
+
+      case KEYED_PROPERTY: {
+        VisitForStackValue(prop->obj());
+        VisitForStackValue(prop->key());
+        __ LoadP(LoadDescriptor::ReceiverRegister(),
+                 MemOperand(sp, 1 * kPointerSize));
+        __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+        EmitKeyedPropertyLoad(prop);
+        break;
+      }
+
+      case VARIABLE:
+        UNREACHABLE();
+    }
+  }
+
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+  }
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
+  if (ShouldInlineSmiCase(expr->op())) {
+    Label slow;
+    patch_site.EmitJumpIfNotSmi(r2, &slow);
+
+    // Save result for postfix expressions.
+    if (expr->is_postfix()) {
+      if (!context()->IsEffect()) {
+        // Save the result on the stack. If we have a named or keyed property
+        // we store the result under the receiver that is currently on top
+        // of the stack.
+        switch (assign_type) {
+          case VARIABLE:
+            __ push(r2);
+            break;
+          case NAMED_PROPERTY:
+            __ StoreP(r2, MemOperand(sp, kPointerSize));
+            break;
+          case NAMED_SUPER_PROPERTY:
+            __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+            break;
+          case KEYED_PROPERTY:
+            __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+            break;
+          case KEYED_SUPER_PROPERTY:
+            __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+            break;
+        }
+      }
+    }
+
+    Register scratch1 = r3;
+    Register scratch2 = r4;
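+    // Fast path: add the smi delta directly and fall through to done unless
+    // the addition overflowed.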
+    __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
+    __ AddAndCheckForOverflow(r2, r2, scratch1, scratch2, r0);
+    __ BranchOnNoOverflow(&done);
+    // Call stub. Undo operation first.
+    __ SubP(r2, r2, scratch1);
+    __ b(&stub_call);
+    __ bind(&slow);
+  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+
+  // Save result for postfix expressions.
+  if (expr->is_postfix()) {
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          PushOperand(r2);
+          break;
+        case NAMED_PROPERTY:
+          __ StoreP(r2, MemOperand(sp, kPointerSize));
+          break;
+        case NAMED_SUPER_PROPERTY:
+          __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+          break;
+        case KEYED_PROPERTY:
+          __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+          break;
+        case KEYED_SUPER_PROPERTY:
+          __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+          break;
+      }
+    }
+  }
+
+  __ bind(&stub_call);
+  __ LoadRR(r3, r2);
+  __ LoadSmiLiteral(r2, Smi::FromInt(count_value));
+
+  SetExpressionPosition(expr);
+
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
+  patch_site.EmitPatchInfo();
+  __ bind(&done);
+
+  // Store the value returned in r2.
+  switch (assign_type) {
+    case VARIABLE:
+      if (expr->is_postfix()) {
+        {
+          EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN, expr->CountSlot());
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(r2);
+        }
+        // For all contexts except EffectContext we have the result on
+        // top of the stack.
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN, expr->CountSlot());
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(r2);
+      }
+      break;
+    case NAMED_PROPERTY: {
+      __ mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
+      PopOperand(StoreDescriptor::ReceiverRegister());
+      EmitLoadStoreICSlot(expr->CountSlot());
+      CallStoreIC();
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r2);
+      }
+      break;
+    }
+    case NAMED_SUPER_PROPERTY: {
+      EmitNamedSuperPropertyStore(prop);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r2);
+      }
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      EmitKeyedSuperPropertyStore(prop);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r2);
+      }
+      break;
+    }
+    case KEYED_PROPERTY: {
+      PopOperands(StoreDescriptor::ReceiverRegister(),
+                  StoreDescriptor::NameRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+      EmitLoadStoreICSlot(expr->CountSlot());
+      CallIC(ic);
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r2);
+      }
+      break;
+    }
+  }
+}
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  {
+    AccumulatorValueContext context(this);
+    VisitForTypeofValue(sub_expr);
+  }
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+  Factory* factory = isolate()->factory();
+  if (String::Equals(check, factory->number_string())) {
+    __ JumpIfSmi(r2, if_true);
+    __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ CompareRoot(r2, Heap::kHeapNumberMapRootIndex);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->string_string())) {
+    __ JumpIfSmi(r2, if_false);
+    __ CompareObjectType(r2, r2, r3, FIRST_NONSTRING_TYPE);
+    Split(lt, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->symbol_string())) {
+    __ JumpIfSmi(r2, if_false);
+    __ CompareObjectType(r2, r2, r3, SYMBOL_TYPE);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->boolean_string())) {
+    __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+    __ beq(if_true);
+    __ CompareRoot(r2, Heap::kFalseValueRootIndex);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->undefined_string())) {
+    __ CompareRoot(r2, Heap::kNullValueRootIndex);
+    __ beq(if_false);
+    __ JumpIfSmi(r2, if_false);
+    // Check for undetectable objects => true.
+    __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+          Operand(1 << Map::kIsUndetectable));
+    Split(ne, if_true, if_false, fall_through);
+
+  } else if (String::Equals(check, factory->function_string())) {
+    __ JumpIfSmi(r2, if_false);
+    __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+    __ AndP(r3, r3,
+            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    __ CmpP(r3, Operand(1 << Map::kIsCallable));
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->object_string())) {
+    __ JumpIfSmi(r2, if_false);
+    __ CompareRoot(r2, Heap::kNullValueRootIndex);
+    __ beq(if_true);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
+    __ blt(if_false);
+    __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+          Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    Split(eq, if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
+  } else if (String::Equals(check, factory->type##_string())) { \
+    __ JumpIfSmi(r2, if_false);                                 \
+    __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));  \
+    __ CompareRoot(r2, Heap::k##Type##MapRootIndex);            \
+    Split(eq, if_true, if_false, fall_through);
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+  } else {
+    if (if_false != fall_through) __ b(if_false);
+  }
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  Comment cmnt(masm_, "[ CompareOperation");
+  SetExpressionPosition(expr);
+
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
+  // Always perform the comparison for its control flow.  Pack the result
+  // into the expression's context after the comparison is performed.
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  Token::Value op = expr->op();
+  VisitForStackValue(expr->left());
+  switch (op) {
+    case Token::IN:
+      VisitForStackValue(expr->right());
+      CallRuntimeWithOperands(Runtime::kHasProperty);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+      Split(eq, if_true, if_false, fall_through);
+      break;
+
+    case Token::INSTANCEOF: {
+      VisitForAccumulatorValue(expr->right());
+      PopOperand(r3);
+      InstanceOfStub stub(isolate());
+      __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+      Split(eq, if_true, if_false, fall_through);
+      break;
+    }
+
+    default: {
+      VisitForAccumulatorValue(expr->right());
+      Condition cond = CompareIC::ComputeCondition(op);
+      PopOperand(r3);
+
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
+      if (inline_smi_code) {
+        Label slow_case;
+        __ LoadRR(r4, r3);
+        __ OrP(r4, r2);
+        patch_site.EmitJumpIfNotSmi(r4, &slow_case);
+        __ CmpP(r3, r2);
+        Split(cond, if_true, if_false, NULL);
+        __ bind(&slow_case);
+      }
+
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+      CallIC(ic, expr->CompareOperationFeedbackId());
+      patch_site.EmitPatchInfo();
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      __ CmpP(r2, Operand::Zero());
+      Split(cond, if_true, if_false, fall_through);
+    }
+  }
+
+  // Convert the result of the comparison into one expected for this
+  // expression's context.
+  context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  if (expr->op() == Token::EQ_STRICT) {
+    Heap::RootListIndex nil_value = nil == kNullValue
+                                        ? Heap::kNullValueRootIndex
+                                        : Heap::kUndefinedValueRootIndex;
+    __ CompareRoot(r2, nil_value);
+    Split(eq, if_true, if_false, fall_through);
+  } else {
+    __ JumpIfSmi(r2, if_false);
+    __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+    __ AndP(r0, r3, Operand(1 << Map::kIsUndetectable));
+    Split(ne, if_true, if_false, fall_through);
+  }
+  context()->Plug(if_true, if_false);
+}
+Register FullCodeGenerator::result_register() { return r2; }
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+  __ LoadP(value, MemOperand(fp, frame_offset));
+}
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+  __ StoreP(value, MemOperand(fp, frame_offset));
+}
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ LoadP(dst, ContextMemOperand(cp, context_index), r0);
+}
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* closure_scope = scope()->ClosureScope();
+  if (closure_scope->is_script_scope() || closure_scope->is_module_scope()) {
+    // Contexts nested in the native context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code.
+    __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
+  } else if (closure_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code.  Fetch it from the context.
+    __ LoadP(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+  } else {
+    DCHECK(closure_scope->is_function_scope());
+    __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+  PushOperand(ip);
+}
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+  DCHECK(!result_register().is(r3));
+  // Store pending message while executing finally block.
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(ip, Operand(pending_message_obj));
+  __ LoadP(r3, MemOperand(ip));
+  PushOperand(r3);
+
+  ClearPendingMessage();
+}
+
+void FullCodeGenerator::ExitFinallyBlock() {
+  DCHECK(!result_register().is(r3));
+  // Restore pending message from stack.
+  PopOperand(r3);
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(ip, Operand(pending_message_obj));
+  __ StoreP(r3, MemOperand(ip));
+}
+
+void FullCodeGenerator::ClearPendingMessage() {
+  DCHECK(!result_register().is(r3));
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+  __ mov(ip, Operand(pending_message_obj));
+  __ StoreP(r3, MemOperand(ip));
+}
+
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  DCHECK(!result_register().is(r3));
+  // Restore the accumulator (r2) and token (r3).
+  __ Pop(r3, result_register());
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ CmpSmiLiteral(r3, Smi::FromInt(cmd.token), r0);
+    __ bne(&skip);
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
+
+#undef __
+
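+// Expected encodings of the patched back-edge branch: 0xA7A4xxxx is BRC with
+// condition mask 0xA (branch on >=) and 0xA704xxxx is BRC with mask 0x0, which
+// is never taken. The low halfword holds the branch displacement in halfwords
+// (0x11 on 64-bit, 0xD on 31-bit).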
+#if V8_TARGET_ARCH_S390X
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A40011;
+static const FourByteInstr kOSRBranchInstruction = 0xA7040011;
+static const int16_t kBackEdgeBranchOffset = 0x11 * 2;
+#else
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A4000D;
+static const FourByteInstr kOSRBranchInstruction = 0xA704000D;
+static const int16_t kBackEdgeBranchOffset = 0xD * 2;
+#endif
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
+                            BackEdgeState target_state,
+                            Code* replacement_code) {
+  Address call_address = Assembler::target_address_from_return_address(pc);
+  Address branch_address = call_address - 4;
+  Isolate* isolate = unoptimized_code->GetIsolate();
+  CodePatcher patcher(isolate, branch_address, 4);
+
+  switch (target_state) {
+    case INTERRUPT: {
+      //  <decrement profiling counter>
+      //         bge     <ok>            ;; patched to GE BRC
+      //         brasl    r14, <interrupt stub address>
+      //  <reset profiling counter>
+      //  ok-label
+      patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffset));
+      break;
+    }
+    case ON_STACK_REPLACEMENT:
+      //  <decrement profiling counter>
+      //         brc   0x0, <ok>            ;;  patched to NOP BRC
+      //         brasl    r14, <interrupt stub address>
+      //  <reset profiling counter>
+      //  ok-label ----- pc_after points here
+      patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffset));
+      break;
+  }
+
+  // Replace the stack check address in the mov sequence with the
+  // entry address of the replacement code.
+  Assembler::set_target_address_at(isolate, call_address, unoptimized_code,
+                                   replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_address, replacement_code);
+}
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate, Code* unoptimized_code, Address pc) {
+  Address call_address = Assembler::target_address_from_return_address(pc);
+  Address branch_address = call_address - 4;
+#ifdef DEBUG
+  Address interrupt_address =
+      Assembler::target_address_at(call_address, unoptimized_code);
+#endif
+
+  DCHECK(BRC == Instruction::S390OpcodeValue(branch_address));
+  // For interrupt, we expect a branch greater than or equal
+  // i.e. BRC 0xa, +XXXX  (0xA7A4XXXX)
+  FourByteInstr br_instr = Instruction::InstructionBits(
+      reinterpret_cast<const byte*>(branch_address));
+  if (kInterruptBranchInstruction == br_instr) {
+    DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
+    return INTERRUPT;
+  }
+
+  // Expect BRC to be patched to NOP branch.
+  // i.e. BRC 0x0, +XXXX (0xA704XXXX)
+  USE(kOSRBranchInstruction);
+  DCHECK(kOSRBranchInstruction == br_instr);
+
+  DCHECK(interrupt_address ==
+         isolate->builtins()->OnStackReplacement()->entry());
+  return ON_STACK_REPLACEMENT;
+}
+
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index 910b2cf..992e7fe 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -286,35 +286,32 @@
 
   // Visit the declarations and body unless there is an illegal
   // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(above_equal, &ok, Label::kNear);
+    __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
 
-    { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-       Label ok;
-       __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-       __ j(above_equal, &ok, Label::kNear);
-       __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
-       __ bind(&ok);
-    }
-
-    { Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(literal()->body());
-      DCHECK(loop_depth() == 0);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -500,7 +497,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -606,7 +603,7 @@
                                Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
   Split(equal, if_true, if_false, fall_through);
@@ -947,14 +944,14 @@
 
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
@@ -1029,11 +1026,6 @@
   // We got a fixed array in register rax. Iterate through that.
   __ bind(&fixed_array);
 
-  // No need for a write barrier, we are storing a Smi in the feedback vector.
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(rbx);
-  __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
-          TypeFeedbackVector::MegamorphicSentinel(isolate()));
   __ movp(rcx, Operand(rsp, 0 * kPointerSize));  // Get enumerated object
   __ Push(Smi::FromInt(1));                      // Smi(1) indicates slow check
   __ Push(rax);  // Array
@@ -1069,12 +1061,8 @@
   __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key, record slow-path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(rdx);
   __ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
           TypeFeedbackVector::MegamorphicSentinel(isolate()));
@@ -1115,8 +1103,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ addp(rsp, Immediate(5 * kPointerSize));
-  OperandStackDepthDecrement(ForIn::kElementCount);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1125,31 +1112,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ Move(rbx, info);
-    __ CallStub(&stub);
-  } else {
-    __ Push(info);
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1578,12 +1540,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ Push(Operand(rsp, 0));
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1824,65 +1780,45 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ Push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ jmp(&suspend);
-      __ bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ Pop(rbx);
-      __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
-      __ j(not_equal, &resume);
-      __ Push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ jmp(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ Pop(rbx);
+  __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
+  __ j(not_equal, &resume);
+  __ Push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
-              Smi::FromInt(continuation.pos()));
-      __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
-      __ movp(rcx, rsi);
-      __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
-                          kDontSaveFPRegs);
-      __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
-      __ cmpp(rsp, rbx);
-      __ j(equal, &post_runtime);
-      __ Push(rax);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ movp(context_register(),
-              Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
+          Smi::FromInt(continuation.pos()));
+  __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+  __ movp(rcx, rsi);
+  __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
+                      kDontSaveFPRegs);
+  __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+  __ cmpp(rsp, rbx);
+  __ j(equal, &post_runtime);
+  __ Push(rax);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ movp(context_register(),
+          Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
 
-      PopOperand(result_register());
-      EmitReturnSequence();
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ bind(&resume);
+  context()->Plug(result_register());
 }
 
 
@@ -1911,7 +1847,10 @@
   // Push receiver.
   __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
 
-  // Push holes for arguments to generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ LoadSharedFunctionInfoSpecialField(rdx, rdx,
       SharedFunctionInfo::kFormalParameterCountOffset);
@@ -2014,19 +1953,7 @@
   __ LoadRoot(FieldOperand(rax, JSIteratorResult::kDoneOffset),
               done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
   STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(!prop->IsSuperAccess());
-
-  __ Move(LoadDescriptor::NameRegister(), key->value());
-  __ Move(LoadDescriptor::SlotRegister(),
-          SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
+  OperandStackDepthDecrement(1);
 }
 
 
@@ -2586,7 +2513,7 @@
   }
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -2944,9 +2871,10 @@
   __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rax);
   __ j(below, &null, Label::kNear);
 
-  // Return 'Function' for JSFunction objects.
-  __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
-  __ j(equal, &function, Label::kNear);
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  __ CmpInstanceType(rax, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ j(above_equal, &function, Label::kNear);
 
   // Check if the constructor in the map is a JS function.
   __ GetMapConstructor(rax, rax, rbx);
@@ -3065,23 +2993,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into rax and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(rax, &done_convert, Label::kNear);
-  __ Push(rax);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3265,6 +3176,11 @@
   context()->Plug(rax);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, rax);
+  context()->Plug(rax);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3305,11 +3221,13 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
-  // Push the builtins object as receiver.
+  // Push function.
+  __ LoadNativeContextSlot(expr->context_index(), rax);
+  PushOperand(rax);
+
+  // Push undefined as receiver.
   OperandStackDepthIncrement(1);
   __ PushRoot(Heap::kUndefinedValueRootIndex);
-
-  __ LoadNativeContextSlot(expr->context_index(), rax);
 }
 
 
@@ -3323,59 +3241,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRuntime");
-
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    PushOperand(Operand(rsp, 0));
-    __ movp(Operand(rsp, kPointerSize), rax);
-
-    // Push the arguments ("left-to-right").
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    context()->DropAndPlug(1, rax);
-
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(function, arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(rax);
-      }
-    }
-  }
+  // Restore context register.
+  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3618,11 +3486,11 @@
     __ jmp(&stub_call, Label::kNear);
     __ bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3662,9 +3530,6 @@
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in rax.
   switch (assign_type) {
     case VARIABLE:
@@ -3919,21 +3784,16 @@
     __ CompareRoot(rax, nil_value);
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ CompareRoot(rax, Heap::kTrueValueRootIndex);
-    Split(equal, if_true, if_false, fall_through);
+    __ JumpIfSmi(rax, if_false);
+    __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
+    __ testb(FieldOperand(rax, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    Split(not_zero, if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(rax);
-}
-
-
 Register FullCodeGenerator::result_register() {
   return rax;
 }
@@ -3943,6 +3803,10 @@
   return rsi;
 }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK(IsAligned(frame_offset, kPointerSize));
+  __ movp(value, Operand(rbp, frame_offset));
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   DCHECK(IsAligned(frame_offset, kPointerSize));
@@ -4012,11 +3876,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   __ Pop(result_register());  // Restore the accumulator.
   __ Pop(rdx);                // Get the token.
@@ -4072,7 +3931,6 @@
       *jns_offset_address = kJnsOffset;
       break;
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       //     sub <profiling_counter>, <delta>  ;; Not changed
       //     nop
       //     nop
@@ -4110,16 +3968,10 @@
   DCHECK_EQ(kNopByteOne, *jns_instr_address);
   DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
 
-  if (Assembler::target_address_at(call_target_address,
-                                   unoptimized_code) ==
-      isolate->builtins()->OnStackReplacement()->entry()) {
-    return ON_STACK_REPLACEMENT;
-  }
-
-  DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
-            Assembler::target_address_at(call_target_address,
-                                         unoptimized_code));
-  return OSR_AFTER_STACK_CHECK;
+  DCHECK_EQ(
+      isolate->builtins()->OnStackReplacement()->entry(),
+      Assembler::target_address_at(call_target_address, unoptimized_code));
+  return ON_STACK_REPLACEMENT;
 }
 
 }  // namespace internal
diff --git a/src/full-codegen/x87/full-codegen-x87.cc b/src/full-codegen/x87/full-codegen-x87.cc
index 36b7c5d..f14aaf6 100644
--- a/src/full-codegen/x87/full-codegen-x87.cc
+++ b/src/full-codegen/x87/full-codegen-x87.cc
@@ -285,39 +285,35 @@
     __ CallRuntime(Runtime::kTraceEnter);
   }
 
-  // Visit the declarations and body unless there is an illegal
-  // redeclaration.
-  if (scope()->HasIllegalRedeclaration()) {
+  // Visit the declarations and body.
+  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  {
     Comment cmnt(masm_, "[ Declarations");
-    VisitForEffect(scope()->GetIllegalRedeclaration());
+    VisitDeclarations(scope()->declarations());
+  }
 
-  } else {
-    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope()->declarations());
-    }
+  // Assert that the declarations do not use ICs. Otherwise the debugger
+  // won't be able to redirect a PC at an IC to the correct IC in newly
+  // recompiled code.
+  DCHECK_EQ(0, ic_total_count_);
 
-    // Assert that the declarations do not use ICs. Otherwise the debugger
-    // won't be able to redirect a PC at an IC to the correct IC in newly
-    // recompiled code.
-    DCHECK_EQ(0, ic_total_count_);
+  {
+    Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    Label ok;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &ok, Label::kNear);
+    __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+    __ bind(&ok);
+  }
 
-    { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
-      Label ok;
-      ExternalReference stack_limit
-          = ExternalReference::address_of_stack_limit(isolate());
-      __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(above_equal, &ok, Label::kNear);
-      __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
-      __ bind(&ok);
-    }
-
-    { Comment cmnt(masm_, "[ Body");
-      DCHECK(loop_depth() == 0);
-      VisitStatements(literal()->body());
-      DCHECK(loop_depth() == 0);
-    }
+  {
+    Comment cmnt(masm_, "[ Body");
+    DCHECK(loop_depth() == 0);
+    VisitStatements(literal()->body());
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -484,7 +480,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -592,7 +588,7 @@
                                Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
   CallIC(ic, condition->test_id());
   __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
   Split(equal, if_true, if_false, fall_through);
@@ -926,14 +922,14 @@
 
   FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
 
-  Label loop, exit;
-  ForIn loop_statement(this, stmt);
-  increment_loop_depth();
-
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  OperandStackDepthIncrement(ForIn::kElementCount);
+  OperandStackDepthIncrement(5);
+
+  Label loop, exit;
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
 
   // If the object is null or undefined, skip over the loop, otherwise convert
   // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
@@ -1000,11 +996,6 @@
   // We got a fixed array in register eax. Iterate through that.
   __ bind(&fixed_array);
 
-  // No need for a write barrier, we are storing a Smi in the feedback vector.
-  int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(ebx);
-  __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
-         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ push(Immediate(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
@@ -1035,12 +1026,8 @@
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
-  // We might get here from TurboFan or Crankshaft when something in the
-  // for-in loop body deopts and only now notice in fullcodegen, that we
-  // can now longer use the enum cache, i.e. left fast mode. So better record
-  // this information here, in case we later OSR back into this loop or
-  // reoptimize the whole function w/o rerunning the loop with the slow
-  // mode object in fullcodegen (which would result in a deopt loop).
+  // We need to filter the key, record slow-path here.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(edx);
   __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
          Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
@@ -1081,8 +1068,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ add(esp, Immediate(5 * kPointerSize));
-  OperandStackDepthDecrement(ForIn::kElementCount);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1091,31 +1077,6 @@
 }
 
 
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning. If
-  // we're running with the --always-opt or the --prepare-always-opt
-  // flag, we need to use the runtime function so that the new function
-  // we are creating here gets a chance to have its code optimized and
-  // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
-    __ mov(ebx, Immediate(info));
-    __ CallStub(&stub);
-  } else {
-    __ push(Immediate(info));
-    __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
-                             : Runtime::kNewClosure);
-  }
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
                                           FeedbackVectorSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
@@ -1546,12 +1507,6 @@
     }
   }
 
-  if (expr->has_function()) {
-    DCHECK(result_saved);
-    __ push(Operand(esp, 0));
-    __ CallRuntime(Runtime::kToFastProperties);
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1793,64 +1748,44 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  switch (expr->yield_kind()) {
-    case Yield::kSuspend:
-      // Pop value from top-of-stack slot; box result into result register.
-      EmitCreateIteratorResult(false);
-      __ push(result_register());
-      // Fall through.
-    case Yield::kInitial: {
-      Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume;
 
-      __ jmp(&suspend);
-      __ bind(&continuation);
-      // When we arrive here, the stack top is the resume mode and
-      // result_register() holds the input value (the argument given to the
-      // respective resume operation).
-      __ RecordGeneratorContinuation();
-      __ pop(ebx);
-      __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
-      __ j(not_equal, &resume);
-      __ push(result_register());
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
+  __ jmp(&suspend);
+  __ bind(&continuation);
+  // When we arrive here, the stack top is the resume mode and
+  // result_register() holds the input value (the argument given to the
+  // respective resume operation).
+  __ RecordGeneratorContinuation();
+  __ pop(ebx);
+  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+  __ j(not_equal, &resume);
+  __ push(result_register());
+  EmitCreateIteratorResult(true);
+  EmitUnwindAndReturn();
 
-      __ bind(&suspend);
-      VisitForAccumulatorValue(expr->generator_object());
-      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(continuation.pos())));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-      __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                          kDontSaveFPRegs);
-      __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
-      __ cmp(esp, ebx);
-      __ j(equal, &post_runtime);
-      __ push(eax);  // generator object
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-      __ mov(context_register(),
-             Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ bind(&post_runtime);
-      PopOperand(result_register());
-      EmitReturnSequence();
+  __ bind(&suspend);
+  OperandStackDepthIncrement(1);  // Not popped on this path.
+  VisitForAccumulatorValue(expr->generator_object());
+  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+  __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
+         Immediate(Smi::FromInt(continuation.pos())));
+  __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
+  __ mov(ecx, esi);
+  __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                      kDontSaveFPRegs);
+  __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
+  __ cmp(esp, ebx);
+  __ j(equal, &post_runtime);
+  __ push(eax);  // generator object
+  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+  __ mov(context_register(),
+         Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ bind(&post_runtime);
+  PopOperand(result_register());
+  EmitReturnSequence();
 
-      __ bind(&resume);
-      context()->Plug(result_register());
-      break;
-    }
-
-    case Yield::kFinal: {
-      // Pop value from top-of-stack slot, box result into result register.
-      OperandStackDepthDecrement(1);
-      EmitCreateIteratorResult(true);
-      EmitUnwindAndReturn();
-      break;
-    }
-
-    case Yield::kDelegating:
-      UNREACHABLE();
-  }
+  __ bind(&resume);
+  context()->Plug(result_register());
 }
 
 
@@ -1878,7 +1813,10 @@
   // Push receiver.
   __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
 
-  // Push holes for arguments to generator function.
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
   __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(edx,
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -1983,19 +1921,7 @@
   __ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
          isolate()->factory()->ToBoolean(done));
   STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(!prop->IsSuperAccess());
-
-  __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
-  __ mov(LoadDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF);
+  OperandStackDepthDecrement(1);
 }
 
 
@@ -2589,7 +2515,7 @@
   }
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-  SetCallPosition(expr);
+  SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
       __ CallRuntime(Runtime::kTraceTailCall);
@@ -2945,9 +2871,10 @@
   __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, eax);
   __ j(below, &null, Label::kNear);
 
-  // Return 'Function' for JSFunction objects.
-  __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-  __ j(equal, &function, Label::kNear);
+  // Return 'Function' for JSFunction and JSBoundFunction objects.
+  __ CmpInstanceType(eax, FIRST_FUNCTION_TYPE);
+  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+  __ j(above_equal, &function, Label::kNear);
 
   // Check if the constructor in the map is a JS function.
   __ GetMapConstructor(eax, eax, ebx);
@@ -3069,23 +2996,6 @@
 }
 
 
-void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into eax and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to an integer.
-  Label done_convert;
-  __ JumpIfSmi(eax, &done_convert, Label::kNear);
-  __ Push(eax);
-  __ CallRuntime(Runtime::kToInteger);
-  __ bind(&done_convert);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3269,6 +3179,12 @@
   context()->Plug(eax);
 }
 
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+  DCHECK_EQ(0, expr->arguments()->length());
+  __ mov(eax, NativeContextOperand());
+  __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
+  context()->Plug(eax);
+}
 
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
@@ -3310,10 +3226,12 @@
 
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+  // Push function.
+  __ LoadGlobalFunction(expr->context_index(), eax);
+  PushOperand(eax);
+
   // Push undefined as receiver.
   PushOperand(isolate()->factory()->undefined_value());
-
-  __ LoadGlobalFunction(expr->context_index(), eax);
 }
 
 
@@ -3327,58 +3245,9 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-}
 
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  if (expr->is_jsruntime()) {
-    Comment cmnt(masm_, "[ CallRuntime");
-    EmitLoadJSRuntimeFunction(expr);
-
-    // Push the target function under the receiver.
-    PushOperand(Operand(esp, 0));
-    __ mov(Operand(esp, kPointerSize), eax);
-
-    // Push the arguments ("left-to-right").
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-    EmitCallJSRuntimeFunction(expr);
-
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    context()->DropAndPlug(1, eax);
-
-  } else {
-    const Runtime::Function* function = expr->function();
-    switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name)     \
-  case Runtime::kInline##Name: {           \
-    Comment cmnt(masm_, "[ Inline" #Name); \
-    return Emit##Name(expr);               \
-  }
-      FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
-      default: {
-        Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
-        // Push the arguments ("left-to-right").
-        for (int i = 0; i < arg_count; i++) {
-          VisitForStackValue(args->at(i));
-        }
-
-        // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
-        __ CallRuntime(expr->function(), arg_count);
-        OperandStackDepthDecrement(arg_count);
-        context()->Plug(eax);
-      }
-    }
-  }
+  // Restore context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3623,11 +3492,11 @@
     __ jmp(&stub_call, Label::kNear);
     __ bind(&slow);
   }
-  if (!is_strong(language_mode())) {
-    ToNumberStub convert_stub(isolate());
-    __ CallStub(&convert_stub);
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
+
+  // Convert old value into a number.
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3667,9 +3536,6 @@
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
-  if (is_strong(language_mode())) {
-    PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
-  }
   // Store the value returned in eax.
   switch (assign_type) {
     case VARIABLE:
@@ -3795,7 +3661,7 @@
     // Check for undetectable objects => true.
     __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     Split(not_zero, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->function_string())) {
     __ JumpIfSmi(eax, if_false);
@@ -3814,7 +3680,7 @@
     __ j(below, if_false);
     // Check for callable or undetectable objects => false.
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
-              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(zero, if_true, if_false, fall_through);
 // clang-format off
 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
@@ -3925,21 +3791,16 @@
     __ cmp(eax, nil_value);
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
-    CallIC(ic, expr->CompareOperationFeedbackId());
-    __ cmp(eax, isolate()->factory()->true_value());
-    Split(equal, if_true, if_false, fall_through);
+    __ JumpIfSmi(eax, if_false);
+    __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(eax, Map::kBitFieldOffset),
+              Immediate(1 << Map::kIsUndetectable));
+    Split(not_zero, if_true, if_false, fall_through);
   }
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  context()->Plug(eax);
-}
-
-
 Register FullCodeGenerator::result_register() {
   return eax;
 }
@@ -3949,6 +3810,10 @@
   return esi;
 }
 
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ mov(value, Operand(ebp, frame_offset));
+}
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
@@ -4015,12 +3880,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
-  DCHECK(!slot.IsInvalid());
-  __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(slot)));
-}
-
 void FullCodeGenerator::DeferredCommands::EmitCommands() {
   DCHECK(!result_register().is(edx));
   __ Pop(result_register());  // Restore the accumulator.
@@ -4078,7 +3937,6 @@
       *jns_offset_address = kJnsOffset;
       break;
     case ON_STACK_REPLACEMENT:
-    case OSR_AFTER_STACK_CHECK:
       //     sub <profiling_counter>, <delta>  ;; Not changed
       //     nop
       //     nop
@@ -4116,15 +3974,10 @@
   DCHECK_EQ(kNopByteOne, *jns_instr_address);
   DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
 
-  if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
-      isolate->builtins()->OnStackReplacement()->entry()) {
-    return ON_STACK_REPLACEMENT;
-  }
-
-  DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
-            Assembler::target_address_at(call_target_address,
-                                         unoptimized_code));
-  return OSR_AFTER_STACK_CHECK;
+  DCHECK_EQ(
+      isolate->builtins()->OnStackReplacement()->entry(),
+      Assembler::target_address_at(call_target_address, unoptimized_code));
+  return ON_STACK_REPLACEMENT;
 }
 
 
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 819bd69..0df5975 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -656,6 +656,12 @@
 #elif V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && V8_OS_LINUX
     const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 0,
                                0,    0,   0,   0,   0, 0, 0, 0};
+#elif V8_TARGET_ARCH_S390X
+    const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 3,
+                               0,    0,   0,   0,   0, 0, 0, 0};
+#elif V8_TARGET_ARCH_S390
+    const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 1, 2, 1, 3,
+                               0,    0,   0,   0,   0, 0, 0, 0};
 #else
 #error Unsupported target architecture.
 #endif
@@ -680,6 +686,11 @@
     // id=B81AEC1A37F5DAF185257C3E004E8845&linkid=1n0000&c_t=
     // c9xw7v5dzsj7gt1ifgf4cjbcnskqptmr
     header->machine = 21;
+#elif V8_TARGET_ARCH_S390
+    // Processor identification value is 22 (EM_S390) as defined in the ABI:
+    // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN1691
+    // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html#AEN1599
+    header->machine = 22;
 #else
 #error Unsupported target architecture.
 #endif
@@ -772,7 +783,8 @@
     return static_cast<Binding>(info >> 4);
   }
 #if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
-     (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
+     (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) ||                   \
+     (V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
   struct SerializedLayout {
     SerializedLayout(uint32_t name,
                      uintptr_t value,
@@ -796,7 +808,7 @@
     uint16_t section;
   };
 #elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
-    (V8_TARGET_ARCH_PPC64 && V8_OS_LINUX)
+    (V8_TARGET_ARCH_PPC64 && V8_OS_LINUX) || V8_TARGET_ARCH_S390X
   struct SerializedLayout {
     SerializedLayout(uint32_t name,
                      uintptr_t value,
@@ -1145,6 +1157,8 @@
       UNIMPLEMENTED();
 #elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
       w->Write<uint8_t>(DW_OP_reg31);  // The frame pointer is here on PPC64.
+#elif V8_TARGET_ARCH_S390
+      w->Write<uint8_t>(DW_OP_reg11);  // The frame pointer's here on S390.
 #else
 #error Unsupported target architecture.
 #endif
@@ -1927,7 +1941,7 @@
 
 static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
 #ifdef __MACH_O
-  Zone zone;
+  Zone zone(isolate->allocator());
   MachO mach_o(&zone);
   Writer w(&mach_o);
 
@@ -1939,7 +1953,7 @@
 
   mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
 #else
-  Zone zone;
+  Zone zone(isolate->allocator());
   ELF elf(&zone);
   Writer w(&elf);
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index edd52b0..ed9caa9 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -644,7 +644,6 @@
   return Node::FromLocation(location)->IsWeak();
 }
 
-
 void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
   for (NodeIterator it(this); !it.done(); it.Advance()) {
     Node* node = it.node();
@@ -811,6 +810,111 @@
   return any_group_was_visited;
 }
 
+namespace {
+// Traces the information about object groups and implicit ref groups given by
+// the embedder to V8 during each GC prologue.
+class ObjectGroupsTracer {
+ public:
+  explicit ObjectGroupsTracer(Isolate* isolate);
+  void Print();
+
+ private:
+  void PrintObjectGroup(ObjectGroup* group);
+  void PrintImplicitRefGroup(ImplicitRefGroup* group);
+  void PrintObject(Object* object);
+  void PrintConstructor(JSObject* js_object);
+  void PrintInternalFields(JSObject* js_object);
+  Isolate* isolate_;
+  DISALLOW_COPY_AND_ASSIGN(ObjectGroupsTracer);
+};
+
+ObjectGroupsTracer::ObjectGroupsTracer(Isolate* isolate) : isolate_(isolate) {}
+
+void ObjectGroupsTracer::Print() {
+  GlobalHandles* global_handles = isolate_->global_handles();
+
+  PrintIsolate(isolate_, "### Tracing object groups:\n");
+
+  for (auto group : *(global_handles->object_groups())) {
+    PrintObjectGroup(group);
+  }
+  for (auto group : *(global_handles->implicit_ref_groups())) {
+    PrintImplicitRefGroup(group);
+  }
+
+  PrintIsolate(isolate_, "### Tracing object groups finished.\n");
+}
+
+void ObjectGroupsTracer::PrintObject(Object* object) {
+  if (object->IsJSObject()) {
+    JSObject* js_object = JSObject::cast(object);
+
+    PrintF("{ constructor_name: ");
+    PrintConstructor(js_object);
+    PrintF(", hidden_fields: [ ");
+    PrintInternalFields(js_object);
+    PrintF(" ] }\n");
+  } else {
+    PrintF("object of unexpected type: %p\n", object);
+  }
+}
+
+void ObjectGroupsTracer::PrintConstructor(JSObject* js_object) {
+  Object* maybe_constructor = js_object->map()->GetConstructor();
+  if (maybe_constructor->IsJSFunction()) {
+    JSFunction* constructor = JSFunction::cast(maybe_constructor);
+    String* name = String::cast(constructor->shared()->name());
+    if (name->length() == 0) name = constructor->shared()->inferred_name();
+
+    PrintF("%s", name->ToCString().get());
+  } else if (maybe_constructor->IsNull()) {
+    if (js_object->IsOddball()) {
+      PrintF("<oddball>");
+    } else {
+      PrintF("<null>");
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void ObjectGroupsTracer::PrintInternalFields(JSObject* js_object) {
+  for (int i = 0; i < js_object->GetInternalFieldCount(); ++i) {
+    if (i != 0) {
+      PrintF(", ");
+    }
+    PrintF("%p", js_object->GetInternalField(i));
+  }
+}
+
+void ObjectGroupsTracer::PrintObjectGroup(ObjectGroup* group) {
+  PrintIsolate(isolate_, "ObjectGroup (size: %lu)\n", group->length);
+  Object*** objects = group->objects;
+
+  for (size_t i = 0; i < group->length; ++i) {
+    PrintIsolate(isolate_, "  - Member: ");
+    PrintObject(*objects[i]);
+  }
+}
+
+void ObjectGroupsTracer::PrintImplicitRefGroup(ImplicitRefGroup* group) {
+  PrintIsolate(isolate_, "ImplicitRefGroup (children count: %lu)\n",
+               group->length);
+  PrintIsolate(isolate_, "  - Parent: ");
+  PrintObject(*(group->parent));
+
+  Object*** children = group->children;
+  for (size_t i = 0; i < group->length; ++i) {
+    PrintIsolate(isolate_, "  - Child: ");
+    PrintObject(*children[i]);
+  }
+}
+
+}  // namespace
+
+void GlobalHandles::PrintObjectGroups() {
+  ObjectGroupsTracer(isolate_).Print();
+}
 
 void GlobalHandles::InvokeSecondPassPhantomCallbacks(
     List<PendingPhantomCallback>* callbacks, Isolate* isolate) {
@@ -1119,7 +1223,8 @@
   }
 
   PrintF("Global Handle Statistics:\n");
-  PrintF("  allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
+  PrintF("  allocated memory = %" V8_SIZET_PREFIX V8_PTR_PREFIX "dB\n",
+         total * sizeof(Node));
   PrintF("  # weak       = %d\n", weak);
   PrintF("  # pending    = %d\n", pending);
   PrintF("  # near_death = %d\n", near_death);
diff --git a/src/global-handles.h b/src/global-handles.h
index 7047d8c..ac8487b 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -165,7 +165,7 @@
   // Clear the weakness of a global handle.
   static void* ClearWeakness(Object** location);
 
-  // Clear the weakness of a global handle.
+  // Mark the reference to this object independent of any object group.
   static void MarkIndependent(Object** location);
 
   // Mark the reference to this object externally unreachable.
@@ -242,6 +242,9 @@
   // can be skipped and false otherwise.
   bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
 
+  // Print all objects in object groups
+  void PrintObjectGroups();
+
   // Add an object group.
   // Should be only used in GC callback function before a collection.
   // All groups are destroyed after a garbage collection.
diff --git a/src/globals.h b/src/globals.h
index be401a6..e7ac2b9 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -59,6 +59,9 @@
 #if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
 #define USE_SIMULATOR 1
 #endif
+#if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390)
+#define USE_SIMULATOR 1
+#endif
 #endif
 
 // Determine whether the architecture uses an embedded constant pool
@@ -110,6 +113,7 @@
 const int kMinUInt16 = 0;
 
 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
+const int kMinUInt32 = 0;
 
 const int kCharSize      = sizeof(char);      // NOLINT
 const int kShortSize     = sizeof(short);     // NOLINT
@@ -120,6 +124,11 @@
 const int kDoubleSize    = sizeof(double);    // NOLINT
 const int kIntptrSize    = sizeof(intptr_t);  // NOLINT
 const int kPointerSize   = sizeof(void*);     // NOLINT
+#if V8_TARGET_ARCH_ARM64
+const int kFrameAlignmentInBytes = 2 * kPointerSize;
+#else
+const int kFrameAlignmentInBytes = kPointerSize;
+#endif
 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
 const int kRegisterSize  = kPointerSize + kPointerSize;
 #else
@@ -128,6 +137,12 @@
 const int kPCOnStackSize = kRegisterSize;
 const int kFPOnStackSize = kRegisterSize;
 
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+const int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
+#else
+const int kElidedFrameSlots = 0;
+#endif
+
 const int kDoubleSizeLog2 = 3;
 
 #if V8_HOST_ARCH_64_BIT
@@ -243,89 +258,36 @@
 
 // The Strict Mode (ECMA-262 5th edition, 4.2.2).
 
-enum LanguageMode {
-  // LanguageMode is expressed as a bitmask. Descriptions of the bits:
-  STRICT_BIT = 1 << 0,
-  STRONG_BIT = 1 << 1,
-  LANGUAGE_END,
-
-  // Shorthands for some common language modes.
-  SLOPPY = 0,
-  STRICT = STRICT_BIT,
-  STRONG = STRICT_BIT | STRONG_BIT
-};
+enum LanguageMode { SLOPPY, STRICT, LANGUAGE_END = 3 };
 
 
 inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
   switch (mode) {
-    case SLOPPY:
-      return os << "sloppy";
-    case STRICT:
-      return os << "strict";
-    case STRONG:
-      return os << "strong";
-    default:
-      return os << "unknown";
+    case SLOPPY: return os << "sloppy";
+    case STRICT: return os << "strict";
+    default: UNREACHABLE();
   }
+  return os;
 }
 
 
 inline bool is_sloppy(LanguageMode language_mode) {
-  return (language_mode & STRICT_BIT) == 0;
+  return language_mode == SLOPPY;
 }
 
 
 inline bool is_strict(LanguageMode language_mode) {
-  return language_mode & STRICT_BIT;
-}
-
-
-inline bool is_strong(LanguageMode language_mode) {
-  return language_mode & STRONG_BIT;
+  return language_mode != SLOPPY;
 }
 
 
 inline bool is_valid_language_mode(int language_mode) {
-  return language_mode == SLOPPY || language_mode == STRICT ||
-         language_mode == STRONG;
+  return language_mode == SLOPPY || language_mode == STRICT;
 }
 
 
-inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
-  int language_mode = 0;
-  if (strict_bit) language_mode |= STRICT_BIT;
-  if (strong_bit) language_mode |= STRONG_BIT;
-  DCHECK(is_valid_language_mode(language_mode));
-  return static_cast<LanguageMode>(language_mode);
-}
-
-
-// Strong mode behaviour must sometimes be signalled by a two valued enum where
-// caching is involved, to prevent sloppy and strict mode from being incorrectly
-// differentiated.
-enum class Strength : bool {
-  WEAK,   // sloppy, strict behaviour
-  STRONG  // strong behaviour
-};
-
-
-inline bool is_strong(Strength strength) {
-  return strength == Strength::STRONG;
-}
-
-
-inline std::ostream& operator<<(std::ostream& os, const Strength& strength) {
-  return os << (is_strong(strength) ? "strong" : "weak");
-}
-
-
-inline Strength strength(LanguageMode language_mode) {
-  return is_strong(language_mode) ? Strength::STRONG : Strength::WEAK;
-}
-
-
-inline size_t hash_value(Strength strength) {
-  return static_cast<size_t>(strength);
+inline LanguageMode construct_language_mode(bool strict_bit) {
+  return static_cast<LanguageMode>(strict_bit);
 }
 
 
@@ -525,7 +487,9 @@
   VISIT_ALL,
   VISIT_ALL_IN_SCAVENGE,
   VISIT_ALL_IN_SWEEP_NEWSPACE,
-  VISIT_ONLY_STRONG
+  VISIT_ONLY_STRONG,
+  VISIT_ONLY_STRONG_FOR_SERIALIZATION,
+  VISIT_ONLY_STRONG_ROOT_LIST,
 };
 
 // Flag indicating whether code is built into the VM (one of the natives files).
@@ -726,10 +690,13 @@
   FPR_GPR_MOV,
   LWSYNC,
   ISELECT,
+  // S390
+  DISTINCT_OPS,
+  GENERAL_INSTR_EXT,
+  FLOATING_POINT_EXT,
   NUMBER_OF_CPU_FEATURES
 };
 
-
 // Defines hints about receiver values based on structural knowledge.
 enum class ConvertReceiverMode : unsigned {
   kNullOrUndefined,     // Guaranteed to be null or undefined.
@@ -959,12 +926,6 @@
 enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
 
 
-enum ClearExceptionFlag {
-  KEEP_EXCEPTION,
-  CLEAR_EXCEPTION
-};
-
-
 enum MinusZeroMode {
   TREAT_MINUS_ZERO_AS_ZERO,
   FAIL_ON_MINUS_ZERO
@@ -1069,7 +1030,6 @@
   if (IsConciseMethod(kind)) return false;
   if (IsArrowFunction(kind)) return false;
   if (IsGeneratorFunction(kind)) return false;
-  if (is_strong(mode)) return IsClassConstructor(kind);
   return true;
 }
 
diff --git a/src/handles.cc b/src/handles.cc
index b162ba8..6331c79 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -118,9 +118,8 @@
   return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
 }
 
-
 CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
-    : isolate_(isolate) {
+    : isolate_(isolate), zone_(isolate->allocator()) {
   HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
   prev_canonical_scope_ = handle_scope_data->canonical_scope;
   handle_scope_data->canonical_scope = this;
diff --git a/src/heap-symbols.h b/src/heap-symbols.h
index 4a772eb..f019ace 100644
--- a/src/heap-symbols.h
+++ b/src/heap-symbols.h
@@ -45,7 +45,6 @@
   V(dot_string, ".")                                               \
   V(entries_string, "entries")                                     \
   V(enumerable_string, "enumerable")                               \
-  V(enumerate_string, "enumerate")                                 \
   V(Error_string, "Error")                                         \
   V(eval_string, "eval")                                           \
   V(false_string, "false")                                         \
@@ -178,7 +177,6 @@
   V(strict_function_transition_symbol)      \
   V(string_iterator_iterated_string_symbol) \
   V(string_iterator_next_index_symbol)      \
-  V(strong_function_transition_symbol)      \
   V(uninitialized_symbol)
 
 #define PUBLIC_SYMBOL_LIST(V)                \
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
index 4e6e608..972dfa6 100644
--- a/src/heap/gc-idle-time-handler.cc
+++ b/src/heap/gc-idle-time-handler.cc
@@ -12,7 +12,6 @@
 namespace internal {
 
 const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
-const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
 const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
 const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
 const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
@@ -42,86 +41,55 @@
 void GCIdleTimeHeapState::Print() {
   PrintF("contexts_disposed=%d ", contexts_disposed);
   PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
-  PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
+  PrintF("size_of_objects=%" V8_SIZET_PREFIX V8_PTR_PREFIX "d ",
+         size_of_objects);
   PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
 }
 
-
 size_t GCIdleTimeHandler::EstimateMarkingStepSize(
-    size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
+    double idle_time_in_ms, double marking_speed_in_bytes_per_ms) {
   DCHECK(idle_time_in_ms > 0);
 
   if (marking_speed_in_bytes_per_ms == 0) {
     marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
   }
 
-  size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
-  if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
-    // In the case of an overflow we return maximum marking step size.
+  double marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
+  if (marking_step_size >= kMaximumMarkingStepSize) {
     return kMaximumMarkingStepSize;
   }
-
-  if (marking_step_size > kMaximumMarkingStepSize)
-    return kMaximumMarkingStepSize;
-
   return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
 }
 
-
-size_t GCIdleTimeHandler::EstimateMarkCompactTime(
-    size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
-  // TODO(hpayer): Be more precise about the type of mark-compact event. It
-  // makes a huge difference if compaction is happening.
-  if (mark_compact_speed_in_bytes_per_ms == 0) {
-    mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
-  }
-  size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
-  return Min(result, kMaxMarkCompactTimeInMs);
-}
-
-
-size_t GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
+double GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
     size_t size_of_objects,
-    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+    double final_incremental_mark_compact_speed_in_bytes_per_ms) {
   if (final_incremental_mark_compact_speed_in_bytes_per_ms == 0) {
     final_incremental_mark_compact_speed_in_bytes_per_ms =
         kInitialConservativeFinalIncrementalMarkCompactSpeed;
   }
-  size_t result =
+  double result =
       size_of_objects / final_incremental_mark_compact_speed_in_bytes_per_ms;
-  return Min(result, kMaxFinalIncrementalMarkCompactTimeInMs);
+  return Min<double>(result, kMaxFinalIncrementalMarkCompactTimeInMs);
 }
 
-
-bool GCIdleTimeHandler::ShouldDoMarkCompact(
-    size_t idle_time_in_ms, size_t size_of_objects,
-    size_t mark_compact_speed_in_bytes_per_ms) {
-  return idle_time_in_ms >= kMaxScheduledIdleTime &&
-         idle_time_in_ms >=
-             EstimateMarkCompactTime(size_of_objects,
-                                     mark_compact_speed_in_bytes_per_ms);
-}
-
-
 bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
     int contexts_disposed, double contexts_disposal_rate) {
   return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
          contexts_disposal_rate < kHighContextDisposalRate;
 }
 
-
 bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
-    size_t idle_time_in_ms, size_t size_of_objects,
-    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+    double idle_time_in_ms, size_t size_of_objects,
+    double final_incremental_mark_compact_speed_in_bytes_per_ms) {
   return idle_time_in_ms >=
          EstimateFinalIncrementalMarkCompactTime(
              size_of_objects,
              final_incremental_mark_compact_speed_in_bytes_per_ms);
 }
 
-
 bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
-    size_t idle_time_in_ms) {
+    double idle_time_in_ms) {
   // TODO(jochen): Estimate the time it will take to build the object groups.
   return idle_time_in_ms >= kMinTimeForOverApproximatingWeakClosureInMs;
 }
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index 74ef1b1..39dea7e 100644
--- a/src/heap/gc-idle-time-handler.h
+++ b/src/heap/gc-idle-time-handler.h
@@ -90,9 +90,6 @@
   static const size_t kInitialConservativeFinalIncrementalMarkCompactSpeed =
       2 * MB;
 
-  // Maximum mark-compact time returned by EstimateMarkCompactTime.
-  static const size_t kMaxMarkCompactTimeInMs;
-
   // Maximum final incremental mark-compact time returned by
   // EstimateFinalIncrementalMarkCompactTime.
   static const size_t kMaxFinalIncrementalMarkCompactTimeInMs;
@@ -130,27 +127,20 @@
 
   void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
 
-  static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
-                                        size_t marking_speed_in_bytes_per_ms);
+  static size_t EstimateMarkingStepSize(double idle_time_in_ms,
+                                        double marking_speed_in_bytes_per_ms);
 
-  static size_t EstimateMarkCompactTime(
-      size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
-
-  static size_t EstimateFinalIncrementalMarkCompactTime(
-      size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
-
-  static bool ShouldDoMarkCompact(size_t idle_time_in_ms,
-                                  size_t size_of_objects,
-                                  size_t mark_compact_speed_in_bytes_per_ms);
+  static double EstimateFinalIncrementalMarkCompactTime(
+      size_t size_of_objects, double mark_compact_speed_in_bytes_per_ms);
 
   static bool ShouldDoContextDisposalMarkCompact(int context_disposed,
                                                  double contexts_disposal_rate);
 
   static bool ShouldDoFinalIncrementalMarkCompact(
-      size_t idle_time_in_ms, size_t size_of_objects,
-      size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
+      double idle_time_in_ms, size_t size_of_objects,
+      double final_incremental_mark_compact_speed_in_bytes_per_ms);
 
-  static bool ShouldDoOverApproximateWeakClosure(size_t idle_time_in_ms);
+  static bool ShouldDoOverApproximateWeakClosure(double idle_time_in_ms);
 
  private:
   GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index ec1ad65..3c46f52 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -24,6 +24,13 @@
 GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
     : tracer_(tracer), scope_(scope) {
   start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+  // TODO(cbruni): remove once we fully moved to a trace-based system.
+  if (FLAG_runtime_call_stats) {
+    RuntimeCallStats* stats =
+        tracer_->heap_->isolate()->counters()->runtime_call_stats();
+    timer_.Initialize(&stats->GC, stats->current_timer());
+    stats->Enter(&timer_);
+  }
 }
 
 
@@ -31,26 +38,25 @@
   DCHECK(scope_ < NUMBER_OF_SCOPES);  // scope_ is unsigned.
   tracer_->current_.scopes[scope_] +=
       tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+  // TODO(cbruni): remove once we fully moved to a trace-based system.
+  if (FLAG_runtime_call_stats) {
+    tracer_->heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+  }
 }
 
-
-GCTracer::AllocationEvent::AllocationEvent(double duration,
-                                           size_t allocation_in_bytes) {
-  duration_ = duration;
-  allocation_in_bytes_ = allocation_in_bytes;
+const char* GCTracer::Scope::Name(ScopeId id) {
+#define CASE(scope)  \
+  case Scope::scope: \
+    return "V8.GC_" #scope;
+  switch (id) {
+    TRACER_SCOPES(CASE)
+    case Scope::NUMBER_OF_SCOPES:
+      break;
+  }
+#undef CASE
+  return "(unknown)";
 }
 
-
-GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
-  time_ = time;
-}
-
-
-GCTracer::SurvivalEvent::SurvivalEvent(double promotion_ratio) {
-  promotion_ratio_ = promotion_ratio;
-}
-
-
 GCTracer::Event::Event(Type type, const char* gc_reason,
                        const char* collector_reason)
     : type(type),
@@ -182,9 +188,15 @@
       start_time, committed_memory);
   heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
       start_time, used_memory);
+  // TODO(cbruni): remove once we fully moved to a trace-based system.
+  if (FLAG_runtime_call_stats) {
+    RuntimeCallStats* stats =
+        heap_->isolate()->counters()->runtime_call_stats();
+    timer_.Initialize(&stats->GC, stats->current_timer());
+    stats->Enter(&timer_);
+  }
 }
 
-
 void GCTracer::Stop(GarbageCollector collector) {
   start_counter_--;
   if (start_counter_ != 0) {
@@ -215,6 +227,7 @@
   heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
       current_.end_time, used_memory);
 
+  double duration = current_.end_time - current_.start_time;
   if (current_.type == Event::SCAVENGER) {
     current_.incremental_marking_steps =
         current_.cumulative_incremental_marking_steps -
@@ -228,7 +241,10 @@
     current_.pure_incremental_marking_duration =
         current_.cumulative_pure_incremental_marking_duration -
         previous_.cumulative_pure_incremental_marking_duration;
-    scavenger_events_.push_front(current_);
+    recorded_scavenges_total_.Push(
+        MakeBytesAndDuration(current_.new_space_object_size, duration));
+    recorded_scavenges_survived_.Push(MakeBytesAndDuration(
+        current_.survived_new_space_object_size, duration));
   } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
     current_.incremental_marking_steps =
         current_.cumulative_incremental_marking_steps -
@@ -247,20 +263,24 @@
         previous_incremental_mark_compactor_event_
             .cumulative_pure_incremental_marking_duration;
     longest_incremental_marking_step_ = 0.0;
-    incremental_mark_compactor_events_.push_front(current_);
+    recorded_incremental_marking_steps_.Push(
+        MakeBytesAndDuration(current_.incremental_marking_bytes,
+                             current_.pure_incremental_marking_duration));
+    recorded_incremental_mark_compacts_.Push(
+        MakeBytesAndDuration(current_.start_object_size, duration));
     combined_mark_compact_speed_cache_ = 0.0;
   } else {
     DCHECK(current_.incremental_marking_bytes == 0);
     DCHECK(current_.incremental_marking_duration == 0);
     DCHECK(current_.pure_incremental_marking_duration == 0);
     longest_incremental_marking_step_ = 0.0;
-    mark_compactor_events_.push_front(current_);
+    recorded_mark_compacts_.Push(
+        MakeBytesAndDuration(current_.start_object_size, duration));
     combined_mark_compact_speed_cache_ = 0.0;
   }
 
   // TODO(ernstm): move the code below out of GCTracer.
 
-  double duration = current_.end_time - current_.start_time;
   double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
 
   heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
@@ -281,6 +301,10 @@
   longest_incremental_marking_finalization_step_ = 0.0;
   cumulative_incremental_marking_finalization_steps_ = 0;
   cumulative_incremental_marking_finalization_duration_ = 0.0;
+  // TODO(cbruni): remove once we fully moved to a trace-based system.
+  if (FLAG_runtime_call_stats) {
+    heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+  }
 }
 
 
@@ -313,11 +337,14 @@
 
 void GCTracer::AddAllocation(double current_ms) {
   allocation_time_ms_ = current_ms;
-  new_space_allocation_events_.push_front(AllocationEvent(
-      allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_));
-  old_generation_allocation_events_.push_front(
-      AllocationEvent(allocation_duration_since_gc_,
-                      old_generation_allocation_in_bytes_since_gc_));
+  if (allocation_duration_since_gc_ > 0) {
+    recorded_new_generation_allocations_.Push(
+        MakeBytesAndDuration(new_space_allocation_in_bytes_since_gc_,
+                             allocation_duration_since_gc_));
+    recorded_old_generation_allocations_.Push(
+        MakeBytesAndDuration(old_generation_allocation_in_bytes_since_gc_,
+                             allocation_duration_since_gc_));
+  }
   allocation_duration_since_gc_ = 0;
   new_space_allocation_in_bytes_since_gc_ = 0;
   old_generation_allocation_in_bytes_since_gc_ = 0;
@@ -325,19 +352,19 @@
 
 
 void GCTracer::AddContextDisposalTime(double time) {
-  context_disposal_events_.push_front(ContextDisposalEvent(time));
+  recorded_context_disposal_times_.Push(time);
 }
 
 
 void GCTracer::AddCompactionEvent(double duration,
                                   intptr_t live_bytes_compacted) {
-  compaction_events_.push_front(
-      CompactionEvent(duration, live_bytes_compacted));
+  recorded_compactions_.Push(
+      MakeBytesAndDuration(live_bytes_compacted, duration));
 }
 
 
 void GCTracer::AddSurvivalRatio(double promotion_ratio) {
-  survival_events_.push_front(SurvivalEvent(promotion_ratio));
+  recorded_survival_ratios_.Push(promotion_ratio);
 }
 
 
@@ -394,9 +421,8 @@
          static_cast<double>(current_.end_object_size) / MB,
          static_cast<double>(current_.end_memory_size) / MB);
 
-  int external_time = static_cast<int>(current_.scopes[Scope::EXTERNAL]);
   double duration = current_.end_time - current_.start_time;
-  Output("%.1f / %d ms", duration, external_time);
+  Output("%.1f / %.1f ms", duration, TotalExternalTime());
 
   if (current_.type == Event::SCAVENGER) {
     if (current_.incremental_marking_steps > 0) {
@@ -448,10 +474,12 @@
                    "code=%.2f "
                    "semispace=%.2f "
                    "object_groups=%.2f "
+                   "external_prologue=%.2f "
+                   "external_epilogue=%.2f "
+                   "external_weak_global_handles=%.2f "
                    "steps_count=%d "
                    "steps_took=%.1f "
-                   "scavenge_throughput=%" V8_PTR_PREFIX
-                   "d "
+                   "scavenge_throughput=%.f "
                    "total_size_before=%" V8_PTR_PREFIX
                    "d "
                    "total_size_after=%" V8_PTR_PREFIX
@@ -473,8 +501,7 @@
                    "average_survival_ratio=%.1f%% "
                    "promotion_rate=%.1f%% "
                    "semi_space_copy_rate=%.1f%% "
-                   "new_space_allocation_throughput=%" V8_PTR_PREFIX
-                   "d "
+                   "new_space_allocation_throughput=%.1f "
                    "context_disposal_rate=%.1f\n",
                    heap_->isolate()->time_millis_since_init(), duration,
                    spent_in_mutator, current_.TypeName(true),
@@ -486,6 +513,9 @@
                    current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
                    current_.scopes[Scope::SCAVENGER_SEMISPACE],
                    current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
+                   current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE],
+                   current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE],
+                   current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
                    current_.incremental_marking_steps,
                    current_.incremental_marking_duration,
                    ScavengeSpeedInBytesPerMillisecond(),
@@ -509,7 +539,6 @@
           "mutator=%.1f "
           "gc=%s "
           "reduce_memory=%d "
-          "external=%.1f "
           "clear=%1.f "
           "clear.code_flush=%.1f "
           "clear.dependent_code=%.1f "
@@ -524,18 +553,27 @@
           "evacuate=%.1f "
           "evacuate.candidates=%.1f "
           "evacuate.clean_up=%.1f "
-          "evacuate.new_space=%.1f "
+          "evacuate.copy=%.1f "
           "evacuate.update_pointers=%.1f "
           "evacuate.update_pointers.between_evacuated=%.1f "
           "evacuate.update_pointers.to_evacuated=%.1f "
           "evacuate.update_pointers.to_new=%.1f "
           "evacuate.update_pointers.weak=%.1f "
+          "external.mc_prologue=%.1f "
+          "external.mc_epilogue=%.1f "
+          "external.mc_incremental_prologue=%.1f "
+          "external.mc_incremental_epilogue=%.1f "
+          "external.weak_global_handles=%.1f "
           "finish=%.1f "
           "mark=%.1f "
           "mark.finish_incremental=%.1f "
           "mark.prepare_code_flush=%.1f "
           "mark.roots=%.1f "
           "mark.weak_closure=%.1f "
+          "mark.weak_closure.ephemeral=%.1f "
+          "mark.weak_closure.weak_handles=%.1f "
+          "mark.weak_closure.weak_roots=%.1f "
+          "mark.weak_closure.harmony=%.1f "
           "sweep=%.1f "
           "sweep.code=%.1f "
           "sweep.map=%.1f "
@@ -547,8 +585,7 @@
           "finalization_steps_count=%d "
           "finalization_steps_took=%.1f "
           "finalization_longest_step=%.1f "
-          "incremental_marking_throughput=%" V8_PTR_PREFIX
-          "d "
+          "incremental_marking_throughput=%.f "
           "total_size_before=%" V8_PTR_PREFIX
           "d "
           "total_size_after=%" V8_PTR_PREFIX
@@ -570,13 +607,12 @@
           "average_survival_ratio=%.1f%% "
           "promotion_rate=%.1f%% "
           "semi_space_copy_rate=%.1f%% "
-          "new_space_allocation_throughput=%" V8_PTR_PREFIX
-          "d "
+          "new_space_allocation_throughput=%.1f "
           "context_disposal_rate=%.1f "
-          "compaction_speed=%" V8_PTR_PREFIX "d\n",
+          "compaction_speed=%.f\n",
           heap_->isolate()->time_millis_since_init(), duration,
           spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
-          current_.scopes[Scope::EXTERNAL], current_.scopes[Scope::MC_CLEAR],
+          current_.scopes[Scope::MC_CLEAR],
           current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
           current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
           current_.scopes[Scope::MC_CLEAR_GLOBAL_HANDLES],
@@ -590,17 +626,26 @@
           current_.scopes[Scope::MC_EVACUATE],
           current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
           current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
-          current_.scopes[Scope::MC_EVACUATE_NEW_SPACE],
+          current_.scopes[Scope::MC_EVACUATE_COPY],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
+          current_.scopes[Scope::MC_EXTERNAL_PROLOGUE],
+          current_.scopes[Scope::MC_EXTERNAL_EPILOGUE],
+          current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
+          current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
+          current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
           current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
           current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
           current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
           current_.scopes[Scope::MC_MARK_ROOTS],
           current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
+          current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
+          current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
+          current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
+          current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_HARMONY],
           current_.scopes[Scope::MC_SWEEP],
           current_.scopes[Scope::MC_SWEEP_CODE],
           current_.scopes[Scope::MC_SWEEP_MAP],
@@ -632,181 +677,72 @@
   }
 }
 
-
-double GCTracer::MeanDuration(const EventBuffer& events) const {
-  if (events.empty()) return 0.0;
-
-  double mean = 0.0;
-  EventBuffer::const_iterator iter = events.begin();
-  while (iter != events.end()) {
-    mean += iter->end_time - iter->start_time;
-    ++iter;
-  }
-
-  return mean / events.size();
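+// Averages the speed recorded in the given ring buffer, optionally seeded
+// with an extra (bytes, duration) sample. When time_ms is non-zero, only the
+// most recent samples up to roughly time_ms of accumulated duration are
+// considered. The result is clamped to [1 byte/ms, 1 GB/ms]; an empty sum
+// yields 0.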
+double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+                              const BytesAndDuration& initial, double time_ms) {
+  BytesAndDuration sum = buffer.Sum(
+      [time_ms](BytesAndDuration a, BytesAndDuration b) {
+        if (time_ms != 0 && a.second >= time_ms) return a;
+        return std::make_pair(a.first + b.first, a.second + b.second);
+      },
+      initial);
+  uint64_t bytes = sum.first;
+  double durations = sum.second;
+  if (durations == 0.0) return 0;
+  double speed = bytes / durations;
+  const int max_speed = 1024 * MB;
+  const int min_speed = 1;
+  if (speed >= max_speed) return max_speed;
+  if (speed <= min_speed) return min_speed;
+  return speed;
 }
 
-
-double GCTracer::MaxDuration(const EventBuffer& events) const {
-  if (events.empty()) return 0.0;
-
-  double maximum = 0.0f;
-  EventBuffer::const_iterator iter = events.begin();
-  while (iter != events.end()) {
-    maximum = Max(iter->end_time - iter->start_time, maximum);
-    ++iter;
-  }
-
-  return maximum;
+double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
+  return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
 }
 
-
-double GCTracer::MeanIncrementalMarkingDuration() const {
-  if (cumulative_incremental_marking_steps_ == 0) return 0.0;
-
-  // We haven't completed an entire round of incremental marking, yet.
-  // Use data from GCTracer instead of data from event buffers.
-  if (incremental_mark_compactor_events_.empty()) {
-    return cumulative_incremental_marking_duration_ /
-           cumulative_incremental_marking_steps_;
-  }
-
-  int steps = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
-  while (iter != incremental_mark_compactor_events_.end()) {
-    steps += iter->incremental_marking_steps;
-    durations += iter->incremental_marking_duration;
-    ++iter;
-  }
-
-  if (steps == 0) return 0.0;
-
-  return durations / steps;
-}
-
-
-double GCTracer::MaxIncrementalMarkingDuration() const {
-  // We haven't completed an entire round of incremental marking, yet.
-  // Use data from GCTracer instead of data from event buffers.
-  if (incremental_mark_compactor_events_.empty())
-    return longest_incremental_marking_step_;
-
-  double max_duration = 0.0;
-  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
-  while (iter != incremental_mark_compactor_events_.end())
-    max_duration = Max(iter->longest_incremental_marking_step, max_duration);
-
-  return max_duration;
-}
-
-
-intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
+double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
   if (cumulative_incremental_marking_duration_ == 0.0) return 0;
-
   // We haven't completed an entire round of incremental marking, yet.
   // Use data from GCTracer instead of data from event buffers.
-  if (incremental_mark_compactor_events_.empty()) {
-    return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
-                                 cumulative_pure_incremental_marking_duration_);
+  if (recorded_incremental_marking_steps_.Count() == 0) {
+    return cumulative_incremental_marking_bytes_ /
+           cumulative_pure_incremental_marking_duration_;
   }
-
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
-  while (iter != incremental_mark_compactor_events_.end()) {
-    bytes += iter->incremental_marking_bytes;
-    durations += iter->pure_incremental_marking_duration;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+  return AverageSpeed(recorded_incremental_marking_steps_);
 }
 
-
-intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
+double GCTracer::ScavengeSpeedInBytesPerMillisecond(
     ScavengeSpeedMode mode) const {
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = scavenger_events_.begin();
-  while (iter != scavenger_events_.end()) {
-    bytes += mode == kForAllObjects ? iter->new_space_object_size
-                                    : iter->survived_new_space_object_size;
-    durations += iter->end_time - iter->start_time;
-    ++iter;
+  if (mode == kForAllObjects) {
+    return AverageSpeed(recorded_scavenges_total_);
+  } else {
+    return AverageSpeed(recorded_scavenges_survived_);
   }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
-
-intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
-  if (compaction_events_.size() == 0) return 0;
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  CompactionEventBuffer::const_iterator iter = compaction_events_.begin();
-  while (iter != compaction_events_.end()) {
-    bytes += iter->live_bytes_compacted;
-    durations += iter->duration;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<intptr_t>(static_cast<intptr_t>(bytes / durations + 0.5), 1);
+double GCTracer::CompactionSpeedInBytesPerMillisecond() const {
+  return AverageSpeed(recorded_compactions_);
 }
 
-
-intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
-  while (iter != mark_compactor_events_.end()) {
-    bytes += iter->start_object_size;
-    durations += iter->end_time - iter->start_time;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+double GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
+  return AverageSpeed(recorded_mark_compacts_);
 }
 
-
-intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
-    const {
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
-  while (iter != incremental_mark_compactor_events_.end()) {
-    bytes += iter->start_object_size;
-    durations += iter->end_time - iter->start_time;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+double GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const {
+  return AverageSpeed(recorded_incremental_mark_compacts_);
 }
 
-
 double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
   if (combined_mark_compact_speed_cache_ > 0)
     return combined_mark_compact_speed_cache_;
   const double kMinimumMarkingSpeed = 0.5;
-  double speed1 =
-      static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
-  double speed2 = static_cast<double>(
-      FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+  double speed1 = IncrementalMarkingSpeedInBytesPerMillisecond();
+  double speed2 = FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
     // No data for the incremental marking speed.
     // Return the non-incremental mark-compact speed.
     combined_mark_compact_speed_cache_ =
-        static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
+        MarkCompactSpeedInBytesPerMillisecond();
   } else {
     // Combine the speed of incremental step and the speed of the final step.
     // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
@@ -815,101 +751,59 @@
   return combined_mark_compact_speed_cache_;
 }
 
-
-size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
+double GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   size_t bytes = new_space_allocation_in_bytes_since_gc_;
   double durations = allocation_duration_since_gc_;
-  AllocationEventBuffer::const_iterator iter =
-      new_space_allocation_events_.begin();
-  const size_t max_bytes = static_cast<size_t>(-1);
-  while (iter != new_space_allocation_events_.end() &&
-         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
-    bytes += iter->allocation_in_bytes_;
-    durations += iter->duration_;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+  return AverageSpeed(recorded_new_generation_allocations_,
+                      MakeBytesAndDuration(bytes, durations), time_ms);
 }
 
-
-size_t GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
+double GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   size_t bytes = old_generation_allocation_in_bytes_since_gc_;
   double durations = allocation_duration_since_gc_;
-  AllocationEventBuffer::const_iterator iter =
-      old_generation_allocation_events_.begin();
-  const size_t max_bytes = static_cast<size_t>(-1);
-  while (iter != old_generation_allocation_events_.end() &&
-         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
-    bytes += iter->allocation_in_bytes_;
-    durations += iter->duration_;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+  return AverageSpeed(recorded_old_generation_allocations_,
+                      MakeBytesAndDuration(bytes, durations), time_ms);
 }
 
-
-size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+double GCTracer::AllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
          OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
 }
 
-
-size_t GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
+double GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
   return AllocationThroughputInBytesPerMillisecond(kThroughputTimeFrameMs);
 }
 
-
-size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
+double GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
     const {
   return OldGenerationAllocationThroughputInBytesPerMillisecond(
       kThroughputTimeFrameMs);
 }
 
-
 double GCTracer::ContextDisposalRateInMilliseconds() const {
-  if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
-
+  if (recorded_context_disposal_times_.Count() <
+      recorded_context_disposal_times_.kSize)
+    return 0.0;
   double begin = heap_->MonotonicallyIncreasingTimeInMs();
-  double end = 0.0;
-  ContextDisposalEventBuffer::const_iterator iter =
-      context_disposal_events_.begin();
-  while (iter != context_disposal_events_.end()) {
-    end = iter->time_;
-    ++iter;
-  }
-
-  return (begin - end) / context_disposal_events_.size();
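+  // Sum visits the newest sample first, so repeatedly returning b leaves the
+  // oldest recorded disposal time in end.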
+  double end = recorded_context_disposal_times_.Sum(
+      [](double a, double b) { return b; }, 0.0);
+  return (begin - end) / recorded_context_disposal_times_.Count();
 }
 
-
 double GCTracer::AverageSurvivalRatio() const {
-  if (survival_events_.size() == 0) return 0.0;
-
-  double sum_of_rates = 0.0;
-  SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
-  while (iter != survival_events_.end()) {
-    sum_of_rates += iter->promotion_ratio_;
-    ++iter;
-  }
-
-  return sum_of_rates / static_cast<double>(survival_events_.size());
+  if (recorded_survival_ratios_.Count() == 0) return 0.0;
+  double sum = recorded_survival_ratios_.Sum(
+      [](double a, double b) { return a + b; }, 0.0);
+  return sum / recorded_survival_ratios_.Count();
 }
 
-
 bool GCTracer::SurvivalEventsRecorded() const {
-  return survival_events_.size() > 0;
+  return recorded_survival_ratios_.Count() > 0;
 }
 
-
-void GCTracer::ResetSurvivalEvents() { survival_events_.reset(); }
+void GCTracer::ResetSurvivalEvents() { recorded_survival_ratios_.Reset(); }
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index e8ec168..9ea3cce 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -6,88 +6,114 @@
 #define V8_HEAP_GC_TRACER_H_
 
 #include "src/base/platform/platform.h"
+#include "src/counters.h"
 #include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
-// A simple ring buffer class with maximum size known at compile time.
-// The class only implements the functionality required in GCTracer.
-template <typename T, size_t MAX_SIZE>
+template <typename T>
 class RingBuffer {
  public:
-  class const_iterator {
-   public:
-    const_iterator() : index_(0), elements_(NULL) {}
-
-    const_iterator(size_t index, const T* elements)
-        : index_(index), elements_(elements) {}
-
-    bool operator==(const const_iterator& rhs) const {
-      return elements_ == rhs.elements_ && index_ == rhs.index_;
+  RingBuffer() { Reset(); }
+  static const int kSize = 10;
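+  // Appends a value; once the buffer holds kSize elements, the oldest one is
+  // overwritten.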
+  void Push(const T& value) {
+    if (count_ == kSize) {
+      elements_[start_++] = value;
+      if (start_ == kSize) start_ = 0;
+    } else {
+      DCHECK_EQ(start_, 0);
+      elements_[count_++] = value;
     }
-
-    bool operator!=(const const_iterator& rhs) const {
-      return elements_ != rhs.elements_ || index_ != rhs.index_;
-    }
-
-    operator const T*() const { return elements_ + index_; }
-
-    const T* operator->() const { return elements_ + index_; }
-
-    const T& operator*() const { return elements_[index_]; }
-
-    const_iterator& operator++() {
-      index_ = (index_ + 1) % (MAX_SIZE + 1);
-      return *this;
-    }
-
-    const_iterator& operator--() {
-      index_ = (index_ + MAX_SIZE) % (MAX_SIZE + 1);
-      return *this;
-    }
-
-   private:
-    size_t index_;
-    const T* elements_;
-  };
-
-  RingBuffer() : begin_(0), end_(0) {}
-
-  bool empty() const { return begin_ == end_; }
-  size_t size() const {
-    return (end_ - begin_ + MAX_SIZE + 1) % (MAX_SIZE + 1);
-  }
-  const_iterator begin() const { return const_iterator(begin_, elements_); }
-  const_iterator end() const { return const_iterator(end_, elements_); }
-  const_iterator back() const { return --end(); }
-  void push_back(const T& element) {
-    elements_[end_] = element;
-    end_ = (end_ + 1) % (MAX_SIZE + 1);
-    if (end_ == begin_) begin_ = (begin_ + 1) % (MAX_SIZE + 1);
-  }
-  void push_front(const T& element) {
-    begin_ = (begin_ + MAX_SIZE) % (MAX_SIZE + 1);
-    if (begin_ == end_) end_ = (end_ + MAX_SIZE) % (MAX_SIZE + 1);
-    elements_[begin_] = element;
   }
 
-  void reset() {
-    begin_ = 0;
-    end_ = 0;
+  int Count() const { return count_; }
+
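+  // Folds the stored elements into a single value, visiting the most
+  // recently pushed element first and wrapping around the fixed-size array.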
+  template <typename Callback>
+  T Sum(Callback callback, const T& initial) const {
+    int j = start_ + count_ - 1;
+    if (j >= kSize) j -= kSize;
+    T result = initial;
+    for (int i = 0; i < count_; i++) {
+      result = callback(result, elements_[j]);
+      if (--j == -1) j += kSize;
+    }
+    return result;
   }
 
+  void Reset() { start_ = count_ = 0; }
+
  private:
-  T elements_[MAX_SIZE + 1];
-  size_t begin_;
-  size_t end_;
-
+  T elements_[kSize];
+  int start_;
+  int count_;
   DISALLOW_COPY_AND_ASSIGN(RingBuffer);
 };
 
+typedef std::pair<uint64_t, double> BytesAndDuration;
+
+inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
+  return std::make_pair(bytes, duration);
+}
 
 enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
 
+#define TRACER_SCOPES(F)                           \
+  F(EXTERNAL_WEAK_GLOBAL_HANDLES)                  \
+  F(MC_CLEAR)                                      \
+  F(MC_CLEAR_CODE_FLUSH)                           \
+  F(MC_CLEAR_DEPENDENT_CODE)                       \
+  F(MC_CLEAR_GLOBAL_HANDLES)                       \
+  F(MC_CLEAR_MAPS)                                 \
+  F(MC_CLEAR_SLOTS_BUFFER)                         \
+  F(MC_CLEAR_STORE_BUFFER)                         \
+  F(MC_CLEAR_STRING_TABLE)                         \
+  F(MC_CLEAR_WEAK_CELLS)                           \
+  F(MC_CLEAR_WEAK_COLLECTIONS)                     \
+  F(MC_CLEAR_WEAK_LISTS)                           \
+  F(MC_EVACUATE)                                   \
+  F(MC_EVACUATE_CANDIDATES)                        \
+  F(MC_EVACUATE_CLEAN_UP)                          \
+  F(MC_EVACUATE_COPY)                              \
+  F(MC_EVACUATE_UPDATE_POINTERS)                   \
+  F(MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED) \
+  F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED)      \
+  F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW)            \
+  F(MC_EVACUATE_UPDATE_POINTERS_WEAK)              \
+  F(MC_EXTERNAL_EPILOGUE)                          \
+  F(MC_EXTERNAL_PROLOGUE)                          \
+  F(MC_FINISH)                                     \
+  F(MC_INCREMENTAL_FINALIZE)                       \
+  F(MC_INCREMENTAL_EXTERNAL_EPILOGUE)              \
+  F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)              \
+  F(MC_MARK)                                       \
+  F(MC_MARK_FINISH_INCREMENTAL)                    \
+  F(MC_MARK_PREPARE_CODE_FLUSH)                    \
+  F(MC_MARK_ROOTS)                                 \
+  F(MC_MARK_WEAK_CLOSURE)                          \
+  F(MC_MARK_WEAK_CLOSURE_EPHEMERAL)                \
+  F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES)             \
+  F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS)               \
+  F(MC_MARK_WEAK_CLOSURE_HARMONY)                  \
+  F(MC_SWEEP)                                      \
+  F(MC_SWEEP_CODE)                                 \
+  F(MC_SWEEP_MAP)                                  \
+  F(MC_SWEEP_OLD)                                  \
+  F(SCAVENGER_CODE_FLUSH_CANDIDATES)               \
+  F(SCAVENGER_EXTERNAL_EPILOGUE)                   \
+  F(SCAVENGER_EXTERNAL_PROLOGUE)                   \
+  F(SCAVENGER_OBJECT_GROUPS)                       \
+  F(SCAVENGER_OLD_TO_NEW_POINTERS)                 \
+  F(SCAVENGER_ROOTS)                               \
+  F(SCAVENGER_SCAVENGE)                            \
+  F(SCAVENGER_SEMISPACE)                           \
+  F(SCAVENGER_WEAK)
+
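+// Opens a GCTracer::Scope for the given scope id and emits a matching
+// disabled-by-default "v8" trace event named after that scope.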
+#define TRACE_GC(tracer, scope_id)                             \
+  GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id);       \
+  GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id); \
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"),                \
+               GCTracer::Scope::Name(gc_tracer_scope_id))
 
 // GCTracer collects and prints ONE line after each garbage collector
 // invocation IFF --trace_gc is used.
@@ -97,112 +123,26 @@
   class Scope {
    public:
     enum ScopeId {
-      EXTERNAL,
-      MC_CLEAR,
-      MC_CLEAR_CODE_FLUSH,
-      MC_CLEAR_DEPENDENT_CODE,
-      MC_CLEAR_GLOBAL_HANDLES,
-      MC_CLEAR_MAPS,
-      MC_CLEAR_SLOTS_BUFFER,
-      MC_CLEAR_STORE_BUFFER,
-      MC_CLEAR_STRING_TABLE,
-      MC_CLEAR_WEAK_CELLS,
-      MC_CLEAR_WEAK_COLLECTIONS,
-      MC_CLEAR_WEAK_LISTS,
-      MC_EVACUATE,
-      MC_EVACUATE_CANDIDATES,
-      MC_EVACUATE_CLEAN_UP,
-      MC_EVACUATE_NEW_SPACE,
-      MC_EVACUATE_UPDATE_POINTERS,
-      MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED,
-      MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED,
-      MC_EVACUATE_UPDATE_POINTERS_TO_NEW,
-      MC_EVACUATE_UPDATE_POINTERS_WEAK,
-      MC_FINISH,
-      MC_INCREMENTAL_FINALIZE,
-      MC_MARK,
-      MC_MARK_FINISH_INCREMENTAL,
-      MC_MARK_PREPARE_CODE_FLUSH,
-      MC_MARK_ROOTS,
-      MC_MARK_WEAK_CLOSURE,
-      MC_SWEEP,
-      MC_SWEEP_CODE,
-      MC_SWEEP_MAP,
-      MC_SWEEP_OLD,
-      SCAVENGER_CODE_FLUSH_CANDIDATES,
-      SCAVENGER_OBJECT_GROUPS,
-      SCAVENGER_OLD_TO_NEW_POINTERS,
-      SCAVENGER_ROOTS,
-      SCAVENGER_SCAVENGE,
-      SCAVENGER_SEMISPACE,
-      SCAVENGER_WEAK,
-      NUMBER_OF_SCOPES
+#define DEFINE_SCOPE(scope) scope,
+      TRACER_SCOPES(DEFINE_SCOPE)
+#undef DEFINE_SCOPE
+          NUMBER_OF_SCOPES
     };
 
     Scope(GCTracer* tracer, ScopeId scope);
     ~Scope();
+    static const char* Name(ScopeId id);
 
    private:
     GCTracer* tracer_;
     ScopeId scope_;
     double start_time_;
+    RuntimeCallTimer timer_;
 
     DISALLOW_COPY_AND_ASSIGN(Scope);
   };
 
 
-  class AllocationEvent {
-   public:
-    // Default constructor leaves the event uninitialized.
-    AllocationEvent() {}
-
-    AllocationEvent(double duration, size_t allocation_in_bytes);
-
-    // Time spent in the mutator during the end of the last sample to the
-    // beginning of the next sample.
-    double duration_;
-
-    // Memory allocated in the new space during the end of the last sample
-    // to the beginning of the next sample
-    size_t allocation_in_bytes_;
-  };
-
-
-  class CompactionEvent {
-   public:
-    CompactionEvent() : duration(0), live_bytes_compacted(0) {}
-
-    CompactionEvent(double duration, intptr_t live_bytes_compacted)
-        : duration(duration), live_bytes_compacted(live_bytes_compacted) {}
-
-    double duration;
-    intptr_t live_bytes_compacted;
-  };
-
-
-  class ContextDisposalEvent {
-   public:
-    // Default constructor leaves the event uninitialized.
-    ContextDisposalEvent() {}
-
-    explicit ContextDisposalEvent(double time);
-
-    // Time when context disposal event happened.
-    double time_;
-  };
-
-
-  class SurvivalEvent {
-   public:
-    // Default constructor leaves the event uninitialized.
-    SurvivalEvent() {}
-
-    explicit SurvivalEvent(double survival_ratio);
-
-    double promotion_ratio_;
-  };
-
-
   class Event {
    public:
     enum Type {
@@ -307,19 +247,6 @@
     double scopes[Scope::NUMBER_OF_SCOPES];
   };
 
-  static const size_t kRingBufferMaxSize = 10;
-
-  typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer;
-
-  typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
-
-  typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
-      ContextDisposalEventBuffer;
-
-  typedef RingBuffer<CompactionEvent, kRingBufferMaxSize> CompactionEventBuffer;
-
-  typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
-
   static const int kThroughputTimeFrameMs = 5000;
 
   explicit GCTracer(Heap* heap);
@@ -369,63 +296,27 @@
     return cumulative_sweeping_duration_;
   }
 
-  // Compute the mean duration of the last scavenger events. Returns 0 if no
-  // events have been recorded.
-  double MeanScavengerDuration() const {
-    return MeanDuration(scavenger_events_);
-  }
-
-  // Compute the max duration of the last scavenger events. Returns 0 if no
-  // events have been recorded.
-  double MaxScavengerDuration() const { return MaxDuration(scavenger_events_); }
-
-  // Compute the mean duration of the last mark compactor events. Returns 0 if
-  // no events have been recorded.
-  double MeanMarkCompactorDuration() const {
-    return MeanDuration(mark_compactor_events_);
-  }
-
-  // Compute the max duration of the last mark compactor events. Return 0 if no
-  // events have been recorded.
-  double MaxMarkCompactorDuration() const {
-    return MaxDuration(mark_compactor_events_);
-  }
-
-  // Compute the mean duration of the last incremental mark compactor
-  // events. Returns 0 if no events have been recorded.
-  double MeanIncrementalMarkCompactorDuration() const {
-    return MeanDuration(incremental_mark_compactor_events_);
-  }
-
-  // Compute the mean step duration of the last incremental marking round.
-  // Returns 0 if no incremental marking round has been completed.
-  double MeanIncrementalMarkingDuration() const;
-
-  // Compute the max step duration of the last incremental marking round.
-  // Returns 0 if no incremental marking round has been completed.
-  double MaxIncrementalMarkingDuration() const;
-
   // Compute the average incremental marking speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const;
+  double IncrementalMarkingSpeedInBytesPerMillisecond() const;
 
   // Compute the average scavenge speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t ScavengeSpeedInBytesPerMillisecond(
+  double ScavengeSpeedInBytesPerMillisecond(
       ScavengeSpeedMode mode = kForAllObjects) const;
 
   // Compute the average compaction speed in bytes/millisecond.
   // Returns 0 if not enough events have been recorded.
-  intptr_t CompactionSpeedInBytesPerMillisecond() const;
+  double CompactionSpeedInBytesPerMillisecond() const;
 
   // Compute the average mark-sweep speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
+  double MarkCompactSpeedInBytesPerMillisecond() const;
 
   // Compute the average incremental mark-sweep finalize speed in
   // bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
+  double FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
 
   // Compute the overall mark compact speed including incremental steps
   // and the final mark-compact step.
@@ -433,29 +324,29 @@
 
   // Allocation throughput in the new space in bytes/millisecond.
   // Returns 0 if no allocation events have been recorded.
-  size_t NewSpaceAllocationThroughputInBytesPerMillisecond(
+  double NewSpaceAllocationThroughputInBytesPerMillisecond(
       double time_ms = 0) const;
 
   // Allocation throughput in the old generation in bytes/millisecond in the
   // last time_ms milliseconds.
   // Returns 0 if no allocation events have been recorded.
-  size_t OldGenerationAllocationThroughputInBytesPerMillisecond(
+  double OldGenerationAllocationThroughputInBytesPerMillisecond(
       double time_ms = 0) const;
 
   // Allocation throughput in heap in bytes/millisecond in the last time_ms
   // milliseconds.
   // Returns 0 if no allocation events have been recorded.
-  size_t AllocationThroughputInBytesPerMillisecond(double time_ms) const;
+  double AllocationThroughputInBytesPerMillisecond(double time_ms) const;
 
   // Allocation throughput in heap in bytes/milliseconds in the last
   // kThroughputTimeFrameMs seconds.
   // Returns 0 if no allocation events have been recorded.
-  size_t CurrentAllocationThroughputInBytesPerMillisecond() const;
+  double CurrentAllocationThroughputInBytesPerMillisecond() const;
 
   // Allocation throughput in old generation in bytes/milliseconds in the last
   // kThroughputTimeFrameMs seconds.
   // Returns 0 if no allocation events have been recorded.
-  size_t CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
+  double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
 
   // Computes the context disposal rate in milliseconds. It takes the time
   // frame of the first recorded context disposal to the current time and
@@ -474,6 +365,13 @@
   // Discard all recorded survival events.
   void ResetSurvivalEvents();
 
+  // Returns the average speed of the events in the buffer.
+  // If the buffer is empty, the result is 0.
+  // Otherwise, the result is between 1 byte/ms and 1 GB/ms.
+  static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
+  static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+                             const BytesAndDuration& initial, double time_ms);
+
  private:
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
@@ -487,12 +385,6 @@
   // it can be included in later crash dumps.
   void Output(const char* format, ...) const;
 
-  // Compute the mean duration of the events in the given ring buffer.
-  double MeanDuration(const EventBuffer& events) const;
-
-  // Compute the max duration of the events in the given ring buffer.
-  double MaxDuration(const EventBuffer& events) const;
-
   void ClearMarkCompactStatistics() {
     cumulative_incremental_marking_steps_ = 0;
     cumulative_incremental_marking_bytes_ = 0;
@@ -506,6 +398,16 @@
     cumulative_sweeping_duration_ = 0;
   }
 
+  double TotalExternalTime() const {
+    return current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES] +
+           current_.scopes[Scope::MC_EXTERNAL_EPILOGUE] +
+           current_.scopes[Scope::MC_EXTERNAL_PROLOGUE] +
+           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE] +
+           current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE] +
+           current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE] +
+           current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE];
+  }
+
   // Pointer to the heap that owns this tracer.
   Heap* heap_;
 
@@ -519,28 +421,6 @@
   // Previous INCREMENTAL_MARK_COMPACTOR event.
   Event previous_incremental_mark_compactor_event_;
 
-  // RingBuffers for SCAVENGER events.
-  EventBuffer scavenger_events_;
-
-  // RingBuffers for MARK_COMPACTOR events.
-  EventBuffer mark_compactor_events_;
-
-  // RingBuffers for INCREMENTAL_MARK_COMPACTOR events.
-  EventBuffer incremental_mark_compactor_events_;
-
-  // RingBuffer for allocation events.
-  AllocationEventBuffer new_space_allocation_events_;
-  AllocationEventBuffer old_generation_allocation_events_;
-
-  // RingBuffer for context disposal events.
-  ContextDisposalEventBuffer context_disposal_events_;
-
-  // RingBuffer for compaction events.
-  CompactionEventBuffer compaction_events_;
-
-  // RingBuffer for survival events.
-  SurvivalEventBuffer survival_events_;
-
   // Cumulative number of incremental marking steps since creation of tracer.
   int cumulative_incremental_marking_steps_;
 
@@ -597,6 +477,20 @@
   // Counts how many tracers were started without stopping.
   int start_counter_;
 
+  // Separate timer used for --runtime_call_stats
+  RuntimeCallTimer timer_;
+
+  RingBuffer<BytesAndDuration> recorded_incremental_marking_steps_;
+  RingBuffer<BytesAndDuration> recorded_scavenges_total_;
+  RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
+  RingBuffer<BytesAndDuration> recorded_compactions_;
+  RingBuffer<BytesAndDuration> recorded_mark_compacts_;
+  RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+  RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
+  RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+  RingBuffer<double> recorded_context_disposal_times_;
+  RingBuffer<double> recorded_survival_ratios_;
+
   DISALLOW_COPY_AND_ASSIGN(GCTracer);
 };
 }  // namespace internal
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index 57e6cc4..e31d3d6 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -12,9 +12,9 @@
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking-inl.h"
 #include "src/heap/mark-compact.h"
+#include "src/heap/remembered-set.h"
 #include "src/heap/spaces-inl.h"
 #include "src/heap/store-buffer.h"
-#include "src/heap/store-buffer-inl.h"
 #include "src/isolate.h"
 #include "src/list-inl.h"
 #include "src/log.h"
@@ -25,20 +25,24 @@
 namespace v8 {
 namespace internal {
 
-void PromotionQueue::insert(HeapObject* target, int size) {
+void PromotionQueue::insert(HeapObject* target, int32_t size,
+                            bool was_marked_black) {
   if (emergency_stack_ != NULL) {
-    emergency_stack_->Add(Entry(target, size));
+    emergency_stack_->Add(Entry(target, size, was_marked_black));
     return;
   }
 
-  if ((rear_ - 2) < limit_) {
+  if ((rear_ - 1) < limit_) {
     RelocateQueueHead();
-    emergency_stack_->Add(Entry(target, size));
+    emergency_stack_->Add(Entry(target, size, was_marked_black));
     return;
   }
 
-  *(--rear_) = reinterpret_cast<intptr_t>(target);
-  *(--rear_) = size;
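+  // Store the promotion as a single Entry record; rear_ grows downwards
+  // towards limit_.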
+  struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
+  entry->obj_ = target;
+  entry->size_ = size;
+  entry->was_marked_black_ = was_marked_black;
+
 // Assert no overflow into live objects.
 #ifdef DEBUG
   SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
@@ -247,6 +251,12 @@
   } else {
     old_gen_exhausted_ = true;
   }
+
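+  // With black allocation active and the old generation not exhausted,
+  // objects allocated outside OLD_SPACE are marked black right away and
+  // their size is added to the live bytes of their page.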
+  if (!old_gen_exhausted_ && incremental_marking()->black_allocation() &&
+      space != OLD_SPACE) {
+    Marking::MarkBlack(Marking::MarkBitFrom(object));
+    MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
+  }
   return allocation;
 }
 
@@ -434,31 +444,11 @@
   return false;
 }
 
-
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
   CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
             static_cast<size_t>(byte_size / kPointerSize));
 }
 
-
-void Heap::MoveBlock(Address dst, Address src, int byte_size) {
-  DCHECK(IsAligned(byte_size, kPointerSize));
-
-  int size_in_words = byte_size / kPointerSize;
-
-  if ((dst < src) || (dst >= (src + byte_size))) {
-    Object** src_slot = reinterpret_cast<Object**>(src);
-    Object** dst_slot = reinterpret_cast<Object**>(dst);
-    Object** end_slot = src_slot + size_in_words;
-
-    while (src_slot != end_slot) {
-      *dst_slot++ = *src_slot++;
-    }
-  } else {
-    MemMove(dst, src, static_cast<size_t>(byte_size));
-  }
-}
-
 template <Heap::FindMementoMode mode>
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Check if there is potentially a memento behind the object. If
@@ -655,8 +645,7 @@
   set_instanceof_cache_function(Smi::FromInt(0));
 }
 
-
-Object* Heap::ToBoolean(bool condition) {
+Oddball* Heap::ToBoolean(bool condition) {
   return condition ? true_value() : false_value();
 }
 
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 1c9be1a..c3f56ac 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -36,7 +36,7 @@
 #include "src/regexp/jsregexp.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/serializer-common.h"
 #include "src/snapshot/snapshot.h"
 #include "src/tracing/trace-event.h"
 #include "src/type-feedback-vector.h"
@@ -68,7 +68,6 @@
   Heap& heap_;
 };
 
-
 Heap::Heap()
     : amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
@@ -76,7 +75,6 @@
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
-      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
@@ -92,6 +90,7 @@
       survived_since_last_expansion_(0),
       survived_last_scavenge_(0),
       always_allocate_scope_count_(0),
+      memory_pressure_level_(MemoryPressureLevel::kNone),
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
@@ -116,6 +115,7 @@
       inline_allocation_disabled_(false),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
+      embedder_heap_tracer_(nullptr),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_ratio_(0),
@@ -453,10 +453,6 @@
   ReportStatisticsBeforeGC();
 #endif  // DEBUG
 
-  if (isolate()->concurrent_osr_enabled()) {
-    isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
-  }
-
   if (new_space_.IsAtMaximumCapacity()) {
     maximum_size_scavenges_++;
   } else {
@@ -794,12 +790,19 @@
 
 
 void Heap::HandleGCRequest() {
-  if (incremental_marking()->request_type() ==
-      IncrementalMarking::COMPLETE_MARKING) {
+  if (HighMemoryPressure()) {
+    incremental_marking()->reset_request_type();
+    CheckMemoryPressure();
+  } else if (incremental_marking()->request_type() ==
+             IncrementalMarking::COMPLETE_MARKING) {
+    incremental_marking()->reset_request_type();
     CollectAllGarbage(current_gc_flags_, "GC interrupt",
                       current_gc_callback_flags_);
-  } else if (incremental_marking()->IsMarking() &&
+  } else if (incremental_marking()->request_type() ==
+                 IncrementalMarking::FINALIZATION &&
+             incremental_marking()->IsMarking() &&
              !incremental_marking()->finalize_marking_completed()) {
+    incremental_marking()->reset_request_type();
     FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
   }
 }
@@ -815,7 +818,7 @@
     PrintF("[IncrementalMarking] (%s).\n", gc_reason);
   }
 
-  GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
+  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
   HistogramTimerScope incremental_marking_scope(
       isolate()->counters()->gc_incremental_marking_finalize());
   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
@@ -824,7 +827,7 @@
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
-      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
@@ -835,7 +838,7 @@
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
-      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
@@ -924,7 +927,7 @@
     double deadline = MonotonicallyIncreasingTimeInMs() +
                       FLAG_external_allocation_limit_incremental_time;
     incremental_marking()->AdvanceIncrementalMarking(
-        0, deadline,
+        deadline,
         IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
                                         IncrementalMarking::FORCE_MARKING,
                                         IncrementalMarking::FORCE_COMPLETION));
@@ -933,20 +936,15 @@
 
 
 void Heap::EnsureFillerObjectAtTop() {
-  // There may be an allocation memento behind every object in new space.
-  // If we evacuate a not full new space or if we are on the last page of
-  // the new space, then there may be uninitialized memory behind the top
-  // pointer of the new space page. We store a filler object there to
-  // identify the unused space.
-  Address from_top = new_space_.top();
-  // Check that from_top is inside its page (i.e., not at the end).
-  Address space_end = new_space_.ToSpaceEnd();
-  if (from_top < space_end) {
-    Page* page = Page::FromAddress(from_top);
-    if (page->Contains(from_top)) {
-      int remaining_in_page = static_cast<int>(page->area_end() - from_top);
-      CreateFillerObjectAt(from_top, remaining_in_page);
-    }
+  // There may be an allocation memento behind objects in new space. Upon
+  // evacuation of a non-full new space (or if we are on the last page) there
+  // may be uninitialized memory behind top. We fill the remainder of the page
+  // with a filler.
+  Address to_top = new_space_.top();
+  NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
+  if (page->Contains(to_top)) {
+    int remaining_in_page = static_cast<int>(page->area_end() - to_top);
+    CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
   }
 }
 
@@ -1037,6 +1035,7 @@
       if (deserialization_complete_) {
         memory_reducer_->NotifyMarkCompact(event);
       }
+      memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
     }
 
     tracer()->Stop(collector);
@@ -1109,7 +1108,7 @@
                   dst_objects[i]);
     }
   }
-  incremental_marking()->RecordWrites(array);
+  incremental_marking()->IterateBlackObject(array);
 }
 
 
@@ -1143,7 +1142,8 @@
   static const int kThreshold = 20;
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
-    for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
+    for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
+         space++) {
       Reservation* reservation = &reservations[space];
       DCHECK_LE(1, reservation->length());
       if (reservation->at(0).size == 0) continue;
@@ -1160,15 +1160,18 @@
           if (space == NEW_SPACE) {
             allocation = new_space()->AllocateRawUnaligned(size);
           } else {
-            allocation = paged_space(space)->AllocateRawUnaligned(size);
+            // The deserializer will update the skip list.
+            allocation = paged_space(space)->AllocateRawUnaligned(
+                size, PagedSpace::IGNORE_SKIP_LIST);
           }
           HeapObject* free_space = nullptr;
           if (allocation.To(&free_space)) {
             // Mark with a free list node, in case we have a GC before
             // deserializing.
             Address free_space_address = free_space->address();
-            CreateFillerObjectAt(free_space_address, size);
-            DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
+            CreateFillerObjectAt(free_space_address, size,
+                                 ClearRecordedSlots::kNo);
+            DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
             chunk.start = free_space_address;
             chunk.end = free_space_address + size;
           } else {
@@ -1279,7 +1282,9 @@
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
-      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      TRACE_GC(tracer(), collector == MARK_COMPACTOR
+                             ? GCTracer::Scope::MC_EXTERNAL_PROLOGUE
+                             : GCTracer::Scope::SCAVENGER_EXTERNAL_PROLOGUE);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
@@ -1323,22 +1328,10 @@
 
   isolate_->counters()->objs_since_last_young()->Set(0);
 
-  if (collector != SCAVENGER) {
-    // Callbacks that fire after this point might trigger nested GCs and
-    // restart incremental marking, the assertion can't be moved down.
-    DCHECK(incremental_marking()->IsStopped());
-
-    // We finished a marking cycle. We can uncommit the marking deque until
-    // we start marking again.
-    mark_compact_collector()->marking_deque()->Uninitialize();
-    mark_compact_collector()->EnsureMarkingDequeIsCommitted(
-        MarkCompactCollector::kMinMarkingDequeSize);
-  }
-
   gc_post_processing_depth_++;
   {
     AllowHeapAllocation allow_allocation;
-    GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+    TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
     freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
             collector, gc_callback_flags);
@@ -1351,9 +1344,8 @@
   Relocatable::PostGarbageCollectionProcessing(isolate_);
 
   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
-  double mutator_speed = static_cast<double>(
-      tracer()
-          ->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond());
+  double mutator_speed =
+      tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
   intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
@@ -1369,7 +1361,9 @@
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
-      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      TRACE_GC(tracer(), collector == MARK_COMPACTOR
+                             ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
+                             : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
@@ -1399,6 +1393,10 @@
       }
     }
   }
+  if (FLAG_trace_object_groups && (gc_type == kGCTypeIncrementalMarking ||
+                                   gc_type == kGCTypeMarkSweepCompact)) {
+    isolate_->global_handles()->PrintObjectGroups();
+  }
 }
 
 
@@ -1453,6 +1451,13 @@
   incremental_marking()->Epilogue();
 
   PreprocessStackTraces();
+  DCHECK(incremental_marking()->IsStopped());
+
+  // We finished a marking cycle. We can uncommit the marking deque until
+  // we start marking again.
+  mark_compact_collector()->marking_deque()->Uninitialize();
+  mark_compact_collector()->EnsureMarkingDequeIsCommitted(
+      MarkCompactCollector::kMinMarkingDequeSize);
 }
 
 
@@ -1558,8 +1563,8 @@
   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
          0);
   front_ = rear_ =
-      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
-  limit_ = reinterpret_cast<intptr_t*>(
+      reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
+  limit_ = reinterpret_cast<struct Entry*>(
       Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
   emergency_stack_ = NULL;
 }
@@ -1569,22 +1574,21 @@
   DCHECK(emergency_stack_ == NULL);
 
   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
-  intptr_t* head_start = rear_;
-  intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+  struct Entry* head_start = rear_;
+  struct Entry* head_end =
+      Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
 
   int entries_count =
-      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+      static_cast<int>(head_end - head_start) / sizeof(struct Entry);
 
   emergency_stack_ = new List<Entry>(2 * entries_count);
 
   while (head_start != head_end) {
-    int size = static_cast<int>(*(head_start++));
-    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+    struct Entry* entry = head_start++;
     // New space allocation in SemiSpaceCopyObject marked the region
     // overlapping with promotion queue as uninitialized.
-    MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
-    MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
-    emergency_stack_->Add(Entry(obj, size));
+    MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
+    emergency_stack_->Add(*entry);
   }
   rear_ = head_end;
 }
@@ -1612,7 +1616,7 @@
 
 
 void Heap::Scavenge() {
-  GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
+  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
   RelocationLock relocation_lock(this);
   // There are soft limits in the allocation code, designed to trigger a mark
   // sweep collection by failing allocations. There is no sense in trying to
@@ -1673,20 +1677,19 @@
 
   {
     // Copy roots.
-    GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
+    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
     IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
   }
 
   {
     // Copy objects reachable from the old generation.
-    GCTracer::Scope gc_scope(tracer(),
-                             GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
+    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
     RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
                                                   Scavenger::ScavengeObject);
   }
 
   {
-    GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
+    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
     // Copy objects reachable from the encountered weak collections list.
     scavenge_visitor.VisitPointer(&encountered_weak_collections_);
     // Copy objects reachable from the encountered weak cells.
@@ -1695,8 +1698,7 @@
 
   {
     // Copy objects reachable from the code flushing candidates list.
-    GCTracer::Scope gc_scope(tracer(),
-                             GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
+    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
     MarkCompactCollector* collector = mark_compact_collector();
     if (collector->is_code_flushing_enabled()) {
       collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
@@ -1704,7 +1706,7 @@
   }
 
   {
-    GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
+    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
   }
 
@@ -1716,8 +1718,7 @@
         &scavenge_visitor);
     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
   } else {
-    GCTracer::Scope gc_scope(tracer(),
-                             GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
+    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
     while (isolate()->global_handles()->IterateObjectGroups(
         &scavenge_visitor, &IsUnscavengedHeapObject)) {
       new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
@@ -1849,6 +1850,10 @@
   set_allocation_sites_list(allocation_site_obj);
 }
 
+void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
+  set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
+  set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
+}
 
 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
   DisallowHeapAllocation no_allocation_scope;
@@ -1938,8 +1943,9 @@
     {
       while (!promotion_queue()->is_empty()) {
         HeapObject* target;
-        int size;
-        promotion_queue()->remove(&target, &size);
+        int32_t size;
+        bool was_marked_black;
+        promotion_queue()->remove(&target, &size, &was_marked_black);
 
         // Promoted object might be already partially visited
         // during old space pointer iteration. Thus we search specifically
@@ -1947,7 +1953,8 @@
         // to new space.
         DCHECK(!target->IsMap());
 
-        IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject);
+        IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
+                              &Scavenger::ScavengeObject);
       }
     }
 
@@ -2000,7 +2007,7 @@
 
 
 HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
-  CreateFillerObjectAt(object->address(), filler_size);
+  CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
   return HeapObject::FromAddress(object->address() + filler_size);
 }
 
@@ -2016,7 +2023,8 @@
     filler_size -= pre_filler;
   }
   if (filler_size)
-    CreateFillerObjectAt(object->address() + object_size, filler_size);
+    CreateFillerObjectAt(object->address() + object_size, filler_size,
+                         ClearRecordedSlots::kNo);
   return object;
 }
 
@@ -2134,7 +2142,7 @@
   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
   DCHECK(chunk->owner()->identity() == space);
 #endif
-  CreateFillerObjectAt(obj->address(), size);
+  CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
   return obj;
 }
 
@@ -2320,6 +2328,7 @@
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
 
     for (unsigned i = 0; i < arraysize(string_type_table); i++) {
       const StringTypeTable& entry = string_type_table[i];
@@ -2380,6 +2389,7 @@
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
@@ -2401,6 +2411,20 @@
 #undef ALLOCATE_MAP
   }
 
+  {
+    AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+    if (!allocation.To(&obj)) return false;
+  }
+  set_true_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kTrue);
+
+  {
+    AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
+    if (!allocation.To(&obj)) return false;
+  }
+  set_false_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kFalse);
+
   {  // Empty arrays
     {
       ByteArray* byte_array;
@@ -2530,6 +2554,15 @@
   TransitionArray* array = TransitionArray::cast(raw_array);
   array->set_length(capacity);
   MemsetPointer(array->data_start(), undefined_value(), capacity);
+  // Transition arrays are tenured. When black allocation is on we have to
+  // add the transition array to the list of encountered_transition_arrays.
+  if (incremental_marking()->black_allocation()) {
+    array->set_next_link(encountered_transition_arrays(),
+                         UPDATE_WEAK_WRITE_BARRIER);
+    set_encountered_transition_arrays(array);
+  } else {
+    array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
+  }
   return array;
 }
 
@@ -2618,50 +2651,61 @@
   // Allocate initial string table.
   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
 
+  // Allocate
+
   // Finish initializing oddballs after creating the string table.
   Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
-                      factory->nan_value(), "undefined", Oddball::kUndefined);
+                      factory->nan_value(), false, "undefined",
+                      Oddball::kUndefined);
 
   // Initialize the null_value.
   Oddball::Initialize(isolate(), factory->null_value(), "null",
-                      handle(Smi::FromInt(0), isolate()), "object",
+                      handle(Smi::FromInt(0), isolate()), false, "object",
                       Oddball::kNull);
 
-  set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
-                                      handle(Smi::FromInt(1), isolate()),
-                                      "boolean", Oddball::kTrue));
+  // Initialize the true_value.
+  Oddball::Initialize(isolate(), factory->true_value(), "true",
+                      handle(Smi::FromInt(1), isolate()), true, "boolean",
+                      Oddball::kTrue);
 
-  set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
-                                       handle(Smi::FromInt(0), isolate()),
-                                       "boolean", Oddball::kFalse));
+  // Initialize the false_value.
+  Oddball::Initialize(isolate(), factory->false_value(), "false",
+                      handle(Smi::FromInt(0), isolate()), false, "boolean",
+                      Oddball::kFalse);
 
-  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
-                                          handle(Smi::FromInt(-1), isolate()),
-                                          "undefined", Oddball::kTheHole));
+  set_the_hole_value(*factory->NewOddball(
+      factory->the_hole_map(), "hole", handle(Smi::FromInt(-1), isolate()),
+      false, "undefined", Oddball::kTheHole));
 
   set_uninitialized_value(
       *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
-                           handle(Smi::FromInt(-1), isolate()), "undefined",
-                           Oddball::kUninitialized));
+                           handle(Smi::FromInt(-1), isolate()), false,
+                           "undefined", Oddball::kUninitialized));
 
   set_arguments_marker(
       *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
-                           handle(Smi::FromInt(-4), isolate()), "undefined",
-                           Oddball::kArgumentsMarker));
+                           handle(Smi::FromInt(-4), isolate()), false,
+                           "undefined", Oddball::kArgumentsMarker));
 
   set_no_interceptor_result_sentinel(*factory->NewOddball(
       factory->no_interceptor_result_sentinel_map(),
       "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
-      "undefined", Oddball::kOther));
+      false, "undefined", Oddball::kOther));
 
   set_termination_exception(*factory->NewOddball(
       factory->termination_exception_map(), "termination_exception",
-      handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
+      handle(Smi::FromInt(-3), isolate()), false, "undefined",
+      Oddball::kOther));
 
   set_exception(*factory->NewOddball(factory->exception_map(), "exception",
-                                     handle(Smi::FromInt(-5), isolate()),
+                                     handle(Smi::FromInt(-5), isolate()), false,
                                      "undefined", Oddball::kException));
 
+  set_optimized_out(
+      *factory->NewOddball(factory->optimized_out_map(), "optimized_out",
+                           handle(Smi::FromInt(-6), isolate()), false,
+                           "undefined", Oddball::kOptimizedOut));
+
   for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
     Handle<String> str =
         factory->InternalizeUtf8String(constant_string_table[i].contents);
@@ -2876,7 +2920,6 @@
 
 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
   switch (root_index) {
-    case kStoreBufferTopRootIndex:
     case kNumberStringCacheRootIndex:
     case kInstanceofCacheFunctionRootIndex:
     case kInstanceofCacheMapRootIndex:
@@ -3037,14 +3080,14 @@
   instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
   instance->set_constant_pool(constant_pool);
   instance->set_handler_table(empty_fixed_array());
-  instance->set_source_position_table(empty_fixed_array());
+  instance->set_source_position_table(empty_byte_array());
   CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
 
   return result;
 }
 
-
-void Heap::CreateFillerObjectAt(Address addr, int size) {
+void Heap::CreateFillerObjectAt(Address addr, int size,
+                                ClearRecordedSlots mode) {
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
@@ -3059,6 +3102,9 @@
         reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
     FreeSpace::cast(filler)->nobarrier_set_size(size);
   }
+  if (mode == ClearRecordedSlots::kYes) {
+    ClearRecordedSlotRange(addr, addr + size);
+  }
   // At this point, we may be deserializing the heap from a snapshot, and
   // none of the maps have been created yet and are NULL.
   DCHECK((filler->map() == NULL && !deserialization_complete_) ||
@@ -3092,9 +3138,11 @@
   // the heap using HeapIterator, we can update the live byte count. We cannot
   // update while using HeapIterator because the iterator is temporarily
   // marking the whole object graph, without updating live bytes.
-  if (!in_heap_iterator() &&
-      !mark_compact_collector()->sweeping_in_progress() &&
-      Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
+  if (lo_space()->Contains(object)) {
+    lo_space()->AdjustLiveBytes(by);
+  } else if (!in_heap_iterator() &&
+             !mark_compact_collector()->sweeping_in_progress() &&
+             Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
     if (mode == SEQUENTIAL_TO_SWEEPER) {
       MemoryChunk::IncrementLiveBytesFromGC(object, by);
     } else {
@@ -3135,7 +3183,8 @@
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
-  CreateFillerObjectAt(object->address(), bytes_to_trim);
+  CreateFillerObjectAt(object->address(), bytes_to_trim,
+                       ClearRecordedSlots::kYes);
 
   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept creating a filler
@@ -3150,11 +3199,6 @@
 
   // Maintain consistency of live bytes during incremental marking
   Marking::TransferMark(this, object->address(), new_start);
-  if (mark_compact_collector()->sweeping_in_progress()) {
-    // Array trimming during sweeping can add invalid slots in free list.
-    ClearRecordedSlotRange(object, former_start,
-                           HeapObject::RawField(new_object, 0));
-  }
   AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
 
   // Notify the heap profiler of change in object layout.
@@ -3214,12 +3258,7 @@
   // TODO(hpayer): We should shrink the large object page if the size
   // of the object changed significantly.
   if (!lo_space()->Contains(object)) {
-    CreateFillerObjectAt(new_end, bytes_to_trim);
-    if (mark_compact_collector()->sweeping_in_progress()) {
-      // Array trimming during sweeping can add invalid slots in free list.
-      ClearRecordedSlotRange(object, reinterpret_cast<Object**>(new_end),
-                             reinterpret_cast<Object**>(old_end));
-    }
+    CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
   }
 
   // Initialize header of the trimmed array. We are storing the new length
@@ -3313,7 +3352,6 @@
 
   HeapObject* result = nullptr;
   if (!allocation.To(&result)) return allocation;
-
   if (immovable) {
     Address address = result->address();
     // Code objects which should stay at a fixed address are allocated either
@@ -3323,7 +3361,8 @@
         MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
       // Discard the first code allocation, which was on a page where it could
       // be moved.
-      CreateFillerObjectAt(result->address(), object_size);
+      CreateFillerObjectAt(result->address(), object_size,
+                           ClearRecordedSlots::kNo);
       allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
       if (!allocation.To(&result)) return allocation;
       OnAllocationEvent(result, object_size);
@@ -3363,6 +3402,9 @@
          isolate_->code_range()->contains(code->address()) ||
          obj_size <= code_space()->AreaSize());
   new_code->Relocate(new_addr - old_addr);
+  // We have to iterate over the object and process its pointers when black
+  // allocation is on.
+  incremental_marking()->IterateBlackObject(new_code);
   return new_code;
 }
 
@@ -3382,6 +3424,7 @@
   copy->set_constant_pool(bytecode_array->constant_pool());
   copy->set_handler_table(bytecode_array->handler_table());
   copy->set_source_position_table(bytecode_array->source_position_table());
+  copy->set_interrupt_budget(bytecode_array->interrupt_budget());
   bytecode_array->CopyBytecodesTo(copy);
   return copy;
 }
@@ -3429,7 +3472,9 @@
          new_obj_size <= code_space()->AreaSize());
 
   new_code->Relocate(new_addr - old_addr);
-
+  // We have to iterate over the object and process its pointers when
+  // black allocation is on.
+  incremental_marking()->IterateBlackObject(new_code);
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) code->ObjectVerify();
 #endif
@@ -3560,11 +3605,12 @@
   // Make the clone.
   Map* map = source->map();
 
-  // We can only clone regexps, normal objects or arrays. Copying anything else
-  // will break invariants.
+  // We can only clone regexps, normal objects, api objects or arrays. Copying
+  // anything else will break invariants.
   CHECK(map->instance_type() == JS_REGEXP_TYPE ||
         map->instance_type() == JS_OBJECT_TYPE ||
-        map->instance_type() == JS_ARRAY_TYPE);
+        map->instance_type() == JS_ARRAY_TYPE ||
+        map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
 
   int object_size = map->instance_size();
   HeapObject* clone = nullptr;
@@ -4079,8 +4125,8 @@
 double Heap::YoungGenerationMutatorUtilization() {
   double mutator_speed = static_cast<double>(
       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
-  double gc_speed = static_cast<double>(
-      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
+  double gc_speed =
+      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
   if (FLAG_trace_mutator_utilization) {
     PrintIsolate(isolate(),
@@ -4159,7 +4205,7 @@
   // TODO(ulan): Unify this constant with the similar constant in
   // GCIdleTimeHandler once the change is merged to 4.5.
   static const size_t kLowAllocationThroughput = 1000;
-  const size_t allocation_throughput =
+  const double allocation_throughput =
       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
 
   if (FLAG_predictable) return;
@@ -4188,21 +4234,20 @@
 
 bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
   size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
-  size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
-      static_cast<size_t>(
-          tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+  double final_incremental_mark_compact_speed_in_bytes_per_ms =
+      tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->finalize_marking_completed() &&
        mark_compact_collector()->marking_deque()->IsEmpty() &&
        gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
-           static_cast<size_t>(idle_time_in_ms)))) {
+           idle_time_in_ms))) {
     FinalizeIncrementalMarking(
         "Idle notification: finalize incremental marking");
     return true;
   } else if (incremental_marking()->IsComplete() ||
              (mark_compact_collector()->marking_deque()->IsEmpty() &&
               gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
-                  static_cast<size_t>(idle_time_in_ms), size_of_objects,
+                  idle_time_in_ms, size_of_objects,
                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
     CollectAllGarbage(current_gc_flags_,
                       "idle notification: finalize incremental marking");
@@ -4211,6 +4256,40 @@
   return false;
 }
 
+void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
+  // TODO(hpayer): We do not have to iterate reservations on black objects
+  // for marking. We just have to execute the special visiting side effect
+  // code that adds objects to global data structures, e.g. for array buffers.
+
+  // Code space, map space, and large object space do not use black pages.
+  // Hence we have to color all objects of the reservation first black to avoid
+  // unnecessary marking deque load.
+  if (incremental_marking()->black_allocation()) {
+    for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+      const Heap::Reservation& res = reservations[i];
+      for (auto& chunk : res) {
+        Address addr = chunk.start;
+        while (addr < chunk.end) {
+          HeapObject* obj = HeapObject::FromAddress(addr);
+          Marking::MarkBlack(Marking::MarkBitFrom(obj));
+          MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+          addr += obj->Size();
+        }
+      }
+    }
+    for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+      const Heap::Reservation& res = reservations[i];
+      for (auto& chunk : res) {
+        Address addr = chunk.start;
+        while (addr < chunk.end) {
+          HeapObject* obj = HeapObject::FromAddress(addr);
+          incremental_marking()->IterateBlackObject(obj);
+          addr += obj->Size();
+        }
+      }
+    }
+  }
+}
 
 GCIdleTimeHeapState Heap::ComputeHeapState() {
   GCIdleTimeHeapState heap_state;
@@ -4357,6 +4436,59 @@
          MonotonicallyIncreasingTimeInMs();
 }
 
+class MemoryPressureInterruptTask : public CancelableTask {
+ public:
+  explicit MemoryPressureInterruptTask(Heap* heap)
+      : CancelableTask(heap->isolate()), heap_(heap) {}
+
+  virtual ~MemoryPressureInterruptTask() {}
+
+ private:
+  // v8::internal::CancelableTask overrides.
+  void RunInternal() override { heap_->CheckMemoryPressure(); }
+
+  Heap* heap_;
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
+};
+
+void Heap::CheckMemoryPressure() {
+  if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
+    CollectGarbageOnMemoryPressure("memory pressure");
+  } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
+    if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
+      StartIdleIncrementalMarking();
+    }
+  }
+  MemoryReducer::Event event;
+  event.type = MemoryReducer::kPossibleGarbage;
+  event.time_ms = MonotonicallyIncreasingTimeInMs();
+  memory_reducer_->NotifyPossibleGarbage(event);
+}
+
+void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+  CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+                    source);
+}
+
+void Heap::MemoryPressureNotification(MemoryPressureLevel level,
+                                      bool is_isolate_locked) {
+  MemoryPressureLevel previous = memory_pressure_level_.Value();
+  memory_pressure_level_.SetValue(level);
+  if ((previous != MemoryPressureLevel::kCritical &&
+       level == MemoryPressureLevel::kCritical) ||
+      (previous == MemoryPressureLevel::kNone &&
+       level == MemoryPressureLevel::kModerate)) {
+    if (is_isolate_locked) {
+      CheckMemoryPressure();
+    } else {
+      ExecutionAccess access(isolate());
+      isolate()->stack_guard()->RequestGC();
+      V8::GetCurrentPlatform()->CallOnForegroundThread(
+          reinterpret_cast<v8::Isolate*>(isolate()),
+          new MemoryPressureInterruptTask(this));
+    }
+  }
+}
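Taken together, the notification path behaves as follows (an illustrative fragment, not part of the patch; heap is assumed to be a v8::internal::Heap*, and the calls happen on the isolate's thread unless noted):

  // Critical pressure reported while the isolate lock is already held:
  heap->MemoryPressureNotification(MemoryPressureLevel::kCritical,
                                   true /* is_isolate_locked */);
  // When the level rises to kCritical, CheckMemoryPressure() runs synchronously
  // and triggers CollectAllGarbage(kReduceMemoryFootprintMask |
  // kAbortIncrementalMarkingMask, ...).

  // The same report from a thread that does not hold the isolate lock:
  heap->MemoryPressureNotification(MemoryPressureLevel::kCritical,
                                   false /* is_isolate_locked */);
  // A GC interrupt is requested on the stack guard and a
  // MemoryPressureInterruptTask is posted to the foreground thread, which
  // calls CheckMemoryPressure() later.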
 
 #ifdef DEBUG
 
@@ -4557,10 +4689,9 @@
   }
 }
 
-
-void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
-                                             Address end, bool record_slots,
-                                             ObjectSlotCallback callback) {
+void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
+                                         Address end, bool record_slots,
+                                         ObjectSlotCallback callback) {
   Address slot_address = start;
   Page* page = Page::FromAddress(start);
 
@@ -4587,24 +4718,29 @@
   }
 }
 
-
-class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+class IteratePromotedObjectsVisitor final : public ObjectVisitor {
  public:
-  IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
-                                    bool record_slots,
-                                    ObjectSlotCallback callback)
+  IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+                                bool record_slots, ObjectSlotCallback callback)
       : heap_(heap),
         target_(target),
         record_slots_(record_slots),
         callback_(callback) {}
 
   V8_INLINE void VisitPointers(Object** start, Object** end) override {
-    heap_->IterateAndMarkPointersToFromSpace(
+    heap_->IteratePromotedObjectPointers(
         target_, reinterpret_cast<Address>(start),
         reinterpret_cast<Address>(end), record_slots_, callback_);
   }
 
-  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+    // Black allocation requires us to process objects referenced by
+    // promoted objects.
+    if (heap_->incremental_marking()->black_allocation()) {
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      IncrementalMarking::MarkObject(heap_, code);
+    }
+  }
 
  private:
   Heap* heap_;
@@ -4613,9 +4749,9 @@
   ObjectSlotCallback callback_;
 };
 
-
-void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
-                                      ObjectSlotCallback callback) {
+void Heap::IteratePromotedObject(HeapObject* target, int size,
+                                 bool was_marked_black,
+                                 ObjectSlotCallback callback) {
   // We are not collecting slots on new space objects during mutation
   // thus we have to scan for pointers to evacuation candidates when we
   // promote objects. But we should not record any slots in non-black
@@ -4628,9 +4764,20 @@
     record_slots = Marking::IsBlack(mark_bit);
   }
 
-  IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
-                                            callback);
+  IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
   target->IterateBody(target->map()->instance_type(), size, &visitor);
+
+  // When black allocation is on, we have to visit objects (in new space) that
+  // are not already marked black but were promoted to black pages, to keep
+  // their references alive.
+  // TODO(hpayer): Implement a special promotion visitor that incorporates
+  // regular visiting and IteratePromotedObjectPointers.
+  if (!was_marked_black) {
+    if (incremental_marking()->black_allocation()) {
+      IncrementalMarking::MarkObject(this, target->map());
+      incremental_marking()->IterateBlackObject(target);
+    }
+  }
 }
 
 
@@ -4662,6 +4809,10 @@
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize(VisitorSynchronization::kStrongRootList);
+  // The serializer/deserializer iterates the root list twice, first to pick
+  // off immortal immovable roots to make sure they end up on the first page,
+  // and then again for the rest.
+  if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
 
   isolate_->bootstrapper()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kBootstrapper);
@@ -4690,7 +4841,11 @@
 
   // Iterate over global handles.
   switch (mode) {
+    case VISIT_ONLY_STRONG_ROOT_LIST:
+      UNREACHABLE();
+      break;
     case VISIT_ONLY_STRONG:
+    case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
       isolate_->global_handles()->IterateStrongRoots(v);
       break;
     case VISIT_ALL_IN_SCAVENGE:
@@ -4721,15 +4876,10 @@
   }
   v->Synchronize(VisitorSynchronization::kStrongRoots);
 
-  // Iterate over the pointers the Serialization/Deserialization code is
-  // holding.
-  // During garbage collection this keeps the partial snapshot cache alive.
-  // During deserialization of the startup snapshot this creates the partial
-  // snapshot cache and deserializes the objects it refers to.  During
-  // serialization this does nothing, since the partial snapshot cache is
-  // empty.  However the next thing we do is create the partial snapshot,
-  // filling up the partial snapshot cache with objects it needs as we go.
-  SerializerDeserializer::Iterate(isolate_, v);
+  // Iterate over the partial snapshot cache unless serializing.
+  if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
+    SerializerDeserializer::Iterate(isolate_, v);
+  }
   // We don't do a v->Synchronize call here, because in debug mode that will
   // output a flag to the snapshot.  However at this point the serializer and
   // deserializer are deliberately a little unsynchronized (see above) so the
@@ -4779,32 +4929,10 @@
     max_semi_space_size_ = Page::kPageSize;
   }
 
-  if (isolate()->snapshot_available()) {
-    // If we are using a snapshot we always reserve the default amount
-    // of memory for each semispace because code in the snapshot has
-    // write-barrier code that relies on the size and alignment of new
-    // space.  We therefore cannot use a larger max semispace size
-    // than the default reserved semispace size.
-    if (max_semi_space_size_ > reserved_semispace_size_) {
-      max_semi_space_size_ = reserved_semispace_size_;
-      if (FLAG_trace_gc) {
-        PrintIsolate(isolate_,
-                     "Max semi-space size cannot be more than %d kbytes\n",
-                     reserved_semispace_size_ >> 10);
-      }
-    }
-  } else {
-    // If we are not using snapshots we reserve space for the actual
-    // max semispace size.
-    reserved_semispace_size_ = max_semi_space_size_;
-  }
-
   // The new space size must be a power of two to support single-bit testing
   // for containment.
   max_semi_space_size_ =
       base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
-  reserved_semispace_size_ =
-      base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
 
   if (FLAG_min_semi_space_size > 0) {
     int initial_semispace_size = FLAG_min_semi_space_size * MB;
@@ -5148,7 +5276,7 @@
   incremental_marking_ = new IncrementalMarking(this);
 
   // Set up new space.
-  if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
+  if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
     return false;
   }
   new_space_top_after_last_gc_ = new_space()->top();
@@ -5256,6 +5384,10 @@
       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
 }
 
+void Heap::ClearStackLimits() {
+  roots_[kStackLimitRootIndex] = Smi::FromInt(0);
+  roots_[kRealStackLimitRootIndex] = Smi::FromInt(0);
+}
 
 void Heap::PrintAlloctionsHash() {
   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
@@ -5275,6 +5407,13 @@
 #endif  // DEBUG
 }
 
+void Heap::RegisterExternallyReferencedObject(Object** object) {
+  DCHECK(mark_compact_collector()->in_use());
+  HeapObject* heap_object = HeapObject::cast(*object);
+  DCHECK(Contains(heap_object));
+  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+  mark_compact_collector()->MarkObject(heap_object, mark_bit);
+}
 
 void Heap::TearDown() {
 #ifdef VERIFY_HEAP
@@ -5440,6 +5579,11 @@
   UNREACHABLE();
 }
 
+void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+  DCHECK_NOT_NULL(tracer);
+  CHECK_NULL(embedder_heap_tracer_);
+  embedder_heap_tracer_ = tracer;
+}
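Together with the DCHECKs above, the contract of the two new embedder hooks is: register a non-null tracer exactly once, and report externally referenced objects only while the mark-compact collector is in use (an illustrative fragment; tracer and slot are assumed to be supplied by the embedder):

  heap->SetEmbedderHeapTracer(tracer);  // tracer != nullptr, set at most once

  // During marking, for every V8 object the embedder keeps alive:
  heap->RegisterExternallyReferencedObject(slot);
  // The slot must point into the V8 heap; the object is marked via
  // MarkCompactCollector::MarkObject.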
 
 // TODO(ishell): Find a better place for this.
 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
@@ -5582,18 +5726,17 @@
     Page* page = Page::FromAddress(slot_addr);
     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
     RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
+    RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
   }
 }
 
-void Heap::ClearRecordedSlotRange(HeapObject* object, Object** start,
-                                  Object** end) {
-  if (!InNewSpace(object)) {
+void Heap::ClearRecordedSlotRange(Address start, Address end) {
+  Page* page = Page::FromAddress(start);
+  if (!page->InNewSpace()) {
     store_buffer()->MoveEntriesToRememberedSet();
-    Address start_addr = reinterpret_cast<Address>(start);
-    Address end_addr = reinterpret_cast<Address>(end);
-    Page* page = Page::FromAddress(start_addr);
     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
-    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start_addr, end_addr);
+    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
+    RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
   }
 }
 
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 4a76777..9457453 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -10,6 +10,7 @@
 
 // Clients of this interface shouldn't depend on lots of heap internals.
 // Do not include anything from src/heap here!
+#include "include/v8.h"
 #include "src/allocation.h"
 #include "src/assert-scope.h"
 #include "src/atomic-utils.h"
@@ -23,6 +24,8 @@
 namespace v8 {
 namespace internal {
 
+using v8::MemoryPressureLevel;
+
 // Defines all the roots in Heap.
 #define STRONG_ROOT_LIST(V)                                                    \
   V(Map, byte_array_map, ByteArrayMap)                                         \
@@ -30,7 +33,6 @@
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   /* Cluster the most popular ones in a few cache lines here at the top.    */ \
-  V(Smi, store_buffer_top, StoreBufferTop)                                     \
   V(Oddball, undefined_value, UndefinedValue)                                  \
   V(Oddball, the_hole_value, TheHoleValue)                                     \
   V(Oddball, null_value, NullValue)                                            \
@@ -75,6 +77,7 @@
   V(Oddball, arguments_marker, ArgumentsMarker)                                \
   V(Oddball, exception, Exception)                                             \
   V(Oddball, termination_exception, TerminationException)                      \
+  V(Oddball, optimized_out, OptimizedOut)                                      \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
@@ -134,6 +137,7 @@
   V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, with_context_map, WithContextMap)                                     \
+  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
   V(Map, block_context_map, BlockContextMap)                                   \
   V(Map, module_context_map, ModuleContextMap)                                 \
   V(Map, script_context_map, ScriptContextMap)                                 \
@@ -147,6 +151,7 @@
   V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)   \
   V(Map, exception_map, ExceptionMap)                                          \
   V(Map, termination_exception_map, TerminationExceptionMap)                   \
+  V(Map, optimized_out_map, OptimizedOutMap)                                   \
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
   V(Map, neander_map, NeanderMap)                                              \
@@ -270,6 +275,10 @@
   V(JSMessageObjectMap)                 \
   V(ForeignMap)                         \
   V(NeanderMap)                         \
+  V(NanValue)                           \
+  V(InfinityValue)                      \
+  V(MinusZeroValue)                     \
+  V(MinusInfinityValue)                 \
   V(EmptyWeakCell)                      \
   V(empty_string)                       \
   PRIVATE_SYMBOL_LIST(V)
@@ -326,7 +335,7 @@
     // If the limit is not on the same page, we can ignore it.
     if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
 
-    limit_ = reinterpret_cast<intptr_t*>(limit);
+    limit_ = reinterpret_cast<struct Entry*>(limit);
 
     if (limit_ <= rear_) {
       return;
@@ -348,7 +357,7 @@
     }
     // If the to space top pointer is smaller or equal than the promotion
     // queue head, then the to-space objects are below the promotion queue.
-    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
+    return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
   }
 
   bool is_empty() {
@@ -356,44 +365,49 @@
            (emergency_stack_ == NULL || emergency_stack_->length() == 0);
   }
 
-  inline void insert(HeapObject* target, int size);
+  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
 
-  void remove(HeapObject** target, int* size) {
+  void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
     DCHECK(!is_empty());
     if (front_ == rear_) {
       Entry e = emergency_stack_->RemoveLast();
       *target = e.obj_;
       *size = e.size_;
+      *was_marked_black = e.was_marked_black_;
       return;
     }
 
-    *target = reinterpret_cast<HeapObject*>(*(--front_));
-    *size = static_cast<int>(*(--front_));
+    struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
+    *target = entry->obj_;
+    *size = entry->size_;
+    *was_marked_black = entry->was_marked_black_;
+
     // Assert no underflow.
     SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                 reinterpret_cast<Address>(front_));
   }
 
  private:
-  // The front of the queue is higher in the memory page chain than the rear.
-  intptr_t* front_;
-  intptr_t* rear_;
-  intptr_t* limit_;
-
-  static const int kEntrySizeInWords = 2;
-
   struct Entry {
-    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
+    Entry(HeapObject* obj, int32_t size, bool was_marked_black)
+        : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}
 
     HeapObject* obj_;
-    int size_;
+    int32_t size_ : 31;
+    bool was_marked_black_ : 1;
   };
+
+  void RelocateQueueHead();
+
+  // The front of the queue is higher in the memory page chain than the rear.
+  struct Entry* front_;
+  struct Entry* rear_;
+  struct Entry* limit_;
+
   List<Entry>* emergency_stack_;
 
   Heap* heap_;
 
-  void RelocateQueueHead();
-
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
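The widened Entry lets the black-marking state survive a round trip through the queue and feed the new IteratePromotedObject path (an illustrative fragment only; obj, target_in_old_space, callback, and heap are assumed to come from the scavenger):

  // While promoting, remember whether the new-space object was already black:
  bool was_black = Marking::IsBlack(Marking::MarkBitFrom(obj));
  heap->promotion_queue()->insert(target_in_old_space, obj->Size(), was_black);

  // Later, when draining the queue:
  HeapObject* target;
  int32_t size;
  bool was_marked_black;
  heap->promotion_queue()->remove(&target, &size, &was_marked_black);
  heap->IteratePromotedObject(target, size, was_marked_black, callback);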
 
@@ -403,6 +417,7 @@
   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
 };
 
+enum class ClearRecordedSlots { kYes, kNo };
 
 class Heap {
  public:
@@ -536,6 +551,7 @@
 
   STATIC_ASSERT(kUndefinedValueRootIndex ==
                 Internals::kUndefinedValueRootIndex);
+  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
   STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
   STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
   STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
@@ -582,10 +598,6 @@
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
-  // Optimized version of memmove for blocks with pointer size aligned sizes and
-  // pointer size aligned addresses.
-  static inline void MoveBlock(Address dst, Address src, int byte_size);
-
   // Determines a static visitor id based on the given {map} that can then be
   // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
   static int GetStaticVisitorIdForMap(Map* map);
@@ -632,8 +644,10 @@
   void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
 
   // Initialize a filler object to keep the ability to iterate over the heap
-  // when introducing gaps within pages.
-  void CreateFillerObjectAt(Address addr, int size);
+  // when introducing gaps within pages. If slots could have been recorded in
+  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
+  // pass ClearRecordedSlots::kNo.
+  void CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode);
 
   bool CanMoveObjectStart(HeapObject* object);
 
@@ -649,7 +663,7 @@
   void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
 
   // Converts the given boolean condition to JavaScript boolean value.
-  inline Object* ToBoolean(bool condition);
+  inline Oddball* ToBoolean(bool condition);
 
   // Check whether the heap is currently iterable.
   bool IsHeapIterable();
@@ -726,6 +740,10 @@
   bool IdleNotification(double deadline_in_seconds);
   bool IdleNotification(int idle_time_in_ms);
 
+  void MemoryPressureNotification(MemoryPressureLevel level,
+                                  bool is_isolate_locked);
+  void CheckMemoryPressure();
+
   double MonotonicallyIncreasingTimeInMs();
 
   void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -740,6 +758,8 @@
 
     if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
 
+    if (HighMemoryPressure()) return true;
+
     return false;
   }
 
@@ -823,7 +843,12 @@
 
   void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
   void SetOptimizeForMemoryUsage();
-  bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+  bool ShouldOptimizeForMemoryUsage() {
+    return optimize_for_memory_usage_ || HighMemoryPressure();
+  }
+  bool HighMemoryPressure() {
+    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
+  }
 
   // ===========================================================================
   // Initialization. ===========================================================
@@ -853,10 +878,6 @@
   // Getters for spaces. =======================================================
   // ===========================================================================
 
-  // Return the starting address and a mask for the new space.  And-masking an
-  // address with the mask will result in the start address of the new space
-  // for all addresses in either semispace.
-  Address NewSpaceStart() { return new_space_.start(); }
   Address NewSpaceTop() { return new_space_.top(); }
 
   NewSpace* new_space() { return &new_space_; }
@@ -895,11 +916,21 @@
   const char* GetSpaceName(int idx);
 
   // ===========================================================================
+  // API. ======================================================================
+  // ===========================================================================
+
+  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+  void RegisterExternallyReferencedObject(Object** object);
+
+  // ===========================================================================
   // Getters to other components. ==============================================
   // ===========================================================================
 
   GCTracer* tracer() { return tracer_; }
 
+  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
   inline Isolate* isolate();
@@ -974,6 +1005,10 @@
   // jslimit_/real_jslimit_ variable in the StackGuard.
   void SetStackLimits();
 
+  // The stack limit is thread-dependent. To be able to reproduce the same
+  // snapshot blob, we need to reset it before serializing.
+  void ClearStackLimits();
+
   // Generated code can treat direct references to this root as constant.
   bool RootCanBeTreatedAsConstant(RootListIndex root_index);
 
@@ -1039,14 +1074,14 @@
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
-  // Iterate pointers to from semispace of new space found in memory interval
-  // from start to end within |object|.
-  void IteratePointersToFromSpace(HeapObject* target, int size,
-                                  ObjectSlotCallback callback);
+  // Iterate pointers of promoted objects.
+  void IteratePromotedObject(HeapObject* target, int size,
+                             bool was_marked_black,
+                             ObjectSlotCallback callback);
 
-  void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
-                                         Address end, bool record_slots,
-                                         ObjectSlotCallback callback);
+  void IteratePromotedObjectPointers(HeapObject* object, Address start,
+                                     Address end, bool record_slots,
+                                     ObjectSlotCallback callback);
 
   // ===========================================================================
   // Store buffer API. =========================================================
@@ -1055,12 +1090,10 @@
   // Write barrier support for object[offset] = o;
   inline void RecordWrite(Object* object, int offset, Object* o);
 
-  Address* store_buffer_top_address() {
-    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
-  }
+  Address* store_buffer_top_address() { return store_buffer()->top_address(); }
 
   void ClearRecordedSlot(HeapObject* object, Object** slot);
-  void ClearRecordedSlotRange(HeapObject* object, Object** start, Object** end);
+  void ClearRecordedSlotRange(Address start, Address end);
 
   // ===========================================================================
   // Incremental marking API. ==================================================
@@ -1081,6 +1114,8 @@
 
   bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
 
+  void RegisterReservationsForBlackAllocation(Reservation* reservations);
+
   IncrementalMarking* incremental_marking() { return incremental_marking_; }
 
   // ===========================================================================
@@ -1144,16 +1179,11 @@
   // GC statistics. ============================================================
   // ===========================================================================
 
-  // Returns the maximum amount of memory reserved for the heap.  For
-  // the young generation, we reserve 4 times the amount needed for a
-  // semi space.  The young generation consists of two semi spaces and
-  // we reserve twice the amount needed for those in order to ensure
-  // that new space can be aligned to its size.
+  // Returns the maximum amount of memory reserved for the heap.
   intptr_t MaxReserved() {
-    return 4 * reserved_semispace_size_ + max_old_generation_size_;
+    return 2 * max_semi_space_size_ + max_old_generation_size_;
   }
   int MaxSemiSpaceSize() { return max_semi_space_size_; }
-  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   int InitialSemiSpaceSize() { return initial_semispace_size_; }
   intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
   intptr_t MaxExecutableSize() { return max_executable_size_; }
@@ -1618,6 +1648,8 @@
 
   void CompactRetainedMaps(ArrayList* retained_maps);
 
+  void CollectGarbageOnMemoryPressure(const char* source);
+
   // Attempt to over-approximate the weak closure by marking object groups and
   // implicit references from global handles, but don't atomically complete
   // marking. If we continue to mark incrementally, we might have marked
@@ -1672,6 +1704,7 @@
   void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
   void ProcessNativeContexts(WeakObjectRetainer* retainer);
   void ProcessAllocationSites(WeakObjectRetainer* retainer);
+  void ProcessWeakListRoots(WeakObjectRetainer* retainer);
 
   // ===========================================================================
   // GC statistics. ============================================================
@@ -1962,10 +1995,8 @@
   Object* roots_[kRootListLength];
 
   size_t code_range_size_;
-  int reserved_semispace_size_;
   int max_semi_space_size_;
   int initial_semispace_size_;
-  int target_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t initial_old_generation_size_;
   bool old_generation_size_configured_;
@@ -1983,6 +2014,10 @@
   // count, as scopes can be acquired from multiple tasks (read: threads).
   AtomicNumber<size_t> always_allocate_scope_count_;
 
+  // Stores the memory pressure level that is set by
+  // MemoryPressureNotification and reset by a mark-compact garbage collection.
+  AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+
   // For keeping track of context disposals.
   int contexts_disposed_;
 
@@ -2069,6 +2104,7 @@
   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
 
   GCTracer* tracer_;
+  EmbedderHeapTracer* embedder_heap_tracer_;
 
   int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
@@ -2210,7 +2246,7 @@
   friend class HeapIterator;
   friend class IdleScavengeObserver;
   friend class IncrementalMarking;
-  friend class IteratePointersToFromSpaceVisitor;
+  friend class IteratePromotedObjectsVisitor;
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
   friend class NewSpace;
diff --git a/src/heap/incremental-marking-inl.h b/src/heap/incremental-marking-inl.h
index 0d55b83..fa22da6 100644
--- a/src/heap/incremental-marking-inl.h
+++ b/src/heap/incremental-marking-inl.h
@@ -26,11 +26,10 @@
   }
 }
 
-
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
+void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
                                              Object* value) {
   if (IsMarking() && value->IsHeapObject()) {
-    RecordWriteIntoCodeSlow(obj, rinfo, value);
+    RecordWriteIntoCodeSlow(host, rinfo, value);
   }
 }
 
diff --git a/src/heap/incremental-marking-job.cc b/src/heap/incremental-marking-job.cc
index a69dfac..3ccbec2 100644
--- a/src/heap/incremental-marking-job.cc
+++ b/src/heap/incremental-marking-job.cc
@@ -14,6 +14,8 @@
 namespace v8 {
 namespace internal {
 
+const double IncrementalMarkingJob::kLongDelayInSeconds = 5;
+const double IncrementalMarkingJob::kShortDelayInSeconds = 0.5;
 
 void IncrementalMarkingJob::Start(Heap* heap) {
   DCHECK(!heap->incremental_marking()->IsStopped());
@@ -58,8 +60,10 @@
     delayed_task_pending_ = true;
     made_progress_since_last_delayed_task_ = false;
     auto task = new DelayedTask(heap->isolate(), this);
+    double delay =
+        heap->HighMemoryPressure() ? kShortDelayInSeconds : kLongDelayInSeconds;
     V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(isolate, task,
-                                                            kDelayInSeconds);
+                                                            delay);
   }
 }
 
@@ -79,7 +83,7 @@
   }
   const double remaining_idle_time_in_ms =
       incremental_marking->AdvanceIncrementalMarking(
-          0, deadline_in_ms, IncrementalMarking::IdleStepActions());
+          deadline_in_ms, IncrementalMarking::IdleStepActions());
   if (remaining_idle_time_in_ms > 0.0) {
     heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
   }
@@ -117,10 +121,10 @@
   double deadline =
       heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
   heap->incremental_marking()->AdvanceIncrementalMarking(
-      0, deadline, i::IncrementalMarking::StepActions(
-                       i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
-                       i::IncrementalMarking::FORCE_MARKING,
-                       i::IncrementalMarking::FORCE_COMPLETION));
+      deadline, i::IncrementalMarking::StepActions(
+                    i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                    i::IncrementalMarking::FORCE_MARKING,
+                    i::IncrementalMarking::FORCE_COMPLETION));
   heap->FinalizeIncrementalMarkingIfComplete(
       "Incremental marking task: finalize incremental marking");
 }
diff --git a/src/heap/incremental-marking-job.h b/src/heap/incremental-marking-job.h
index c998139..9c78182 100644
--- a/src/heap/incremental-marking-job.h
+++ b/src/heap/incremental-marking-job.h
@@ -49,7 +49,8 @@
   };
 
   // Delay of the delayed task.
-  static const int kDelayInSeconds = 5;
+  static const double kLongDelayInSeconds;
+  static const double kShortDelayInSeconds;
 
   IncrementalMarkingJob()
       : idle_task_pending_(false),
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index ce6f6ee..376e848 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -39,12 +39,12 @@
       allocated_(0),
       write_barriers_invoked_since_last_step_(0),
       idle_marking_delay_counter_(0),
-      no_marking_scope_depth_(0),
       unscanned_bytes_of_large_object_(0),
       was_activated_(false),
+      black_allocation_(false),
       finalize_marking_completed_(false),
       incremental_marking_finalization_rounds_(0),
-      request_type_(COMPLETE_MARKING) {}
+      request_type_(NONE) {}
 
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -131,63 +131,15 @@
   }
 }
 
-
-void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
-                                                 RelocInfo* rinfo,
+void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                  Object* value) {
-  if (BaseRecordWrite(obj, value)) {
-      // Object is not going to be rescanned.  We need to record the slot.
-      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, value);
+  if (BaseRecordWrite(host, value)) {
+    // Object is not going to be rescanned.  We need to record the slot.
+    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
   }
 }
 
 
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
-  if (IsMarking()) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-        chunk->set_progress_bar(0);
-      }
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-  }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
-                                               MarkBit mark_bit) {
-  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
-  DCHECK(obj->Size() >= 2 * kPointerSize);
-  DCHECK(IsMarking());
-  Marking::BlackToGrey(mark_bit);
-  int obj_size = obj->Size();
-  MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
-  bytes_scanned_ -= obj_size;
-  int64_t old_bytes_rescanned = bytes_rescanned_;
-  bytes_rescanned_ = old_bytes_rescanned + obj_size;
-  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
-      // If we have queued twice the heap size for rescanning then we are
-      // going around in circles, scanning the same objects again and again
-      // as the program mutates the heap faster than we can incrementally
-      // trace it.  In this case we switch to non-incremental marking in
-      // order to finish off this marking phase.
-      if (FLAG_trace_incremental_marking) {
-        PrintIsolate(
-            heap()->isolate(),
-            "Hurrying incremental marking because of lack of progress\n");
-      }
-      marking_speed_ = kMaxMarkingSpeed;
-    }
-  }
-
-  heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
-}
-
-
 void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
   Marking::WhiteToGrey(mark_bit);
   heap_->mark_compact_collector()->marking_deque()->Push(obj);
@@ -323,6 +275,16 @@
   }
 };
 
+void IncrementalMarking::IterateBlackObject(HeapObject* object) {
+  if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
+    Page* page = Page::FromAddress(object->address());
+    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
+      // IterateBlackObject requires us to visit the hole object.
+      page->ResetProgressBar();
+    }
+    IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
+  }
+}
 
 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
  public:
@@ -556,6 +518,15 @@
 
 
 void IncrementalMarking::StartMarking() {
+  if (heap_->isolate()->serializer_enabled()) {
+    // Black allocation currently starts when we start incremental marking,
+    // but we cannot enable black allocation while deserializing. Hence, we
+    // have to delay the start of incremental marking in that case.
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Start delayed - serializer\n");
+    }
+    return;
+  }
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start marking\n");
   }
@@ -603,6 +574,26 @@
   }
 }
 
+void IncrementalMarking::StartBlackAllocation() {
+  DCHECK(FLAG_black_allocation);
+  DCHECK(IsMarking());
+  black_allocation_ = true;
+  OldSpace* old_space = heap()->old_space();
+  old_space->EmptyAllocationInfo();
+  old_space->free_list()->Reset();
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Black allocation started\n");
+  }
+}
+
+void IncrementalMarking::FinishBlackAllocation() {
+  if (black_allocation_) {
+    black_allocation_ = false;
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Black allocation finished\n");
+    }
+  }
+}
 
 void IncrementalMarking::MarkRoots() {
   DCHECK(!finalize_marking_completed_);
@@ -775,6 +766,13 @@
        FLAG_min_progress_during_incremental_marking_finalization)) {
     finalize_marking_completed_ = true;
   }
+
+  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
+      !black_allocation_) {
+    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
+    // progress.
+    StartBlackAllocation();
+  }
 }
 
 
@@ -805,6 +803,8 @@
       // them.
       if (map_word.IsForwardingAddress()) {
         HeapObject* dest = map_word.ToForwardingAddress();
+        if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
+          continue;
         array[new_top] = dest;
         new_top = ((new_top + 1) & mask);
         DCHECK(new_top != marking_deque->bottom());
@@ -904,7 +904,12 @@
 
 
 void IncrementalMarking::Hurry() {
-  if (state() == MARKING) {
+  // A scavenge may have pushed new objects on the marking deque (due to black
+  // allocation) even in COMPLETE state. This may happen if scavenges are
+  // forced, e.g. in tests. It should not happen when COMPLETE was set at the
+  // end of incremental marking and a regular GC was triggered after that,
+  // because should_hurry_ will force a full GC.
+  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
     double start = 0.0;
     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
       start = heap_->MonotonicallyIncreasingTimeInMs();
@@ -969,6 +974,7 @@
   heap_->isolate()->stack_guard()->ClearGC();
   state_ = STOPPED;
   is_compacting_ = false;
+  FinishBlackAllocation();
 }
 
 
@@ -1016,28 +1022,26 @@
   incremental_marking_finalization_rounds_ = 0;
 }
 
-
 double IncrementalMarking::AdvanceIncrementalMarking(
-    intptr_t step_size_in_bytes, double deadline_in_ms,
-    IncrementalMarking::StepActions step_actions) {
+    double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
   DCHECK(!IsStopped());
 
-  if (step_size_in_bytes == 0) {
-    step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
-        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
-        static_cast<size_t>(
-            heap()
-                ->tracer()
-                ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
-  }
-
+  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+      GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
+      heap()
+          ->tracer()
+          ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
   double remaining_time_in_ms = 0.0;
+  intptr_t bytes_processed = 0;
+
   do {
-    Step(step_size_in_bytes, step_actions.completion_action,
-         step_actions.force_marking, step_actions.force_completion);
+    bytes_processed =
+        Step(step_size_in_bytes, step_actions.completion_action,
+             step_actions.force_marking, step_actions.force_completion);
     remaining_time_in_ms =
         deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
-  } while (remaining_time_in_ms >=
+  } while (bytes_processed > 0 &&
+           remaining_time_in_ms >=
                2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
            !IsComplete() &&
            !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
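Callers now supply only a deadline; the step size is estimated from the tracer's mark-compact speed, and the loop also stops once a step makes no progress. A typical caller, patterned on the idle-task path in incremental-marking-job.cc earlier in this patch (fragment for illustration):

  double remaining_idle_time_in_ms =
      heap->incremental_marking()->AdvanceIncrementalMarking(
          deadline_in_ms, IncrementalMarking::IdleStepActions());
  if (remaining_idle_time_in_ms > 0.0) {
    heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
  }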
@@ -1152,8 +1156,6 @@
     return 0;
   }
 
-  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;
-
   intptr_t bytes_processed = 0;
   {
     HistogramTimerScope incremental_marking_scope(
@@ -1187,7 +1189,8 @@
         bytes_scanned_ = 0;
         StartMarking();
       }
-    } else if (state_ == MARKING) {
+    }
+    if (state_ == MARKING) {
       bytes_processed = ProcessMarkingDeque(bytes_to_process);
       if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
         if (completion == FORCE_COMPLETION ||
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 387dd0c..f10150d 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -29,7 +29,7 @@
 
   enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
 
-  enum GCRequestType { COMPLETE_MARKING, FINALIZATION };
+  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
 
   struct StepActions {
     StepActions(CompletionAction complete_action_,
@@ -80,6 +80,8 @@
 
   GCRequestType request_type() const { return request_type_; }
 
+  void reset_request_type() { request_type_ = NONE; }
+
   bool CanBeActivated();
 
   bool ShouldActivateEvenWithoutIdleNotification();
@@ -104,13 +106,10 @@
 
   void Epilogue();
 
-  // Performs incremental marking steps of step_size_in_bytes as long as
-  // deadline_ins_ms is not reached. step_size_in_bytes can be 0 to compute
-  // an estimate increment. Returns the remaining time that cannot be used
-  // for incremental marking anymore because a single step would exceed the
-  // deadline.
-  double AdvanceIncrementalMarking(intptr_t step_size_in_bytes,
-                                   double deadline_in_ms,
+  // Performs incremental marking steps until deadline_in_ms is reached. It
+  // returns the remaining time that cannot be used for incremental marking
+  // anymore because a single step would exceed the deadline.
+  double AdvanceIncrementalMarking(double deadline_in_ms,
                                    StepActions step_actions);
 
   // It's hard to know how much work the incremental marker should do to make
@@ -165,23 +164,17 @@
   // the incremental cycle (stays white).
   INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
   INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
-  INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
-                                  Object* value));
+  INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
   INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                      Code* value));
 
 
   void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
-  void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
-                               Object* value);
+  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
   void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
   void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
   void RecordCodeTargetPatch(Address pc, HeapObject* value);
 
-  void RecordWrites(HeapObject* obj);
-
-  void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
-
   void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
 
   inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
@@ -198,10 +191,6 @@
 
   void NotifyOfHighPromotionRate();
 
-  void EnterNoMarkingScope() { no_marking_scope_depth_++; }
-
-  void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
-
   void NotifyIncompleteScanOfObject(int unscanned_bytes) {
     unscanned_bytes_of_large_object_ = unscanned_bytes;
   }
@@ -210,7 +199,9 @@
 
   bool IsIdleMarkingDelayCounterLimitReached();
 
-  INLINE(static void MarkObject(Heap* heap, HeapObject* object));
+  static void MarkObject(Heap* heap, HeapObject* object);
+
+  void IterateBlackObject(HeapObject* object);
 
   Heap* heap() const { return heap_; }
 
@@ -218,6 +209,8 @@
     return &incremental_marking_job_;
   }
 
+  bool black_allocation() { return black_allocation_; }
+
  private:
   class Observer : public AllocationObserver {
    public:
@@ -242,6 +235,9 @@
 
   void StartMarking();
 
+  void StartBlackAllocation();
+  void FinishBlackAllocation();
+
   void MarkRoots();
   void MarkObjectGroups();
   void ProcessWeakCells();
@@ -288,12 +284,12 @@
   intptr_t write_barriers_invoked_since_last_step_;
   size_t idle_marking_delay_counter_;
 
-  int no_marking_scope_depth_;
-
   int unscanned_bytes_of_large_object_;
 
   bool was_activated_;
 
+  bool black_allocation_;
+
   bool finalize_marking_completed_;
 
   int incremental_marking_finalization_rounds_;
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index f117ace..281ece4 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -6,7 +6,7 @@
 #define V8_HEAP_MARK_COMPACT_INL_H_
 
 #include "src/heap/mark-compact.h"
-#include "src/heap/slots-buffer.h"
+#include "src/heap/remembered-set.h"
 #include "src/isolate.h"
 
 namespace v8 {
@@ -70,25 +70,12 @@
 void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
                                       Object* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
   if (target_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(object)) {
-    if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
-                            target_page->slots_buffer_address(), slot,
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictPopularEvacuationCandidate(target_page);
-    }
-  }
-}
-
-
-void MarkCompactCollector::ForceRecordSlot(HeapObject* object, Object** slot,
-                                           Object* target) {
-  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
-  if (target_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(object)) {
-    CHECK(SlotsBuffer::AddTo(slots_buffer_allocator_,
-                             target_page->slots_buffer_address(), slot,
-                             SlotsBuffer::IGNORE_OVERFLOW));
+    DCHECK(Marking::IsBlackOrGrey(Marking::MarkBitFrom(object)));
+    RememberedSet<OLD_TO_OLD>::Insert(source_page,
+                                      reinterpret_cast<Address>(slot));
   }
 }
 
@@ -182,6 +169,7 @@
       } else if (T == kAllLiveObjects) {
         object = HeapObject::FromAddress(addr);
       }
+
       // Clear the second bit of the found object.
       current_cell_ &= ~second_bit_index;
 
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 646e634..e537689 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -21,7 +21,7 @@
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/objects-visiting.h"
-#include "src/heap/slots-buffer.h"
+#include "src/heap/page-parallel-job.h"
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
@@ -55,8 +55,6 @@
       marking_parity_(ODD_MARKING_PARITY),
       was_marked_incrementally_(false),
       evacuation_(false),
-      slots_buffer_allocator_(nullptr),
-      migration_slots_buffer_(nullptr),
       heap_(heap),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(0),
@@ -64,7 +62,6 @@
       have_code_to_deoptimize_(false),
       compacting_(false),
       sweeping_in_progress_(false),
-      compaction_in_progress_(false),
       pending_sweeper_tasks_semaphore_(0),
       pending_compaction_tasks_semaphore_(0) {
 }
@@ -122,6 +119,15 @@
   }
 }
 
+static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
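+  // On a black page every live object was allocated black; check that each
+  // object the iterator finds is marked black and verify its body as well.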
+  CHECK(page->IsFlagSet(Page::BLACK_PAGE));
+  VerifyMarkingVisitor visitor(heap);
+  HeapObjectIterator it(page);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    object->Iterate(&visitor);
+  }
+}
 
 static void VerifyMarking(NewSpace* space) {
   Address end = space->top();
@@ -144,7 +150,11 @@
 
   while (it.has_next()) {
     Page* p = it.next();
-    VerifyMarking(space->heap(), p->area_start(), p->area_end());
+    if (p->IsFlagSet(Page::BLACK_PAGE)) {
+      VerifyMarkingBlackPage(space->heap(), p);
+    } else {
+      VerifyMarking(space->heap(), p->area_start(), p->area_end());
+    }
   }
 }
 
@@ -244,12 +254,8 @@
   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
-  free_list_old_space_.Reset(new FreeList(heap_->old_space()));
-  free_list_code_space_.Reset(new FreeList(heap_->code_space()));
-  free_list_map_space_.Reset(new FreeList(heap_->map_space()));
   EnsureMarkingDequeIsReserved();
   EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
-  slots_buffer_allocator_ = new SlotsBufferAllocator();
 
   if (FLAG_flush_code) {
     code_flusher_ = new CodeFlusher(isolate());
@@ -263,7 +269,6 @@
 void MarkCompactCollector::TearDown() {
   AbortCompaction();
   delete marking_deque_memory_;
-  delete slots_buffer_allocator_;
   delete code_flusher_;
 }
 
@@ -310,55 +315,25 @@
   return compacting_;
 }
 
-
-void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
+void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
     RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
   }
+// There is no need to filter the old-to-old set because
+// it is completely cleared after the mark-compact GC.
+// Slots that become invalid due to runtime transitions are
+// cleared eagerly, right after the transition happens.
 
-  {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
-    for (Page* p : evacuation_candidates_) {
-      SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
-    }
-  }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
-    VerifyValidStoreAndSlotsBufferEntries();
+    RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
+    RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
   }
 #endif
 }
 
 
-#ifdef VERIFY_HEAP
-static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    SlotsBuffer::VerifySlots(heap, p->slots_buffer());
-  }
-}
-
-
-void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
-  RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
-
-  VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
-  VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
-  VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
-
-  LargeObjectIterator it(heap()->lo_space());
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-    SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
-  }
-}
-#endif
-
-
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
@@ -448,7 +423,11 @@
   PageIterator it(space);
 
   while (it.has_next()) {
-    Bitmap::Clear(it.next());
+    Page* p = it.next();
+    Bitmap::Clear(p);
+    if (p->IsFlagSet(Page::BLACK_PAGE)) {
+      p->ClearFlag(Page::BLACK_PAGE);
+    }
   }
 }
 
@@ -471,8 +450,12 @@
   LargeObjectIterator it(heap_->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     Marking::MarkWhite(Marking::MarkBitFrom(obj));
-    Page::FromAddress(obj->address())->ResetProgressBar();
-    Page::FromAddress(obj->address())->ResetLiveBytes();
+    MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+    chunk->ResetProgressBar();
+    chunk->ResetLiveBytes();
+    if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
+      chunk->ClearFlag(Page::BLACK_PAGE);
+    }
   }
 }
 
@@ -509,9 +492,6 @@
 
 
 void MarkCompactCollector::StartSweeperThreads() {
-  DCHECK(free_list_old_space_.get()->IsEmpty());
-  DCHECK(free_list_code_space_.get()->IsEmpty());
-  DCHECK(free_list_map_space_.get()->IsEmpty());
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
       new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -589,7 +569,9 @@
   DCHECK(MemoryChunk::FromAddress(old_start) ==
          MemoryChunk::FromAddress(new_start));
 
-  if (!heap->incremental_marking()->IsMarking()) return;
+  if (!heap->incremental_marking()->IsMarking() ||
+      Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
+    return;
 
   // If the mark doesn't move, we don't check the color of the object.
   // It doesn't matter whether the object is black, since it hasn't changed
@@ -661,15 +643,15 @@
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
   } else {
-    const intptr_t estimated_compaction_speed =
+    const double estimated_compaction_speed =
         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
     if (estimated_compaction_speed != 0) {
       // Estimate the target fragmentation based on traced compaction speed
       // and a goal for a single page.
-      const intptr_t estimated_ms_per_area =
-          1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
-      *target_fragmentation_percent =
-          100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+      const double estimated_ms_per_area =
+          1 + area_size / estimated_compaction_speed;
+      *target_fragmentation_percent = static_cast<int>(
+          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
       if (*target_fragmentation_percent <
           kTargetFragmentationPercentForReduceMemory) {
         *target_fragmentation_percent =
@@ -698,17 +680,14 @@
   while (it.has_next()) {
     Page* p = it.next();
     if (p->NeverEvacuate()) continue;
-    if (p->IsFlagSet(Page::POPULAR_PAGE)) {
-      // This page had slots buffer overflow on previous GC, skip it.
-      p->ClearFlag(Page::POPULAR_PAGE);
-      continue;
-    }
+    if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
     // Invariant: Evacuation candidates are just created when marking is
     // started. This means that sweeping has finished. Furthermore, at the end
     // of a GC all evacuation candidates are cleared and their slot buffers are
     // released.
     CHECK(!p->IsEvacuationCandidate());
-    CHECK(p->slots_buffer() == nullptr);
+    CHECK_NULL(p->old_to_old_slots());
+    CHECK_NULL(p->typed_old_to_old_slots());
     CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
     pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
@@ -814,10 +793,9 @@
 
 void MarkCompactCollector::AbortCompaction() {
   if (compacting_) {
+    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
     for (Page* p : evacuation_candidates_) {
-      slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
       p->ClearEvacuationCandidate();
-      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
     }
     compacting_ = false;
     evacuation_candidates_.Rewind(0);
@@ -877,7 +855,7 @@
 
 
 void MarkCompactCollector::Finish() {
-  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
 
   // The hashing of weak_object_to_code_table is no longer valid.
   heap()->weak_object_to_code_table()->Rehash(
@@ -1031,7 +1009,7 @@
 
 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
   // Make sure previous flushing decisions are revisited.
-  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
+  isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
 
   if (FLAG_trace_code_flushing) {
     PrintF("[code-flushing abandons function-info: ");
@@ -1067,8 +1045,9 @@
   Object* undefined = isolate_->heap()->undefined_value();
 
   // Make sure previous flushing decisions are revisited.
-  isolate_->heap()->incremental_marking()->RecordWrites(function);
-  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
+  isolate_->heap()->incremental_marking()->IterateBlackObject(function);
+  isolate_->heap()->incremental_marking()->IterateBlackObject(
+      function->shared());
 
   if (FLAG_trace_code_flushing) {
     PrintF("[code-flushing abandons closure: ");
@@ -1231,9 +1210,11 @@
       // was marked through the compilation cache before marker reached JSRegExp
       // object.
       FixedArray* data = FixedArray::cast(re->data());
-      Object** slot =
-          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
-      heap->mark_compact_collector()->RecordSlot(data, slot, code);
+      if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
+        Object** slot =
+            data->data_start() + JSRegExp::saved_code_index(is_one_byte);
+        heap->mark_compact_collector()->RecordSlot(data, slot, code);
+      }
 
       // Set a number in the 0-255 range to guarantee no smi overflow.
       re->SetDataAt(JSRegExp::code_index(is_one_byte),
@@ -1353,12 +1334,6 @@
   // If code flushing is disabled, there is no need to prepare for it.
   if (!is_code_flushing_enabled()) return;
 
-  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
-  // relies on it being marked before any other descriptor array.
-  HeapObject* descriptor_array = heap()->empty_descriptor_array();
-  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
-  MarkObject(descriptor_array, descriptor_array_mark);
-
   // Make sure we are not referencing the code from the stack.
   DCHECK(this == heap()->mark_compact_collector());
   PrepareThreadForCodeFlushing(heap()->isolate(),
@@ -1422,25 +1397,34 @@
 
 
 // Helper class for pruning the string table.
-template <bool finalize_external_strings>
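+// When record_slots is true, surviving string table entries are re-recorded
+// with the collector so that references into evacuation candidates are
+// updated when their targets move.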
+template <bool finalize_external_strings, bool record_slots>
 class StringTableCleaner : public ObjectVisitor {
  public:
-  explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
+  StringTableCleaner(Heap* heap, HeapObject* table)
+      : heap_(heap), pointers_removed_(0), table_(table) {
+    DCHECK(!record_slots || table != nullptr);
+  }
 
   void VisitPointers(Object** start, Object** end) override {
     // Visit all HeapObject pointers in [start, end).
+    MarkCompactCollector* collector = heap_->mark_compact_collector();
     for (Object** p = start; p < end; p++) {
       Object* o = *p;
-      if (o->IsHeapObject() &&
-          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
-        if (finalize_external_strings) {
-          DCHECK(o->IsExternalString());
-          heap_->FinalizeExternalString(String::cast(*p));
-        } else {
-          pointers_removed_++;
+      if (o->IsHeapObject()) {
+        if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
+          if (finalize_external_strings) {
+            DCHECK(o->IsExternalString());
+            heap_->FinalizeExternalString(String::cast(*p));
+          } else {
+            pointers_removed_++;
+          }
+          // Set the entry to the_hole_value (as deleted).
+          *p = heap_->the_hole_value();
+        } else if (record_slots) {
+          // StringTable contains only old space strings.
+          DCHECK(!heap_->InNewSpace(o));
+          collector->RecordSlot(table_, p, o);
         }
-        // Set the entry to the_hole_value (as deleted).
-        *p = heap_->the_hole_value();
       }
     }
   }
@@ -1453,12 +1437,11 @@
  private:
   Heap* heap_;
   int pointers_removed_;
+  HeapObject* table_;
 };
 
-
-typedef StringTableCleaner<false> InternalizedStringTableCleaner;
-typedef StringTableCleaner<true> ExternalStringTableCleaner;
-
+typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
+typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
 
 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
 // are retained.
@@ -1504,7 +1487,6 @@
   }
 }
 
-
 void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
   DCHECK(!marking_deque()->IsFull());
   LiveObjectIterator<kGreyObjects> it(p);
@@ -1518,6 +1500,39 @@
   }
 }
 
+class RecordMigratedSlotVisitor final : public ObjectVisitor {
+ public:
+  inline void VisitPointer(Object** p) final {
+    RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
+  }
+
+  inline void VisitPointers(Object** start, Object** end) final {
+    while (start < end) {
+      RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
+      ++start;
+    }
+  }
+
+  inline void VisitCodeEntry(Address code_entry_slot) final {
+    Address code_entry = Memory::Address_at(code_entry_slot);
+    if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+      RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
+                                             CODE_ENTRY_SLOT, code_entry_slot);
+    }
+  }
+
+ private:
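+  // Records a single migrated slot: pointers into new space go into the
+  // OLD_TO_NEW set, pointers to evacuation candidates into OLD_TO_OLD.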
+  inline void RecordMigratedSlot(Object* value, Address slot) {
+    if (value->IsHeapObject()) {
+      Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+      if (p->InNewSpace()) {
+        RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+      } else if (p->IsEvacuationCandidate()) {
+        RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+      }
+    }
+  }
+};
 
 class MarkCompactCollector::HeapObjectVisitor {
  public:
@@ -1525,39 +1540,82 @@
   virtual bool Visit(HeapObject* object) = 0;
 };
 
-
 class MarkCompactCollector::EvacuateVisitorBase
     : public MarkCompactCollector::HeapObjectVisitor {
- public:
-  EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
-                      SlotsBuffer** evacuation_slots_buffer,
-                      LocalStoreBuffer* local_store_buffer)
-      : heap_(heap),
-        evacuation_slots_buffer_(evacuation_slots_buffer),
-        compaction_spaces_(compaction_spaces),
-        local_store_buffer_(local_store_buffer) {}
+ protected:
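+  // kFast migrates objects without notifications; kProfiled additionally
+  // reports code and object moves to the profiler, logger and heap profiler.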
+  enum MigrationMode { kFast, kProfiled };
 
-  bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
-                         HeapObject** target_object) {
+  EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        profiling_(
+            heap->isolate()->cpu_profiler()->is_profiling() ||
+            heap->isolate()->logger()->is_logging_code_events() ||
+            heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
+
+  inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+                                HeapObject** target_object) {
     int size = object->Size();
     AllocationAlignment alignment = object->RequiredAlignment();
     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
     if (allocation.To(target_object)) {
-      heap_->mark_compact_collector()->MigrateObject(
-          *target_object, object, size, target_space->identity(),
-          evacuation_slots_buffer_, local_store_buffer_);
+      MigrateObject(*target_object, object, size, target_space->identity());
       return true;
     }
     return false;
   }
 
- protected:
-  Heap* heap_;
-  SlotsBuffer** evacuation_slots_buffer_;
-  CompactionSpaceCollection* compaction_spaces_;
-  LocalStoreBuffer* local_store_buffer_;
-};
+  inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+                            AllocationSpace dest) {
+    if (profiling_) {
+      MigrateObject<kProfiled>(dst, src, size, dest);
+    } else {
+      MigrateObject<kFast>(dst, src, size, dest);
+    }
+  }
 
+  template <MigrationMode mode>
+  inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+                            AllocationSpace dest) {
+    Address dst_addr = dst->address();
+    Address src_addr = src->address();
+    DCHECK(heap_->AllowedToBeMigrated(src, dest));
+    DCHECK(dest != LO_SPACE);
+    if (dest == OLD_SPACE) {
+      DCHECK_OBJECT_SIZE(size);
+      DCHECK(IsAligned(size, kPointerSize));
+      heap_->CopyBlock(dst_addr, src_addr, size);
+      if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
+        PROFILE(heap_->isolate(),
+                CodeMoveEvent(AbstractCode::cast(src), dst_addr));
+      }
+      RecordMigratedSlotVisitor visitor;
+      dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
+    } else if (dest == CODE_SPACE) {
+      DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
+      if (mode == kProfiled) {
+        PROFILE(heap_->isolate(),
+                CodeMoveEvent(AbstractCode::cast(src), dst_addr));
+      }
+      heap_->CopyBlock(dst_addr, src_addr, size);
+      RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
+                                             RELOCATED_CODE_OBJECT, dst_addr);
+      Code::cast(dst)->Relocate(dst_addr - src_addr);
+    } else {
+      DCHECK_OBJECT_SIZE(size);
+      DCHECK(dest == NEW_SPACE);
+      heap_->CopyBlock(dst_addr, src_addr, size);
+    }
+    if (mode == kProfiled) {
+      heap_->OnMoveEvent(dst, src, size);
+    }
+    Memory::Address_at(src_addr) = dst_addr;
+  }
+
+  Heap* heap_;
+  CompactionSpaceCollection* compaction_spaces_;
+  bool profiling_;
+};
 
 class MarkCompactCollector::EvacuateNewSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
@@ -1567,11 +1625,8 @@
 
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
-                                   SlotsBuffer** evacuation_slots_buffer,
-                                   LocalStoreBuffer* local_store_buffer,
                                    HashMap* local_pretenuring_feedback)
-      : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
-                            local_store_buffer),
+      : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
@@ -1596,10 +1651,7 @@
     }
     HeapObject* target = nullptr;
     AllocationSpace space = AllocateTargetObject(object, &target);
-    heap_->mark_compact_collector()->MigrateObject(
-        HeapObject::cast(target), object, size, space,
-        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
-        (space == NEW_SPACE) ? nullptr : local_store_buffer_);
+    MigrateObject(HeapObject::cast(target), object, size, space);
     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
     }
@@ -1718,11 +1770,8 @@
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
-                          CompactionSpaceCollection* compaction_spaces,
-                          SlotsBuffer** evacuation_slots_buffer,
-                          LocalStoreBuffer* local_store_buffer)
-      : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
-                            local_store_buffer) {}
+                          CompactionSpaceCollection* compaction_spaces)
+      : EvacuateVisitorBase(heap, compaction_spaces) {}
 
   bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
@@ -1741,7 +1790,9 @@
   PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
-    DiscoverGreyObjectsOnPage(p);
+    if (!p->IsFlagSet(Page::BLACK_PAGE)) {
+      DiscoverGreyObjectsOnPage(p);
+    }
     if (marking_deque()->IsFull()) return;
   }
 }
@@ -2030,7 +2081,7 @@
 
 
 void MarkCompactCollector::MarkLiveObjects() {
-  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
     start_time = heap_->MonotonicallyIncreasingTimeInMs();
@@ -2041,8 +2092,7 @@
   PostponeInterruptsScope postpone(isolate());
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
     IncrementalMarking* incremental_marking = heap_->incremental_marking();
     if (was_marked_incrementally_) {
       incremental_marking->Finalize();
@@ -2064,27 +2114,30 @@
       MarkCompactCollector::kMaxMarkingDequeSize);
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
     PrepareForCodeFlushing();
   }
 
   RootMarkingVisitor root_visitor(heap());
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
     MarkRoots(&root_visitor);
     ProcessTopOptimizedFrame(&root_visitor);
   }
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
 
     // The objects reachable from the roots are marked, yet unreachable
     // objects are unmarked.  Mark objects reachable due to host
     // application specific logic or through Harmony weak maps.
-    ProcessEphemeralMarking(&root_visitor, false);
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
+      ProcessEphemeralMarking(&root_visitor, false);
+      ProcessMarkingDeque();
+    }
 
     // The objects reachable from the roots, weak maps or object groups
     // are marked. Objects pointed to only by weak global handles cannot be
@@ -2093,18 +2146,32 @@
     //
     // First we identify nonlive weak handles and mark them as pending
     // destruction.
-    heap()->isolate()->global_handles()->IdentifyWeakHandles(
-        &IsUnmarkedHeapObject);
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
+      heap()->isolate()->global_handles()->IdentifyWeakHandles(
+          &IsUnmarkedHeapObject);
+      ProcessMarkingDeque();
+    }
     // Then we mark the objects.
-    heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-    ProcessMarkingDeque();
+
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
+      heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+      ProcessMarkingDeque();
+    }
 
     // Repeat Harmony weak maps marking to mark unmarked objects reachable from
     // the weak roots we just marked as pending destruction.
     //
     // We only process harmony collections, as all object groups have been fully
     // processed and no weakly reachable node can discover new objects groups.
-    ProcessEphemeralMarking(&root_visitor, true);
+    {
+      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
+      ProcessEphemeralMarking(&root_visitor, true);
+      ProcessMarkingDeque();
+    }
   }
 
   if (FLAG_print_cumulative_gc_stat) {
@@ -2121,36 +2188,33 @@
 
 
 void MarkCompactCollector::ClearNonLiveReferences() {
-  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_CLEAR_STRING_TABLE);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
 
     // Prune the string table removing all strings only pointed to by the
     // string table.  Cannot use string_table() here because the string
     // table is marked.
     StringTable* string_table = heap()->string_table();
-    InternalizedStringTableCleaner internalized_visitor(heap());
+    InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
     string_table->IterateElements(&internalized_visitor);
     string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
 
-    ExternalStringTableCleaner external_visitor(heap());
+    ExternalStringTableCleaner external_visitor(heap(), nullptr);
     heap()->external_string_table_.Iterate(&external_visitor);
     heap()->external_string_table_.CleanUp();
   }
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
     // Process the weak references.
     MarkCompactWeakObjectRetainer mark_compact_object_retainer;
     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
   }
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
 
     // Remove object groups after marking phase.
     heap()->isolate()->global_handles()->RemoveObjectGroups();
@@ -2159,8 +2223,7 @@
 
   // Flush code from collected candidates.
   if (is_code_flushing_enabled()) {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
     code_flusher_->ProcessCandidates();
   }
 
@@ -2170,7 +2233,7 @@
   ClearWeakCells(&non_live_map_list, &dependent_code_list);
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
     ClearSimpleMapTransitions(non_live_map_list);
     ClearFullMapTransitions();
   }
@@ -2179,14 +2242,13 @@
 
   ClearWeakCollections();
 
-  ClearInvalidStoreAndSlotsBufferEntries();
+  ClearInvalidRememberedSetSlots();
 }
 
 
 void MarkCompactCollector::MarkDependentCodeForDeoptimization(
     DependentCode* list_head) {
-  GCTracer::Scope gc_scope(heap()->tracer(),
-                           GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
   Isolate* isolate = this->isolate();
   DependentCode* current = list_head;
   while (current->length() > 0) {
@@ -2407,8 +2469,7 @@
 
 
 void MarkCompactCollector::ClearWeakCollections() {
-  GCTracer::Scope gc_scope(heap()->tracer(),
-                           GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
   Object* weak_collection_obj = heap()->encountered_weak_collections();
   while (weak_collection_obj != Smi::FromInt(0)) {
     JSWeakCollection* weak_collection =
@@ -2445,8 +2506,7 @@
 void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
                                           DependentCode** dependent_code_list) {
   Heap* heap = this->heap();
-  GCTracer::Scope gc_scope(heap->tracer(),
-                           GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
+  TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
   Object* weak_cell_obj = heap->encountered_weak_cells();
   Object* the_hole_value = heap->the_hole_value();
   DependentCode* dependent_code_head =
@@ -2541,215 +2601,77 @@
   heap()->set_encountered_transition_arrays(Smi::FromInt(0));
 }
 
-void MarkCompactCollector::RecordMigratedSlot(
-    Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
-    LocalStoreBuffer* local_store_buffer) {
-  // When parallel compaction is in progress, store and slots buffer entries
-  // require synchronization.
-  if (heap_->InNewSpace(value)) {
-    if (compaction_in_progress_) {
-      local_store_buffer->Record(slot);
-    } else {
-      Page* page = Page::FromAddress(slot);
-      RememberedSet<OLD_TO_NEW>::Insert(page, slot);
-    }
-  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
-    SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
-                       reinterpret_cast<Object**>(slot),
-                       SlotsBuffer::IGNORE_OVERFLOW);
-  }
-}
-
-
-void MarkCompactCollector::RecordMigratedCodeEntrySlot(
-    Address code_entry, Address code_entry_slot,
-    SlotsBuffer** evacuation_slots_buffer) {
-  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-    SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
-                       SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
-                       SlotsBuffer::IGNORE_OVERFLOW);
-  }
-}
-
-
-void MarkCompactCollector::RecordMigratedCodeObjectSlot(
-    Address code_object, SlotsBuffer** evacuation_slots_buffer) {
-  SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
-                     SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
-                     SlotsBuffer::IGNORE_OVERFLOW);
-}
-
-
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
   if (RelocInfo::IsCodeTarget(rmode)) {
-    return SlotsBuffer::CODE_TARGET_SLOT;
+    return CODE_TARGET_SLOT;
   } else if (RelocInfo::IsCell(rmode)) {
-    return SlotsBuffer::CELL_TARGET_SLOT;
+    return CELL_TARGET_SLOT;
   } else if (RelocInfo::IsEmbeddedObject(rmode)) {
-    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+    return EMBEDDED_OBJECT_SLOT;
   } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
-    return SlotsBuffer::DEBUG_TARGET_SLOT;
+    return DEBUG_TARGET_SLOT;
   }
   UNREACHABLE();
-  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+  return NUMBER_OF_SLOT_TYPES;
 }
 
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
-    SlotsBuffer::ObjectSlot slot) {
-  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
+                                           Object* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
   RelocInfo::Mode rmode = rinfo->rmode();
   if (target_page->IsEvacuationCandidate() &&
       (rinfo->host() == NULL ||
        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
     Address addr = rinfo->pc();
-    SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+    SlotType slot_type = SlotTypeForRMode(rmode);
     if (rinfo->IsInConstantPool()) {
       addr = rinfo->constant_pool_entry_address();
       if (RelocInfo::IsCodeTarget(rmode)) {
-        slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+        slot_type = CODE_ENTRY_SLOT;
       } else {
         DCHECK(RelocInfo::IsEmbeddedObject(rmode));
-        slot_type = SlotsBuffer::OBJECT_SLOT;
+        slot_type = OBJECT_SLOT;
       }
     }
-    bool success = SlotsBuffer::AddTo(
-        slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
-        addr, SlotsBuffer::FAIL_ON_OVERFLOW);
-    if (!success) {
-      EvictPopularEvacuationCandidate(target_page);
-    }
+    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
   }
 }
 
-
-class RecordMigratedSlotVisitor final : public ObjectVisitor {
- public:
-  RecordMigratedSlotVisitor(MarkCompactCollector* collector,
-                            SlotsBuffer** evacuation_slots_buffer,
-                            LocalStoreBuffer* local_store_buffer)
-      : collector_(collector),
-        evacuation_slots_buffer_(evacuation_slots_buffer),
-        local_store_buffer_(local_store_buffer) {}
-
-  V8_INLINE void VisitPointer(Object** p) override {
-    collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
-                                   evacuation_slots_buffer_,
-                                   local_store_buffer_);
-  }
-
-  V8_INLINE void VisitPointers(Object** start, Object** end) override {
-    while (start < end) {
-      collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
-                                     evacuation_slots_buffer_,
-                                     local_store_buffer_);
-      ++start;
-    }
-  }
-
-  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
-    if (collector_->compacting_) {
-      Address code_entry = Memory::Address_at(code_entry_slot);
-      collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
-                                              evacuation_slots_buffer_);
-    }
-  }
-
- private:
-  MarkCompactCollector* collector_;
-  SlotsBuffer** evacuation_slots_buffer_;
-  LocalStoreBuffer* local_store_buffer_;
-};
-
-
-// We scavenge new space simultaneously with sweeping. This is done in two
-// passes.
-//
-// The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space.  Forwarding address is written directly into
-// first word of object without any encoding.  If object is dead we write
-// NULL as a forwarding address.
-//
-// The second pass updates pointers to new space in all spaces.  It is possible
-// to encounter pointers to dead new space objects during traversal of pointers
-// to new space.  We should clear them to avoid encountering them during next
-// pointer iteration.  This is an issue if the store buffer overflows and we
-// have to scan the entire old space, including dead objects, looking for
-// pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
-                                         int size, AllocationSpace dest,
-                                         SlotsBuffer** evacuation_slots_buffer,
-                                         LocalStoreBuffer* local_store_buffer) {
-  Address dst_addr = dst->address();
-  Address src_addr = src->address();
-  DCHECK(heap()->AllowedToBeMigrated(src, dest));
-  DCHECK(dest != LO_SPACE);
-  if (dest == OLD_SPACE) {
-    DCHECK_OBJECT_SIZE(size);
-    DCHECK(evacuation_slots_buffer != nullptr);
-    DCHECK(IsAligned(size, kPointerSize));
-
-    heap()->MoveBlock(dst->address(), src->address(), size);
-    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
-                                      local_store_buffer);
-    dst->IterateBody(&visitor);
-  } else if (dest == CODE_SPACE) {
-    DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
-    DCHECK(evacuation_slots_buffer != nullptr);
-    PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
-    heap()->MoveBlock(dst_addr, src_addr, size);
-    RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
-    Code::cast(dst)->Relocate(dst_addr - src_addr);
-  } else {
-    DCHECK_OBJECT_SIZE(size);
-    DCHECK(evacuation_slots_buffer == nullptr);
-    DCHECK(dest == NEW_SPACE);
-    heap()->MoveBlock(dst_addr, src_addr, size);
-  }
-  heap()->OnMoveEvent(dst, src, size);
-  Memory::Address_at(src_addr) = dst_addr;
-}
-
-
-static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
-                              SlotsBuffer::SlotType slot_type, Address addr) {
+static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
+                                   SlotType slot_type, Address addr) {
   switch (slot_type) {
-    case SlotsBuffer::CODE_TARGET_SLOT: {
+    case CODE_TARGET_SLOT: {
       RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
       rinfo.Visit(isolate, v);
       break;
     }
-    case SlotsBuffer::CELL_TARGET_SLOT: {
+    case CELL_TARGET_SLOT: {
       RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
       rinfo.Visit(isolate, v);
       break;
     }
-    case SlotsBuffer::CODE_ENTRY_SLOT: {
+    case CODE_ENTRY_SLOT: {
       v->VisitCodeEntry(addr);
       break;
     }
-    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+    case RELOCATED_CODE_OBJECT: {
       HeapObject* obj = HeapObject::FromAddress(addr);
       Code::BodyDescriptor::IterateBody(obj, v);
       break;
     }
-    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+    case DEBUG_TARGET_SLOT: {
       RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
                       NULL);
       if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
       break;
     }
-    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+    case EMBEDDED_OBJECT_SLOT: {
       RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
       rinfo.Visit(isolate, v);
       break;
     }
-    case SlotsBuffer::OBJECT_SLOT: {
+    case OBJECT_SLOT: {
       v->VisitPointer(reinterpret_cast<Object**>(addr));
       break;
     }
@@ -2853,48 +2775,6 @@
   Heap* heap_;
 };
 
-
-void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
-  PointersUpdatingVisitor v(heap_);
-  size_t buffer_size = buffer->Size();
-
-  for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
-    SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
-    if (!SlotsBuffer::IsTypedSlot(slot)) {
-      PointersUpdatingVisitor::UpdateSlot(heap_, slot);
-    } else {
-      ++slot_idx;
-      DCHECK(slot_idx < buffer_size);
-      UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
-                 reinterpret_cast<Address>(buffer->Get(slot_idx)));
-    }
-  }
-}
-
-
-void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
-  while (buffer != NULL) {
-    UpdateSlots(buffer);
-    buffer = buffer->next();
-  }
-}
-
-
-static void UpdatePointer(HeapObject** address, HeapObject* object) {
-  MapWord map_word = object->map_word();
-  // Since we only filter invalid slots in old space, the store buffer can
-  // still contain stale pointers in large object and in map spaces. Ignore
-  // these pointers here.
-  DCHECK(map_word.IsForwardingAddress() ||
-         !object->GetHeap()->old_space()->Contains(
-             reinterpret_cast<Address>(address)));
-  if (map_word.IsForwardingAddress()) {
-    // Update the corresponding slot.
-    *address = map_word.ToForwardingAddress();
-  }
-}
-
-
 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                          Object** p) {
   MapWord map_word = HeapObject::cast(*p)->map_word();
@@ -2906,21 +2786,15 @@
   return String::cast(*p);
 }
 
-
-bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
-                                               HeapObject** out_object) {
+bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
   Space* owner = p->owner();
-  if (owner == heap_->lo_space() || owner == NULL) {
-    Object* large_object = heap_->lo_space()->FindObject(slot);
-    // This object has to exist, otherwise we would not have recorded a slot
-    // for it.
-    CHECK(large_object->IsHeapObject());
-    HeapObject* large_heap_object = HeapObject::cast(large_object);
-    if (IsMarked(large_heap_object)) {
-      *out_object = large_heap_object;
-      return true;
-    }
-    return false;
+  DCHECK(owner != heap_->lo_space() && owner != nullptr);
+  USE(owner);
+
+  // If we are on a black page, we cannot easily find the actual object start.
+  // Conservatively return true without computing the containing object.
+  if (p->IsFlagSet(Page::BLACK_PAGE)) {
+    return true;
   }
 
   uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
@@ -2995,66 +2869,49 @@
     // in a live object.
     // Slots pointing to the first word of an object are invalid and removed.
     // This can happen when we move the object header while left trimming.
-    *out_object = object;
     return true;
   }
   return false;
 }
 
-
-bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
-  // This function does not support large objects right now.
+HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
+  Page* p = Page::FromAddress(slot);
   Space* owner = p->owner();
-  if (owner == heap_->lo_space() || owner == NULL) {
+  if (owner == heap_->lo_space() || owner == nullptr) {
     Object* large_object = heap_->lo_space()->FindObject(slot);
     // This object has to exist, otherwise we would not have recorded a slot
     // for it.
     CHECK(large_object->IsHeapObject());
     HeapObject* large_heap_object = HeapObject::cast(large_object);
+
     if (IsMarked(large_heap_object)) {
-      return true;
+      return large_heap_object;
     }
-    return false;
+    return nullptr;
   }
 
-  LiveObjectIterator<kBlackObjects> it(p);
-  HeapObject* object = NULL;
-  while ((object = it.Next()) != NULL) {
-    int size = object->Size();
-
-    if (object->address() > slot) return false;
-    if (object->address() <= slot && slot < (object->address() + size)) {
-      return true;
+  if (p->IsFlagSet(Page::BLACK_PAGE)) {
+    HeapObjectIterator it(p);
+    HeapObject* object = nullptr;
+    while ((object = it.Next()) != nullptr) {
+      int size = object->Size();
+      if (object->address() > slot) return nullptr;
+      if (object->address() <= slot && slot < (object->address() + size)) {
+        return object;
+      }
+    }
+  } else {
+    LiveObjectIterator<kBlackObjects> it(p);
+    HeapObject* object = nullptr;
+    while ((object = it.Next()) != nullptr) {
+      int size = object->Size();
+      if (object->address() > slot) return nullptr;
+      if (object->address() <= slot && slot < (object->address() + size)) {
+        return object;
+      }
     }
   }
-  return false;
-}
-
-
-bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
-  HeapObject* object = NULL;
-  // The target object is black but we don't know if the source slot is black.
-  // The source object could have died and the slot could be part of a free
-  // space. Find out based on mark bits if the slot is part of a live object.
-  if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
-    return false;
-  }
-
-  DCHECK(object != NULL);
-  int offset = static_cast<int>(slot - object->address());
-  return object->IsValidSlot(offset);
-}
-
-
-void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
-                                                    HeapObject* object) {
-  // The target object has to be black.
-  CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-  // The target object is black but we don't know if the source slot is black.
-  // The source object could have died and the slot could be part of a free
-  // space. Use the mark bit iterator to find out about liveness of the slot.
-  CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
+  return nullptr;
 }
 
 
@@ -3074,36 +2931,20 @@
 }
 
 
-void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
-    SlotsBuffer* evacuation_slots_buffer) {
-  base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
-  evacuation_slots_buffers_.Add(evacuation_slots_buffer);
-}
-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
-  Evacuator(MarkCompactCollector* collector,
-            const List<Page*>& evacuation_candidates,
-            const List<NewSpacePage*>& newspace_evacuation_candidates)
+  explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
-        evacuation_candidates_(evacuation_candidates),
-        newspace_evacuation_candidates_(newspace_evacuation_candidates),
         compaction_spaces_(collector->heap()),
-        local_slots_buffer_(nullptr),
-        local_store_buffer_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
-                           &local_slots_buffer_, &local_store_buffer_,
                            &local_pretenuring_feedback_),
-        old_space_visitor_(collector->heap(), &compaction_spaces_,
-                           &local_slots_buffer_, &local_store_buffer_),
+        old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
-        bytes_compacted_(0),
-        task_id_(0) {}
+        bytes_compacted_(0) {}
 
-  // Evacuate the configured set of pages in parallel.
-  inline void EvacuatePages();
+  inline bool EvacuatePage(MemoryChunk* chunk);
 
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -3111,9 +2952,6 @@
 
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
-  uint32_t task_id() { return task_id_; }
-  void set_task_id(uint32_t id) { task_id_ = id; }
-
  private:
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
@@ -3128,77 +2966,58 @@
 
   MarkCompactCollector* collector_;
 
-  // Pages to process.
-  const List<Page*>& evacuation_candidates_;
-  const List<NewSpacePage*>& newspace_evacuation_candidates_;
-
   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
-  SlotsBuffer* local_slots_buffer_;
-  LocalStoreBuffer local_store_buffer_;
   HashMap local_pretenuring_feedback_;
 
-  // Vistors for the corresponding spaces.
+  // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
   EvacuateOldSpaceVisitor old_space_visitor_;
 
   // Bookkeeping info.
   double duration_;
   intptr_t bytes_compacted_;
-
-  // Task id, if this evacuator is executed on a background task instead of
-  // the main thread. Can be used to try to abort the task currently scheduled
-  // to executed to evacuate pages.
-  uint32_t task_id_;
 };
 
 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
     MemoryChunk* p, HeapObjectVisitor* visitor) {
-  bool success = true;
-  if (p->parallel_compaction_state().TrySetValue(
-          MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
-    if (p->IsEvacuationCandidate() || p->InNewSpace()) {
-      DCHECK_EQ(p->parallel_compaction_state().Value(),
-                MemoryChunk::kCompactingInProgress);
-      int saved_live_bytes = p->LiveBytes();
-      double evacuation_time;
-      {
-        AlwaysAllocateScope always_allocate(heap()->isolate());
-        TimedScope timed_scope(&evacuation_time);
-        success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
-      }
-      if (success) {
-        ReportCompactionProgress(evacuation_time, saved_live_bytes);
-        p->parallel_compaction_state().SetValue(
-            MemoryChunk::kCompactingFinalize);
-      } else {
-        p->parallel_compaction_state().SetValue(
-            MemoryChunk::kCompactingAborted);
-      }
-    } else {
-      // There could be popular pages in the list of evacuation candidates
-      // which we do not compact.
-      p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-    }
+  bool success = false;
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  int saved_live_bytes = p->LiveBytes();
+  double evacuation_time;
+  {
+    AlwaysAllocateScope always_allocate(heap()->isolate());
+    TimedScope timed_scope(&evacuation_time);
+    success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+  }
+  if (FLAG_trace_evacuation) {
+    PrintIsolate(heap()->isolate(),
+                 "evacuation[%p]: page=%p new_space=%d executable=%d "
+                 "live_bytes=%d time=%f\n",
+                 this, p, p->InNewSpace(),
+                 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
+                 evacuation_time);
+  }
+  if (success) {
+    ReportCompactionProgress(evacuation_time, saved_live_bytes);
   }
   return success;
 }
 
-void MarkCompactCollector::Evacuator::EvacuatePages() {
-  for (NewSpacePage* p : newspace_evacuation_candidates_) {
-    DCHECK(p->InNewSpace());
-    DCHECK_EQ(p->concurrent_sweeping_state().Value(),
+bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
+  bool success = false;
+  if (chunk->InNewSpace()) {
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
               NewSpacePage::kSweepingDone);
-    bool success = EvacuateSinglePage(p, &new_space_visitor_);
+    success = EvacuateSinglePage(chunk, &new_space_visitor_);
     DCHECK(success);
     USE(success);
+  } else {
+    DCHECK(chunk->IsEvacuationCandidate());
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+    success = EvacuateSinglePage(chunk, &old_space_visitor_);
   }
-  for (Page* p : evacuation_candidates_) {
-    DCHECK(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
-    DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    EvacuateSinglePage(p, &old_space_visitor_);
-  }
+  return success;
 }
 
 void MarkCompactCollector::Evacuator::Finalize() {
@@ -3213,33 +3032,8 @@
       new_space_visitor_.promoted_size() +
       new_space_visitor_.semispace_copied_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
-  local_store_buffer_.Process(heap()->store_buffer());
-  collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
 }
 
-class MarkCompactCollector::CompactionTask : public CancelableTask {
- public:
-  explicit CompactionTask(Heap* heap, Evacuator* evacuator)
-      : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
-    evacuator->set_task_id(id());
-  }
-
-  virtual ~CompactionTask() {}
-
- private:
-  // v8::internal::CancelableTask overrides.
-  void RunInternal() override {
-    evacuator_->EvacuatePages();
-    heap_->mark_compact_collector()
-        ->pending_compaction_tasks_semaphore_.Signal();
-  }
-
-  Heap* heap_;
-  Evacuator* evacuator_;
-
-  DISALLOW_COPY_AND_ASSIGN(CompactionTask);
-};
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                           intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
@@ -3252,15 +3046,17 @@
   const double kTargetCompactionTimeInMs = 1;
   const int kNumSweepingTasks = 3;
 
-  intptr_t compaction_speed =
+  double compaction_speed =
       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
 
-  const int available_cores =
-      Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
+  const int available_cores = Max(
+      1, static_cast<int>(
+             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
+             kNumSweepingTasks - 1);
   int tasks;
   if (compaction_speed > 0) {
-    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
-                                 compaction_speed / kTargetCompactionTimeInMs);
+    tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
+                                 kTargetCompactionTimeInMs);
   } else {
     tasks = pages;
   }
@@ -3268,132 +3064,96 @@
   return Min(available_cores, tasks_capped_pages);
 }
 
+class EvacuationJobTraits {
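+  // Traits for PageParallelJob: each page is evacuated on a worker task via
+  // ProcessPageInParallel and then finalized sequentially on the main thread.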
+ public:
+  typedef int* PerPageData;  // Pointer to number of aborted pages.
+  typedef MarkCompactCollector::Evacuator* PerTaskData;
 
-void MarkCompactCollector::EvacuatePagesInParallel() {
-  int num_pages = 0;
-  intptr_t live_bytes = 0;
-  for (Page* page : evacuation_candidates_) {
-    num_pages++;
-    live_bytes += page->LiveBytes();
-  }
-  for (NewSpacePage* page : newspace_evacuation_candidates_) {
-    num_pages++;
-    live_bytes += page->LiveBytes();
-  }
-  DCHECK_GE(num_pages, 1);
+  static const bool NeedSequentialFinalization = true;
 
-  // Used for trace summary.
-  intptr_t compaction_speed = 0;
-  if (FLAG_trace_fragmentation) {
-    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
+                                    MemoryChunk* chunk, PerPageData) {
+    return evacuator->EvacuatePage(chunk);
   }
 
-  const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
-
-  // Set up compaction spaces.
-  Evacuator** evacuators = new Evacuator*[num_tasks];
-  for (int i = 0; i < num_tasks; i++) {
-    evacuators[i] = new Evacuator(this, evacuation_candidates_,
-                                  newspace_evacuation_candidates_);
-  }
-
-  // Kick off parallel tasks.
-  StartParallelCompaction(evacuators, num_tasks);
-  // Wait for unfinished and not-yet-started tasks.
-  WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
-
-  // Finalize local evacuators by merging back all locally cached data.
-  for (int i = 0; i < num_tasks; i++) {
-    evacuators[i]->Finalize();
-    delete evacuators[i];
-  }
-  delete[] evacuators;
-
-  // Finalize pages sequentially.
-  for (NewSpacePage* p : newspace_evacuation_candidates_) {
-    DCHECK_EQ(p->parallel_compaction_state().Value(),
-              MemoryChunk::kCompactingFinalize);
-    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-  }
-
-  int abandoned_pages = 0;
-  for (Page* p : evacuation_candidates_) {
-    switch (p->parallel_compaction_state().Value()) {
-      case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+  static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
+                                       PerPageData data) {
+    if (chunk->InNewSpace()) {
+      DCHECK(success);
+    } else {
+      Page* p = static_cast<Page*>(chunk);
+      if (success) {
+        DCHECK(p->IsEvacuationCandidate());
+        DCHECK(p->SweepingDone());
+        p->Unlink();
+      } else {
         // We have partially compacted the page, i.e., some objects may have
         // moved, others are still in place.
         // We need to:
-        // - Leave the evacuation candidate flag for later processing of
-        //   slots buffer entries.
+        // - Leave the evacuation candidate flag for later processing of slots
+        //   buffer entries.
         // - Leave the slots buffer there for processing of entries added by
         //   the write barrier.
         // - Rescan the page as slot recording in the migration buffer only
         //   happens upon moving (which we potentially didn't do).
         // - Leave the page in the list of pages of a space since we could not
         //   fully evacuate it.
-        // - Mark them for rescanning for store buffer entries as we otherwise
-        //   might have stale store buffer entries that become "valid" again
-        //   after reusing the memory. Note that all existing store buffer
-        //   entries of such pages are filtered before rescanning.
         DCHECK(p->IsEvacuationCandidate());
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        abandoned_pages++;
-        break;
-      case MemoryChunk::kCompactingFinalize:
-        DCHECK(p->IsEvacuationCandidate());
-        DCHECK(p->SweepingDone());
-        p->Unlink();
-        break;
-      case MemoryChunk::kCompactingDone:
-        DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
-        DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-        break;
-      default:
-        // MemoryChunk::kCompactingInProgress.
-        UNREACHABLE();
-    }
-    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-  }
-  if (FLAG_trace_fragmentation) {
-    PrintIsolate(isolate(),
-                 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
-                 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
-                 "d compaction_speed=%" V8_PTR_PREFIX "d\n",
-                 isolate()->time_millis_since_init(), FLAG_parallel_compaction,
-                 num_pages, abandoned_pages, num_tasks,
-                 base::SysInfo::NumberOfProcessors(), live_bytes,
-                 compaction_speed);
-  }
-}
-
-void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
-                                                   int len) {
-  compaction_in_progress_ = true;
-  for (int i = 1; i < len; i++) {
-    CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        task, v8::Platform::kShortRunningTask);
-  }
-
-  // Contribute on main thread.
-  evacuators[0]->EvacuatePages();
-}
-
-void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
-                                                        int len) {
-  // Try to cancel compaction tasks that have not been run (as they might be
-  // stuck in a worker queue). Tasks that cannot be canceled, have either
-  // already completed or are still running, hence we need to wait for their
-  // semaphore signal.
-  for (int i = 0; i < len; i++) {
-    if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
-            evacuators[i]->task_id())) {
-      pending_compaction_tasks_semaphore_.Wait();
+        *data += 1;
+      }
     }
   }
-  compaction_in_progress_ = false;
-}
+};
 
+void MarkCompactCollector::EvacuatePagesInParallel() {
+  PageParallelJob<EvacuationJobTraits> job(
+      heap_, heap_->isolate()->cancelable_task_manager());
+
+  int abandoned_pages = 0;
+  intptr_t live_bytes = 0;
+  for (Page* page : evacuation_candidates_) {
+    live_bytes += page->LiveBytes();
+    job.AddPage(page, &abandoned_pages);
+  }
+  for (NewSpacePage* page : newspace_evacuation_candidates_) {
+    live_bytes += page->LiveBytes();
+    job.AddPage(page, &abandoned_pages);
+  }
+  DCHECK_GE(job.NumberOfPages(), 1);
+
+  // Used for trace summary.
+  double compaction_speed = 0;
+  if (FLAG_trace_evacuation) {
+    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+  }
+
+  const int wanted_num_tasks =
+      NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
+  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    evacuators[i] = new Evacuator(this);
+  }
+  job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    evacuators[i]->Finalize();
+    delete evacuators[i];
+  }
+  delete[] evacuators;
+
+  if (FLAG_trace_evacuation) {
+    PrintIsolate(
+        isolate(),
+        "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d "
+        "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+        "d compaction_speed=%.f\n",
+        isolate()->time_millis_since_init(),
+        FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
+        abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
+        V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+        live_bytes, compaction_speed);
+  }
+}
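
The rewritten EvacuatePagesInParallel passes &abandoned_pages as per-page data so the sequential finalization step can count partially evacuated old-space pages. A small standalone sketch of that counting pattern (illustrative types, not the V8 classes):

    #include <cstdio>
    #include <vector>

    struct PageResult {
      bool in_new_space;
      bool success;
    };

    // Mirrors EvacuationJobTraits::FinalizePageSequentially: only old-space
    // pages that failed to evacuate bump the shared counter.
    void FinalizeSequentially(const PageResult& page, int* aborted_pages) {
      if (!page.in_new_space && !page.success) *aborted_pages += 1;
    }

    int main() {
      int aborted_pages = 0;
      std::vector<PageResult> results = {{false, true}, {false, false}, {true, true}};
      for (const PageResult& r : results) FinalizeSequentially(r, &aborted_pages);
      std::printf("aborted=%d\n", aborted_pages);  // prints aborted=1
      return 0;
    }
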
 
 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
  public:
@@ -3409,28 +3169,12 @@
   }
 };
 
-
 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
 
-
 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
 
-
 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
 
-
-template <MarkCompactCollector::SweepingParallelism mode>
-static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
-                     int size) {
-  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
-    DCHECK(free_list == NULL);
-    return space->Free(start, size);
-  } else {
-    return size - free_list->Free(start, size);
-  }
-}
-
-
 // Sweeps a page. After sweeping the page can be iterated.
 // Slots in live objects pointing into evacuation candidates are updated
 // if requested.
@@ -3439,9 +3183,9 @@
           MarkCompactCollector::SweepingParallelism parallelism,
           SkipListRebuildingMode skip_list_mode,
           FreeSpaceTreatmentMode free_space_mode>
-static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
-                 ObjectVisitor* v) {
+static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+  DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
@@ -3473,7 +3217,7 @@
       if (free_space_mode == ZAP_FREE_SPACE) {
         memset(free_start, 0xcc, size);
       }
-      freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+      freed_bytes = space->UnaccountedFree(free_start, size);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     }
     Map* map = object->synchronized_map();
@@ -3501,7 +3245,7 @@
     if (free_space_mode == ZAP_FREE_SPACE) {
       memset(free_start, 0xcc, size);
     }
-    freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+    freed_bytes = space->UnaccountedFree(free_start, size);
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   }
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
@@ -3521,8 +3265,10 @@
     // Ignore all slots that might have been recorded in the body of the
     // deoptimized code object. Assumption: no slots will be recorded for
     // this object after invalidating it.
-    RemoveObjectSlots(code->instruction_start(),
-                      code->address() + code->Size());
+    Page* page = Page::FromAddress(code->address());
+    Address start = code->instruction_start();
+    Address end = code->address() + code->Size();
+    RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
   }
 }
 
@@ -3533,21 +3279,6 @@
 }
 
 
-void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
-                                             Address end_slot) {
-  // Remove entries by replacing them with an old-space slot containing a smi
-  // that is located in an unmovable page.
-  for (Page* p : evacuation_candidates_) {
-    DCHECK(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    if (p->IsEvacuationCandidate()) {
-      SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
-                                     end_slot);
-    }
-  }
-}
-
-
 #ifdef VERIFY_HEAP
 static void VerifyAllBlackObjects(MemoryChunk* page) {
   LiveObjectIterator<kAllLiveObjects> it(page);
@@ -3629,33 +3360,36 @@
       switch (space->identity()) {
         case OLD_SPACE:
           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+                IGNORE_FREE_SPACE>(space, p, nullptr);
           break;
         case CODE_SPACE:
           if (FLAG_zap_code_space) {
             Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                  ZAP_FREE_SPACE>(space, NULL, p, nullptr);
+                  ZAP_FREE_SPACE>(space, p, nullptr);
           } else {
             Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                  IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
+                  IGNORE_FREE_SPACE>(space, p, nullptr);
           }
           break;
         default:
           UNREACHABLE();
           break;
       }
+      {
+        base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
+        swept_pages(space->identity())->Add(p);
+      }
     }
   }
 }
 
 
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
-  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
   Heap::RelocationLock relocation_lock(heap());
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
     EvacuationScope evacuation_scope(this);
 
     EvacuateNewSpacePrologue();
@@ -3673,8 +3407,7 @@
   heap()->FreeQueuedChunks();
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
     // After updating all pointers, we can finally sweep the aborted pages,
     // effectively overriding any forward pointers.
     SweepAbortedPages();
@@ -3695,127 +3428,170 @@
 #endif
 }
 
+template <PointerDirection direction>
+class PointerUpdateJobTraits {
+ public:
+  typedef int PerPageData;  // Per page data is not used in this job.
+  typedef PointersUpdatingVisitor* PerTaskData;
 
-void MarkCompactCollector::UpdatePointersAfterEvacuation() {
-  GCTracer::Scope gc_scope(heap()->tracer(),
-                           GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
-  {
-    GCTracer::Scope gc_scope(
-        heap()->tracer(),
-        GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
-    UpdateSlotsRecordedIn(migration_slots_buffer_);
-    if (FLAG_trace_fragmentation_verbose) {
-      PrintF("  migration slots buffer: %d\n",
-             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
-    }
-    slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
-    DCHECK(migration_slots_buffer_ == NULL);
-
-    // TODO(hpayer): Process the slots buffers in parallel. This has to be done
-    // after evacuation of all pages finishes.
-    int buffers = evacuation_slots_buffers_.length();
-    for (int i = 0; i < buffers; i++) {
-      SlotsBuffer* buffer = evacuation_slots_buffers_[i];
-      UpdateSlotsRecordedIn(buffer);
-      slots_buffer_allocator_->DeallocateChain(&buffer);
-    }
-    evacuation_slots_buffers_.Rewind(0);
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
+                                    MemoryChunk* chunk, PerPageData) {
+    UpdateUntypedPointers(heap, chunk);
+    UpdateTypedPointers(heap, chunk, visitor);
+    return true;
+  }
+  static const bool NeedSequentialFinalization = false;
+  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
   }
 
-  // Second pass: find pointers to new space and update them.
+ private:
+  static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
+    if (direction == OLD_TO_NEW) {
+      RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
+                                                    UpdateOldToNewSlot);
+    } else {
+      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
+        PointersUpdatingVisitor::UpdateSlot(heap,
+                                            reinterpret_cast<Object**>(slot));
+        return REMOVE_SLOT;
+      });
+    }
+  }
+
+  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
+                                  PointersUpdatingVisitor* visitor) {
+    if (direction == OLD_TO_OLD) {
+      Isolate* isolate = heap->isolate();
+      RememberedSet<OLD_TO_OLD>::IterateTyped(
+          chunk, [isolate, visitor](SlotType type, Address slot) {
+            UpdateTypedSlot(isolate, visitor, type, slot);
+            return REMOVE_SLOT;
+          });
+    }
+  }
+
+  static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
+    MapWord map_word = object->map_word();
+    // Since we only filter invalid slots in old space, the store buffer can
+    // still contain stale pointers in large object and in map spaces. Ignore
+    // these pointers here.
+    DCHECK(map_word.IsForwardingAddress() ||
+           !object->GetHeap()->old_space()->Contains(
+               reinterpret_cast<Address>(address)));
+    if (map_word.IsForwardingAddress()) {
+      // Update the corresponding slot.
+      *address = map_word.ToForwardingAddress();
+    }
+  }
+};
+
+int NumberOfPointerUpdateTasks(int pages) {
+  if (!FLAG_parallel_pointer_update) return 1;
+  const int kMaxTasks = 4;
+  const int kPagesPerTask = 4;
+  return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
+}
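
NumberOfPointerUpdateTasks gives roughly one task per started group of four pages, capped at four tasks, and falls back to a single task when FLAG_parallel_pointer_update is off. A standalone check of those values (plain C++, constants copied from the function above):

    #include <algorithm>
    #include <cstdio>

    int NumberOfPointerUpdateTasks(int pages, bool parallel_pointer_update) {
      if (!parallel_pointer_update) return 1;
      const int kMaxTasks = 4;
      const int kPagesPerTask = 4;
      // Ceiling division: one task per started group of four pages.
      return std::min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  NumberOfPointerUpdateTasks(1, true),     // 1
                  NumberOfPointerUpdateTasks(9, true),     // 3
                  NumberOfPointerUpdateTasks(100, true),   // 4 (capped)
                  NumberOfPointerUpdateTasks(100, false)); // 1
      return 0;
    }
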
+
+template <PointerDirection direction>
+void UpdatePointersInParallel(Heap* heap) {
+  PageParallelJob<PointerUpdateJobTraits<direction> > job(
+      heap, heap->isolate()->cancelable_task_manager());
+  RememberedSet<direction>::IterateMemoryChunks(
+      heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
+  PointersUpdatingVisitor visitor(heap);
+  int num_pages = job.NumberOfPages();
+  int num_tasks = NumberOfPointerUpdateTasks(num_pages);
+  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+}
+
+class ToSpacePointerUpdateJobTraits {
+ public:
+  typedef std::pair<Address, Address> PerPageData;
+  typedef PointersUpdatingVisitor* PerTaskData;
+
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
+                                    MemoryChunk* chunk, PerPageData limits) {
+    for (Address cur = limits.first; cur < limits.second;) {
+      HeapObject* object = HeapObject::FromAddress(cur);
+      Map* map = object->map();
+      int size = object->SizeFromMap(map);
+      object->IterateBody(map->instance_type(), size, visitor);
+      cur += size;
+    }
+    return true;
+  }
+  static const bool NeedSequentialFinalization = false;
+  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+  }
+};
+
+void UpdateToSpacePointersInParallel(Heap* heap) {
+  PageParallelJob<ToSpacePointerUpdateJobTraits> job(
+      heap, heap->isolate()->cancelable_task_manager());
+  Address space_start = heap->new_space()->bottom();
+  Address space_end = heap->new_space()->top();
+  NewSpacePageIterator it(space_start, space_end);
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address start =
+        page->Contains(space_start) ? space_start : page->area_start();
+    Address end = page->Contains(space_end) ? space_end : page->area_end();
+    job.AddPage(page, std::make_pair(start, end));
+  }
+  PointersUpdatingVisitor visitor(heap);
+  int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
+  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+}
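
UpdateToSpacePointersInParallel clamps each new-space page to the live [bottom, top) range before handing it to the job, so the first and last pages are only partially scanned. A minimal sketch of that clamping with plain addresses (FakePage stands in for NewSpacePage; not the V8 API):

    #include <cstdint>
    #include <cstdio>
    #include <utility>

    struct FakePage {
      uintptr_t area_start;
      uintptr_t area_end;
      bool Contains(uintptr_t a) const { return a >= area_start && a < area_end; }
    };

    // Mirrors the per-page limit computation above.
    std::pair<uintptr_t, uintptr_t> LimitsFor(const FakePage& page,
                                              uintptr_t space_start,
                                              uintptr_t space_end) {
      uintptr_t start = page.Contains(space_start) ? space_start : page.area_start;
      uintptr_t end = page.Contains(space_end) ? space_end : page.area_end;
      return {start, end};
    }

    int main() {
      FakePage page{0x1000, 0x2000};
      auto limits = LimitsFor(page, 0x1400, 0x5000);  // space starts mid-page
      std::printf("%#lx %#lx\n", (unsigned long)limits.first,
                  (unsigned long)limits.second);  // 0x1400 0x2000
      return 0;
    }
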
+
+void MarkCompactCollector::UpdatePointersAfterEvacuation() {
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+
   PointersUpdatingVisitor updating_visitor(heap());
 
   {
-    GCTracer::Scope gc_scope(
-        heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
-    // Update pointers in to space.
-    SemiSpaceIterator to_it(heap()->new_space());
-    for (HeapObject* object = to_it.Next(); object != NULL;
-         object = to_it.Next()) {
-      Map* map = object->map();
-      object->IterateBody(map->instance_type(), object->SizeFromMap(map),
-                          &updating_visitor);
-    }
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
+    UpdateToSpacePointersInParallel(heap_);
     // Update roots.
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-
-    RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
+    UpdatePointersInParallel<OLD_TO_NEW>(heap_);
   }
 
   {
-    GCTracer::Scope gc_scope(
-        heap()->tracer(),
-        GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
+    Heap* heap = this->heap();
+    TRACE_GC(heap->tracer(),
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
+    UpdatePointersInParallel<OLD_TO_OLD>(heap_);
+  }
+
+  {
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
     for (Page* p : evacuation_candidates_) {
-      DCHECK(p->IsEvacuationCandidate() ||
-             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+      DCHECK(p->IsEvacuationCandidate());
+      // Important: skip list should be cleared only after roots were updated
+      // because root iteration traverses the stack and might have to find
+      // code objects from a non-updated pc pointing into an evacuation candidate.
+      SkipList* list = p->skip_list();
+      if (list != NULL) list->Clear();
 
-      if (p->IsEvacuationCandidate()) {
-        UpdateSlotsRecordedIn(p->slots_buffer());
-        if (FLAG_trace_fragmentation_verbose) {
-          PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
-                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
-        }
-        slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
-
-        // Important: skip list should be cleared only after roots were updated
-        // because root iteration traverses the stack and might have to find
-        // code objects from non-updated pc pointing into evacuation candidate.
-        SkipList* list = p->skip_list();
-        if (list != NULL) list->Clear();
-
-        // First pass on aborted pages, fixing up all live objects.
-        if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-          p->ClearEvacuationCandidate();
-          VisitLiveObjectsBody(p, &updating_visitor);
-        }
-      }
-
-      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-        p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-
-        switch (space->identity()) {
-          case OLD_SPACE:
-            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
-                                                       &updating_visitor);
-            break;
-          case CODE_SPACE:
-            if (FLAG_zap_code_space) {
-              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                    REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
-                                                       &updating_visitor);
-            } else {
-              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                    REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
-                                                          &updating_visitor);
-            }
-            break;
-          default:
-            UNREACHABLE();
-            break;
-        }
+      // First pass on aborted pages, fixing up all live objects.
+      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+        p->ClearEvacuationCandidate();
+        VisitLiveObjectsBody(p, &updating_visitor);
       }
     }
   }
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
-    heap_->string_table()->Iterate(&updating_visitor);
-
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
     // Update pointers from external string table.
     heap_->UpdateReferencesInExternalStringTable(
         &UpdateReferenceInExternalStringTableEntry);
 
     EvacuationWeakObjectRetainer evacuation_object_retainer;
-    heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
   }
 }
 
@@ -3824,10 +3600,9 @@
   for (Page* p : evacuation_candidates_) {
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->area_start(), p->area_size());
     p->ResetLiveBytes();
     CHECK(p->SweepingDone());
-    space->ReleasePage(p, true);
+    space->ReleasePage(p);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
@@ -3866,25 +3641,20 @@
       return 0;
     }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-    FreeList* free_list;
-    FreeList private_free_list(space);
     if (space->identity() == OLD_SPACE) {
-      free_list = free_list_old_space_.get();
-      max_freed =
-          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                        IGNORE_FREE_SPACE>(space, page, NULL);
     } else if (space->identity() == CODE_SPACE) {
-      free_list = free_list_code_space_.get();
-      max_freed =
-          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
+                        IGNORE_FREE_SPACE>(space, page, NULL);
     } else {
-      free_list = free_list_map_space_.get();
-      max_freed =
-          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                        IGNORE_FREE_SPACE>(space, page, NULL);
     }
-    free_list->Concatenate(&private_free_list);
+    {
+      base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
+      swept_pages(space->identity())->Add(page);
+    }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
     page->mutex()->Unlock();
   }
@@ -3904,13 +3674,22 @@
     Page* p = it.next();
     DCHECK(p->SweepingDone());
 
-    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
-        p->IsEvacuationCandidate()) {
+    if (p->IsEvacuationCandidate()) {
       // Will be processed in EvacuateNewSpaceAndCandidates.
       DCHECK(evacuation_candidates_.length() > 0);
       continue;
     }
 
+    // We cannot sweep black pages, since all mark bits are set for these
+    // pages.
+    if (p->IsFlagSet(Page::BLACK_PAGE)) {
+      Bitmap::Clear(p);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+      p->ClearFlag(Page::BLACK_PAGE);
+      // TODO(hpayer): Free unused memory of last black page.
+      continue;
+    }
+
     if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
       // We need to sweep the page to get it into an iterable state again. Note
       // that this adds unusable memory into the free list that is later on
@@ -3918,7 +3697,7 @@
       // testing this is fine.
       p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-            IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+            IGNORE_FREE_SPACE>(space, p, nullptr);
       continue;
     }
 
@@ -3928,7 +3707,7 @@
         if (FLAG_gc_verbose) {
           PrintIsolate(isolate(), "sweeping: released page: %p", p);
         }
-        space->ReleasePage(p, false);
+        space->ReleasePage(p);
         continue;
       }
       unused_page_present = true;
@@ -3951,7 +3730,7 @@
 
 
 void MarkCompactCollector::SweepSpaces() {
-  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
     start_time = heap_->MonotonicallyIncreasingTimeInMs();
@@ -3999,18 +3778,6 @@
   sweeping_list(heap()->map_space()).clear();
 }
 
-
-// TODO(1466) ReportDeleteIfNeeded is not called currently.
-// Our profiling tools do not expect intersections between
-// code objects. We should either reenable it or change our tools.
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
-                                                Isolate* isolate) {
-  if (obj->IsCode()) {
-    PROFILE(isolate, CodeDeleteEvent(obj->address()));
-  }
-}
-
-
 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
 
 
@@ -4019,41 +3786,13 @@
   IncrementalMarking::Initialize();
 }
 
-
-void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
-  if (FLAG_trace_fragmentation) {
-    PrintF("Page %p is too popular. Disabling evacuation.\n",
-           reinterpret_cast<void*>(page));
-  }
-
-  isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
-
-  // TODO(gc) If all evacuation candidates are too popular we
-  // should stop slots recording entirely.
-  page->ClearEvacuationCandidate();
-
-  DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
-  page->SetFlag(Page::POPULAR_PAGE);
-
-  // We were not collecting slots on this page that point
-  // to other evacuation candidates thus we have to
-  // rescan the page after evacuation to discover and update all
-  // pointers to evacuated objects.
-  page->SetFlag(Page::RESCAN_ON_EVACUATION);
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
+void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
                                                Code* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
   if (target_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(object)) {
-    if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
-                            target_page->slots_buffer_address(),
-                            SlotsBuffer::CODE_ENTRY_SLOT, slot,
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictPopularEvacuationCandidate(target_page);
-    }
+      !ShouldSkipEvacuationSlotRecording(host)) {
+    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
   }
 }
 
@@ -4067,7 +3806,7 @@
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
-      RecordRelocSlot(&rinfo, target);
+      RecordRelocSlot(host, &rinfo, target);
     }
   }
 }
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index cc5449f..cd207bc 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -25,9 +25,6 @@
 class MarkCompactCollector;
 class MarkingVisitor;
 class RootMarkingVisitor;
-class SlotsBuffer;
-class SlotsBufferAllocator;
-
 
 class Marking : public AllStatic {
  public:
@@ -160,6 +157,8 @@
 
   // Returns true if the transferred color is black.
   INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
+    if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
+      return true;
     MarkBit from_mark_bit = MarkBitFrom(from);
     MarkBit to_mark_bit = MarkBitFrom(to);
     DCHECK(Marking::IsWhite(to_mark_bit));
@@ -318,11 +317,89 @@
 // Defined in isolate.h.
 class ThreadLocalTop;
 
+class MarkBitCellIterator BASE_EMBEDDED {
+ public:
+  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
+    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+    cell_base_ = chunk_->area_start();
+    cell_index_ = Bitmap::IndexToCell(
+        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
+    cells_ = chunk_->markbits()->cells();
+  }
+
+  inline bool Done() { return cell_index_ == last_cell_index_; }
+
+  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
+
+  inline MarkBit::CellType* CurrentCell() {
+    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
+    return &cells_[cell_index_];
+  }
+
+  inline Address CurrentCellBase() {
+    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
+    return cell_base_;
+  }
+
+  inline void Advance() {
+    cell_index_++;
+    cell_base_ += 32 * kPointerSize;
+  }
+
+  // Returns the next mark bit cell. If there is no next cell, it returns 0.
+  inline MarkBit::CellType PeekNext() {
+    if (HasNext()) {
+      return cells_[cell_index_ + 1];
+    }
+    return 0;
+  }
+
+ private:
+  MemoryChunk* chunk_;
+  MarkBit::CellType* cells_;
+  unsigned int last_cell_index_;
+  unsigned int cell_index_;
+  Address cell_base_;
+};
+
+// Grey objects can happen on black pages when black objects transition to
+// grey, e.g. when calling RecordWrites on them.
+enum LiveObjectIterationMode {
+  kBlackObjects,
+  kGreyObjects,
+  kAllLiveObjects
+};
+
+template <LiveObjectIterationMode T>
+class LiveObjectIterator BASE_EMBEDDED {
+ public:
+  explicit LiveObjectIterator(MemoryChunk* chunk)
+      : chunk_(chunk),
+        it_(chunk_),
+        cell_base_(it_.CurrentCellBase()),
+        current_cell_(*it_.CurrentCell()) {
+    // Black pages cannot be iterated.
+    DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
+  }
+
+  HeapObject* Next();
+
+ private:
+  MemoryChunk* chunk_;
+  MarkBitCellIterator it_;
+  Address cell_base_;
+  MarkBit::CellType current_cell_;
+};
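
The mark-bitmap walks above advance one 32-bit cell at a time, each cell covering 32 pointer-sized slots (Advance() moves cell_base_ by 32 * kPointerSize). A standalone sketch of decoding live-object addresses from such cells (simplified types, GCC/Clang __builtin_ctz, and constants chosen only for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const uintptr_t kPointerSize = 8;
      const uintptr_t area_start = 0x10000;
      // Two mark-bit cells: bits 0 and 3 set in the first, bit 1 in the second.
      std::vector<uint32_t> cells = {0x9u, 0x2u};
      uintptr_t cell_base = area_start;
      for (uint32_t cell : cells) {
        while (cell != 0) {
          int bit = __builtin_ctz(cell);  // index of the lowest set mark bit
          uintptr_t object_address = cell_base + bit * kPointerSize;
          std::printf("live object at %#lx\n", (unsigned long)object_address);
          cell &= cell - 1;  // clear that bit and continue
        }
        cell_base += 32 * kPointerSize;  // the next cell covers the next 32 slots
      }
      return 0;
    }
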
 
 // -------------------------------------------------------------------------
 // Mark-Compact collector
 class MarkCompactCollector {
  public:
+  class Evacuator;
+
   enum IterationMode {
     kKeepMarking,
     kClearMarkbits,
@@ -395,8 +472,8 @@
         ->IsEvacuationCandidate();
   }
 
-  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
-  void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
+  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
+  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
   void RecordCodeTargetPatch(Address pc, Code* target);
   INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
   INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
@@ -405,11 +482,6 @@
   void UpdateSlots(SlotsBuffer* buffer);
   void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
 
-  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
-                     AllocationSpace to_old_space,
-                     SlotsBuffer** evacuation_slots_buffer,
-                     LocalStoreBuffer* local_store_buffer);
-
   void InvalidateCode(Code* code);
 
   void ClearMarkbits();
@@ -480,38 +552,35 @@
 
   void InitializeMarkingDeque();
 
-  // The following four methods can just be called after marking, when the
+  // The following two methods can just be called after marking, when the
   // whole transitive closure is known. They must be called before sweeping
   // when mark bits are still intact.
-  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
-  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
-  bool IsSlotInLiveObject(Address slot);
-  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
+  bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
+  HeapObject* FindBlackObjectBySlotSlow(Address slot);
 
   // Removes all the slots in the slot buffers that are within the given
   // address range.
   void RemoveObjectSlots(Address start_slot, Address end_slot);
 
-  //
-  // Free lists filled by sweeper and consumed by corresponding spaces
-  // (including compaction spaces).
-  //
-  base::SmartPointer<FreeList>& free_list_old_space() {
-    return free_list_old_space_;
-  }
-  base::SmartPointer<FreeList>& free_list_code_space() {
-    return free_list_code_space_;
-  }
-  base::SmartPointer<FreeList>& free_list_map_space() {
-    return free_list_map_space_;
+  base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
+  List<Page*>* swept_pages(AllocationSpace id) {
+    switch (id) {
+      case OLD_SPACE:
+        return &swept_old_space_pages_;
+      case CODE_SPACE:
+        return &swept_code_space_pages_;
+      case MAP_SPACE:
+        return &swept_map_space_pages_;
+      default:
+        UNREACHABLE();
+    }
+    return nullptr;
   }
 
  private:
-  class CompactionTask;
   class EvacuateNewSpaceVisitor;
   class EvacuateOldSpaceVisitor;
   class EvacuateVisitorBase;
-  class Evacuator;
   class HeapObjectVisitor;
   class SweeperTask;
 
@@ -520,8 +589,7 @@
   explicit MarkCompactCollector(Heap* heap);
 
   bool WillBeDeoptimized(Code* code);
-  void EvictPopularEvacuationCandidate(Page* page);
-  void ClearInvalidStoreAndSlotsBufferEntries();
+  void ClearInvalidRememberedSetSlots();
 
   void StartSweeperThreads();
 
@@ -550,10 +618,6 @@
 
   bool evacuation_;
 
-  SlotsBufferAllocator* slots_buffer_allocator_;
-
-  SlotsBuffer* migration_slots_buffer_;
-
   // Finishes GC, performs heap verification if enabled.
   void Finish();
 
@@ -707,17 +771,11 @@
   void EvacuateNewSpacePrologue();
   void EvacuateNewSpaceEpilogue();
 
-  void AddEvacuationSlotsBufferSynchronized(
-      SlotsBuffer* evacuation_slots_buffer);
-
   void EvacuatePagesInParallel();
 
   // The number of parallel compaction tasks, including the main thread.
   int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
 
-  void StartParallelCompaction(Evacuator** evacuators, int len);
-  void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);
-
   void EvacuateNewSpaceAndCandidates();
 
   void UpdatePointersAfterEvacuation();
@@ -743,19 +801,6 @@
   // swept in parallel.
   void ParallelSweepSpacesComplete();
 
-  // Updates store buffer and slot buffer for a pointer in a migrating object.
-  void RecordMigratedSlot(Object* value, Address slot,
-                          SlotsBuffer** evacuation_slots_buffer,
-                          LocalStoreBuffer* local_store_buffer);
-
-  // Adds the code entry slot to the slots buffer.
-  void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
-                                   SlotsBuffer** evacuation_slots_buffer);
-
-  // Adds the slot of a moved code object.
-  void RecordMigratedCodeObjectSlot(Address code_object,
-                                    SlotsBuffer** evacuation_slots_buffer);
-
 #ifdef DEBUG
   friend class MarkObjectVisitor;
   static void VisitObject(HeapObject* obj);
@@ -774,17 +819,10 @@
   List<Page*> evacuation_candidates_;
   List<NewSpacePage*> newspace_evacuation_candidates_;
 
-  // The evacuation_slots_buffers_ are used by the compaction threads.
-  // When a compaction task finishes, it uses
-  // AddEvacuationSlotsbufferSynchronized to adds its slots buffer to the
-  // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
-  // lock.
-  base::Mutex evacuation_slots_buffers_mutex_;
-  List<SlotsBuffer*> evacuation_slots_buffers_;
-
-  base::SmartPointer<FreeList> free_list_old_space_;
-  base::SmartPointer<FreeList> free_list_code_space_;
-  base::SmartPointer<FreeList> free_list_map_space_;
+  base::Mutex swept_pages_mutex_;
+  List<Page*> swept_old_space_pages_;
+  List<Page*> swept_code_space_pages_;
+  List<Page*> swept_map_space_pages_;
 
   SweepingList sweeping_list_old_space_;
   SweepingList sweeping_list_code_space_;
@@ -797,89 +835,19 @@
   // True if concurrent or parallel sweeping is currently in progress.
   bool sweeping_in_progress_;
 
-  // True if parallel compaction is currently in progress.
-  bool compaction_in_progress_;
-
   // Semaphore used to synchronize sweeper tasks.
   base::Semaphore pending_sweeper_tasks_semaphore_;
 
   // Semaphore used to synchronize compaction tasks.
   base::Semaphore pending_compaction_tasks_semaphore_;
 
+  bool black_allocation_;
+
   friend class Heap;
   friend class StoreBuffer;
 };
 
 
-class MarkBitCellIterator BASE_EMBEDDED {
- public:
-  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
-    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
-        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
-    cell_base_ = chunk_->area_start();
-    cell_index_ = Bitmap::IndexToCell(
-        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
-    cells_ = chunk_->markbits()->cells();
-  }
-
-  inline bool Done() { return cell_index_ == last_cell_index_; }
-
-  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
-
-  inline MarkBit::CellType* CurrentCell() {
-    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
-                              chunk_->AddressToMarkbitIndex(cell_base_))));
-    return &cells_[cell_index_];
-  }
-
-  inline Address CurrentCellBase() {
-    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
-                              chunk_->AddressToMarkbitIndex(cell_base_))));
-    return cell_base_;
-  }
-
-  inline void Advance() {
-    cell_index_++;
-    cell_base_ += 32 * kPointerSize;
-  }
-
-  // Return the next mark bit cell. If there is no next it returns 0;
-  inline MarkBit::CellType PeekNext() {
-    if (HasNext()) {
-      return cells_[cell_index_ + 1];
-    }
-    return 0;
-  }
-
- private:
-  MemoryChunk* chunk_;
-  MarkBit::CellType* cells_;
-  unsigned int last_cell_index_;
-  unsigned int cell_index_;
-  Address cell_base_;
-};
-
-enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };
-
-template <LiveObjectIterationMode T>
-class LiveObjectIterator BASE_EMBEDDED {
- public:
-  explicit LiveObjectIterator(MemoryChunk* chunk)
-      : chunk_(chunk),
-        it_(chunk_),
-        cell_base_(it_.CurrentCellBase()),
-        current_cell_(*it_.CurrentCell()) {}
-
-  HeapObject* Next();
-
- private:
-  MemoryChunk* chunk_;
-  MarkBitCellIterator it_;
-  Address cell_base_;
-  MarkBit::CellType current_cell_;
-};
-
-
 class EvacuationScope BASE_EMBEDDED {
  public:
   explicit EvacuationScope(MarkCompactCollector* collector)
diff --git a/src/heap/memory-reducer.cc b/src/heap/memory-reducer.cc
index f537307..699e10e 100644
--- a/src/heap/memory-reducer.cc
+++ b/src/heap/memory-reducer.cc
@@ -84,10 +84,10 @@
       double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                         kIncrementalMarkingDelayMs;
       heap()->incremental_marking()->AdvanceIncrementalMarking(
-          0, deadline, i::IncrementalMarking::StepActions(
-                           i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
-                           i::IncrementalMarking::FORCE_MARKING,
-                           i::IncrementalMarking::FORCE_COMPLETION));
+          deadline, i::IncrementalMarking::StepActions(
+                        i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                        i::IncrementalMarking::FORCE_MARKING,
+                        i::IncrementalMarking::FORCE_COMPLETION));
       heap()->FinalizeIncrementalMarkingIfComplete(
           "Memory reducer: finalize incremental marking");
     }
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index d71c879..c415713 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -220,11 +220,12 @@
     Heap* heap, RelocInfo* rinfo) {
   DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
   HeapObject* object = HeapObject::cast(rinfo->target_object());
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+  Code* host = rinfo->host();
+  heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
   // TODO(ulan): It could be better to record slots only for strongly embedded
   // objects here and record slots for weakly embedded object during clearing
   // of non-live references in mark-compact.
-  if (!rinfo->host()->IsWeakObject(object)) {
+  if (!host->IsWeakObject(object)) {
     StaticVisitor::MarkObject(heap, object);
   }
 }
@@ -235,8 +236,9 @@
                                                     RelocInfo* rinfo) {
   DCHECK(rinfo->rmode() == RelocInfo::CELL);
   Cell* cell = rinfo->target_cell();
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, cell);
-  if (!rinfo->host()->IsWeakObject(cell)) {
+  Code* host = rinfo->host();
+  heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, cell);
+  if (!host->IsWeakObject(cell)) {
     StaticVisitor::MarkObject(heap, cell);
   }
 }
@@ -248,7 +250,8 @@
   DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
          rinfo->IsPatchedDebugBreakSlotSequence());
   Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  Code* host = rinfo->host();
+  heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
   StaticVisitor::MarkObject(heap, target);
 }
 
@@ -268,7 +271,8 @@
                      rinfo->host()->constant_pool());
     target = Code::GetCodeFromTargetAddress(rinfo->target_address());
   }
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  Code* host = rinfo->host();
+  heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
   StaticVisitor::MarkObject(heap, target);
 }
 
@@ -279,7 +283,8 @@
   DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
   Code* target = rinfo->code_age_stub();
   DCHECK(target != NULL);
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  Code* host = rinfo->host();
+  heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
   StaticVisitor::MarkObject(heap, target);
 }
 
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index 0003a07..0b857dc 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -111,6 +111,7 @@
     case JS_ARRAY_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
+    case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
     case JS_TYPED_ARRAY_TYPE:
     case JS_DATA_VIEW_TYPE:
diff --git a/src/heap/page-parallel-job.h b/src/heap/page-parallel-job.h
new file mode 100644
index 0000000..720e288
--- /dev/null
+++ b/src/heap/page-parallel-job.h
@@ -0,0 +1,188 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PAGE_PARALLEL_JOB_
+#define V8_HEAP_PAGE_PARALLEL_JOB_
+
+#include "src/allocation.h"
+#include "src/cancelable-task.h"
+#include "src/utils.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+
+// This class manages background tasks that process a set of pages in parallel.
+// The JobTraits class needs to define:
+// - PerPageData type - state associated with each page.
+// - PerTaskData type - state associated with each task.
+// - static bool ProcessPageInParallel(Heap* heap,
+//                                     PerTaskData task_data,
+//                                     MemoryChunk* page,
+//                                     PerPageData page_data)
+//   The function should return true iff processing succeeded.
+// - static const bool NeedSequentialFinalization
+// - static void FinalizePageSequentially(Heap* heap,
+//                                        bool processing_succeeded,
+//                                        MemoryChunk* page,
+//                                        PerPageData page_data)
+template <typename JobTraits>
+class PageParallelJob {
+ public:
+  PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager)
+      : heap_(heap),
+        cancelable_task_manager_(cancelable_task_manager),
+        items_(nullptr),
+        num_items_(0),
+        num_tasks_(0),
+        pending_tasks_(new base::Semaphore(0)) {}
+
+  ~PageParallelJob() {
+    Item* item = items_;
+    while (item != nullptr) {
+      Item* next = item->next;
+      delete item;
+      item = next;
+    }
+    delete pending_tasks_;
+  }
+
+  void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
+    Item* item = new Item(chunk, data, items_);
+    items_ = item;
+    ++num_items_;
+  }
+
+  int NumberOfPages() { return num_items_; }
+
+  // Returns the number of tasks that were spawned when running the job.
+  int NumberOfTasks() { return num_tasks_; }
+
+  // Runs the given number of tasks in parallel and processes the previously
+  // added pages. This function blocks until all tasks finish.
+  // The callback takes the index of a task and returns data for that task.
+  template <typename Callback>
+  void Run(int num_tasks, Callback per_task_data_callback) {
+    if (num_items_ == 0) return;
+    DCHECK_GE(num_tasks, 1);
+    uint32_t task_ids[kMaxNumberOfTasks];
+    const int max_num_tasks = Min(
+        kMaxNumberOfTasks,
+        static_cast<int>(
+            V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+    num_tasks_ = Max(1, Min(num_tasks, max_num_tasks));
+    int items_per_task = (num_items_ + num_tasks_ - 1) / num_tasks_;
+    int start_index = 0;
+    Task* main_task = nullptr;
+    for (int i = 0; i < num_tasks_; i++, start_index += items_per_task) {
+      if (start_index >= num_items_) {
+        start_index -= num_items_;
+      }
+      Task* task = new Task(heap_, items_, num_items_, start_index,
+                            pending_tasks_, per_task_data_callback(i));
+      task_ids[i] = task->id();
+      if (i > 0) {
+        V8::GetCurrentPlatform()->CallOnBackgroundThread(
+            task, v8::Platform::kShortRunningTask);
+      } else {
+        main_task = task;
+      }
+    }
+    // Contribute on main thread.
+    main_task->Run();
+    delete main_task;
+    // Wait for background tasks.
+    for (int i = 0; i < num_tasks_; i++) {
+      if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
+        pending_tasks_->Wait();
+      }
+    }
+    if (JobTraits::NeedSequentialFinalization) {
+      Item* item = items_;
+      while (item != nullptr) {
+        bool success = (item->state.Value() == kFinished);
+        JobTraits::FinalizePageSequentially(heap_, item->chunk, success,
+                                            item->data);
+        item = item->next;
+      }
+    }
+  }
+
+ private:
+  static const int kMaxNumberOfTasks = 10;
+
+  enum ProcessingState { kAvailable, kProcessing, kFinished, kFailed };
+
+  struct Item : public Malloced {
+    Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
+        : chunk(chunk), state(kAvailable), data(data), next(next) {}
+    MemoryChunk* chunk;
+    AtomicValue<ProcessingState> state;
+    typename JobTraits::PerPageData data;
+    Item* next;
+  };
+
+  class Task : public CancelableTask {
+   public:
+    Task(Heap* heap, Item* items, int num_items, int start_index,
+         base::Semaphore* on_finish, typename JobTraits::PerTaskData data)
+        : CancelableTask(heap->isolate()),
+          heap_(heap),
+          items_(items),
+          num_items_(num_items),
+          start_index_(start_index),
+          on_finish_(on_finish),
+          data_(data) {}
+
+    virtual ~Task() {}
+
+   private:
+    // v8::internal::CancelableTask overrides.
+    void RunInternal() override {
+      // Each task starts at a different index to improve parallelization.
+      Item* current = items_;
+      int skip = start_index_;
+      while (skip-- > 0) {
+        current = current->next;
+      }
+      for (int i = 0; i < num_items_; i++) {
+        if (current->state.TrySetValue(kAvailable, kProcessing)) {
+          bool success = JobTraits::ProcessPageInParallel(
+              heap_, data_, current->chunk, current->data);
+          current->state.SetValue(success ? kFinished : kFailed);
+        }
+        current = current->next;
+        // Wrap around if needed.
+        if (current == nullptr) {
+          current = items_;
+        }
+      }
+      on_finish_->Signal();
+    }
+
+    Heap* heap_;
+    Item* items_;
+    int num_items_;
+    int start_index_;
+    base::Semaphore* on_finish_;
+    typename JobTraits::PerTaskData data_;
+    DISALLOW_COPY_AND_ASSIGN(Task);
+  };
+
+  Heap* heap_;
+  CancelableTaskManager* cancelable_task_manager_;
+  Item* items_;
+  int num_items_;
+  int num_tasks_;
+  base::Semaphore* pending_tasks_;
+  DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_PAGE_PARALLEL_JOB_
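
PageParallelJob hands every task the full item list but a different start index; tasks walk the list, claim items with an atomic compare-and-swap, and wrap around, so a fast task ends up helping a slow one. A standalone sketch of that claiming scheme using std::atomic and std::thread (illustrative types, not the V8 classes):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Item {
      std::atomic<int> state{0};  // 0 = available, 1 = processing, 2 = finished
      int processed_by = -1;
    };

    void RunTask(std::vector<Item>* items, int start_index, int task_id) {
      const int n = static_cast<int>(items->size());
      for (int i = 0; i < n; i++) {
        Item& item = (*items)[(start_index + i) % n];
        int expected = 0;
        // Claim the item only if no other task got to it first.
        if (item.state.compare_exchange_strong(expected, 1)) {
          item.processed_by = task_id;  // "process" the page
          item.state.store(2);
        }
      }
    }

    int main() {
      std::vector<Item> items(10);
      const int num_tasks = 3;
      const int n = static_cast<int>(items.size());
      const int items_per_task = (n + num_tasks - 1) / num_tasks;
      std::vector<std::thread> threads;
      for (int t = 0; t < num_tasks; t++) {
        threads.emplace_back(RunTask, &items, (t * items_per_task) % n, t);
      }
      for (std::thread& th : threads) th.join();
      for (const Item& item : items) std::printf("%d ", item.processed_by);
      std::printf("\n");  // every item was processed exactly once
      return 0;
    }
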
diff --git a/src/heap/remembered-set.cc b/src/heap/remembered-set.cc
index d9d5914..403c99b 100644
--- a/src/heap/remembered-set.cc
+++ b/src/heap/remembered-set.cc
@@ -22,10 +22,9 @@
     chunk = it.next();
     SlotSet* slots = GetSlotSet(chunk);
     if (slots != nullptr) {
-      slots->Iterate([heap](Address addr) {
+      slots->Iterate([heap, chunk](Address addr) {
         Object** slot = reinterpret_cast<Object**>(addr);
-        return IsValidSlot(heap, slot) ? SlotSet::KEEP_SLOT
-                                       : SlotSet::REMOVE_SLOT;
+        return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
       });
     }
   }
@@ -33,22 +32,30 @@
 
 template <PointerDirection direction>
 void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
-  STATIC_ASSERT(direction == OLD_TO_NEW);
   Iterate(heap, [heap](Address addr) {
-    Object** slot = reinterpret_cast<Object**>(addr);
-    Object* object = *slot;
-    if (Page::FromAddress(addr)->owner() != nullptr &&
-        Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
-      CHECK(IsValidSlot(heap, slot));
-      heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
-          reinterpret_cast<Address>(slot), HeapObject::cast(object));
+    HeapObject* obj =
+        heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
+    if (obj == nullptr) {
+      // The slot is in a dead object.
+      MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
+      AllocationSpace owner = chunk->owner()->identity();
+      // The old to old remembered set should not have dead slots.
+      CHECK_NE(direction, OLD_TO_OLD);
+      // The old to new remembered set is allowed to have slots in dead
+      // objects only in map space and large object space because these
+      // spaces cannot have raw untagged pointers.
+      CHECK(owner == MAP_SPACE || owner == LO_SPACE);
+    } else {
+      int offset = static_cast<int>(addr - obj->address());
+      CHECK(obj->IsValidSlot(offset));
     }
-    return SlotSet::KEEP_SLOT;
+    return KEEP_SLOT;
   });
 }
 
 template <PointerDirection direction>
-bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
+bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
+                                           Object** slot) {
   STATIC_ASSERT(direction == OLD_TO_NEW);
   Object* object = *slot;
   if (!heap->InNewSpace(object)) {
@@ -58,12 +65,13 @@
   // If the target object is not black, the source slot must be part
   // of a non-black (dead) object.
   return Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
-         heap->mark_compact_collector()->IsSlotInLiveObject(
-             reinterpret_cast<Address>(slot));
+         heap->mark_compact_collector()->IsSlotInBlackObject(
+             chunk, reinterpret_cast<Address>(slot));
 }
 
 template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
 template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
+template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index 351d76e..45408bf 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -56,22 +56,44 @@
   }
 
   // Iterates and filters the remembered set with the given callback.
-  // The callback should take (Address slot) and return SlotSet::CallbackResult.
+  // The callback should take (Address slot) and return SlotCallbackResult.
   template <typename Callback>
   static void Iterate(Heap* heap, Callback callback) {
-    PointerChunkIterator it(heap);
+    IterateMemoryChunks(
+        heap, [callback](MemoryChunk* chunk) { Iterate(chunk, callback); });
+  }
+
+  // Iterates over all memory chunks that contain non-empty slot sets.
+  // The callback should take (MemoryChunk* chunk) and return void.
+  template <typename Callback>
+  static void IterateMemoryChunks(Heap* heap, Callback callback) {
+    MemoryChunkIterator it(heap, direction == OLD_TO_OLD
+                                     ? MemoryChunkIterator::ALL
+                                     : MemoryChunkIterator::ALL_BUT_CODE_SPACE);
     MemoryChunk* chunk;
     while ((chunk = it.next()) != nullptr) {
       SlotSet* slots = GetSlotSet(chunk);
-      if (slots != nullptr) {
-        size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
-        int new_count = 0;
-        for (size_t page = 0; page < pages; page++) {
-          new_count += slots[page].Iterate(callback);
-        }
-        if (new_count == 0) {
-          ReleaseSlotSet(chunk);
-        }
+      TypedSlotSet* typed_slots = GetTypedSlotSet(chunk);
+      if (slots != nullptr || typed_slots != nullptr) {
+        callback(chunk);
+      }
+    }
+  }
+
+  // Iterates and filters the remembered set in the given memory chunk with
+  // the given callback. The callback should take (Address slot) and return
+  // SlotCallbackResult.
+  template <typename Callback>
+  static void Iterate(MemoryChunk* chunk, Callback callback) {
+    SlotSet* slots = GetSlotSet(chunk);
+    if (slots != nullptr) {
+      size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+      int new_count = 0;
+      for (size_t page = 0; page < pages; page++) {
+        new_count += slots[page].Iterate(callback);
+      }
+      if (new_count == 0) {
+        ReleaseSlotSet(chunk);
       }
     }
   }
@@ -89,6 +111,64 @@
     });
   }
 
+  template <typename Callback>
+  static void IterateWithWrapper(Heap* heap, MemoryChunk* chunk,
+                                 Callback callback) {
+    Iterate(chunk, [heap, callback](Address addr) {
+      return Wrapper(heap, addr, callback);
+    });
+  }
+
+  // Given a page and a typed slot in that page, this function adds the slot
+  // to the remembered set.
+  static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
+    STATIC_ASSERT(direction == OLD_TO_OLD);
+    TypedSlotSet* slot_set = page->typed_old_to_old_slots();
+    if (slot_set == nullptr) {
+      page->AllocateTypedOldToOldSlots();
+      slot_set = page->typed_old_to_old_slots();
+    }
+    uintptr_t offset = slot_addr - page->address();
+    DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+    slot_set->Insert(slot_type, static_cast<uint32_t>(offset));
+  }
+
+  // Given a page and a range of typed slots in that page, this function removes
+  // the slots from the remembered set.
+  static void RemoveRangeTyped(Page* page, Address start, Address end) {
+    TypedSlotSet* slots = page->typed_old_to_old_slots();
+    if (slots != nullptr) {
+      slots->Iterate([start, end](SlotType slot_type, Address slot_addr) {
+        return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
+      });
+    }
+  }
+
+  // Iterates and filters typed old to old pointers in the given memory chunk
+  // with the given callback. The callback should take (SlotType slot_type,
+  // Address slot_addr) and return SlotCallbackResult.
+  template <typename Callback>
+  static void IterateTyped(MemoryChunk* chunk, Callback callback) {
+    TypedSlotSet* slots = chunk->typed_old_to_old_slots();
+    if (slots != nullptr) {
+      int new_count = slots->Iterate(callback);
+      if (new_count == 0) {
+        chunk->ReleaseTypedOldToOldSlots();
+      }
+    }
+  }
+
+  // Clears all old-to-old slots from the remembered set.
+  static void ClearAll(Heap* heap) {
+    STATIC_ASSERT(direction == OLD_TO_OLD);
+    MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
+    MemoryChunk* chunk;
+    while ((chunk = it.next()) != nullptr) {
+      chunk->ReleaseOldToOldSlots();
+      chunk->ReleaseTypedOldToOldSlots();
+    }
+  }
+
   // Eliminates all stale slots from the remembered set, i.e.
   // slots that are not part of live objects anymore. This method must be
   // called after marking, when the whole transitive closure is known and
@@ -106,6 +186,14 @@
     }
   }
 
+  static TypedSlotSet* GetTypedSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      return chunk->typed_old_to_old_slots();
+    } else {
+      return nullptr;
+    }
+  }
+
   static void ReleaseSlotSet(MemoryChunk* chunk) {
     if (direction == OLD_TO_OLD) {
       chunk->ReleaseOldToOldSlots();
@@ -125,8 +213,8 @@
   }
 
   template <typename Callback>
-  static SlotSet::CallbackResult Wrapper(Heap* heap, Address slot_address,
-                                         Callback slot_callback) {
+  static SlotCallbackResult Wrapper(Heap* heap, Address slot_address,
+                                    Callback slot_callback) {
     STATIC_ASSERT(direction == OLD_TO_NEW);
     Object** slot = reinterpret_cast<Object**>(slot_address);
     Object* object = *slot;
@@ -140,15 +228,15 @@
       // Unfortunately, we do not know about the slot. It could be in a
       // just freed free space object.
       if (heap->InToSpace(object)) {
-        return SlotSet::KEEP_SLOT;
+        return KEEP_SLOT;
       }
     } else {
       DCHECK(!heap->InNewSpace(object));
     }
-    return SlotSet::REMOVE_SLOT;
+    return REMOVE_SLOT;
   }
 
-  static bool IsValidSlot(Heap* heap, Object** slot);
+  static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, Object** slot);
 };
 
 }  // namespace internal
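
Note: the RememberedSet::Iterate path above filters slots through a callback that returns SlotCallbackResult and releases the backing slot set once it becomes empty. Below is a minimal standalone sketch of that callback-filtering pattern in plain C++; SimpleSlotSet and its members are illustrative stand-ins, not the V8 classes.

#include <cstdint>
#include <vector>

// Simplified stand-in for v8::internal::SlotCallbackResult.
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Toy slot set: stores raw slot addresses and drops the ones the callback
// rejects, mirroring how RememberedSet::Iterate counts surviving slots and
// lets the caller release the set once the count reaches zero.
class SimpleSlotSet {
 public:
  void Insert(uintptr_t slot_address) { slots_.push_back(slot_address); }

  template <typename Callback>
  int Iterate(Callback callback) {
    std::vector<uintptr_t> kept;
    for (uintptr_t slot_address : slots_) {
      if (callback(slot_address) == KEEP_SLOT) kept.push_back(slot_address);
    }
    slots_.swap(kept);
    return static_cast<int>(slots_.size());  // New number of slots.
  }

 private:
  std::vector<uintptr_t> slots_;
};

A caller would pass a lambda, e.g. set.Iterate([](uintptr_t addr) { return addr != 0 ? KEEP_SLOT : REMOVE_SLOT; }); the real RememberedSet additionally dispatches on the OLD_TO_NEW/OLD_TO_OLD direction template parameter.
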
diff --git a/src/heap/scavenge-job.cc b/src/heap/scavenge-job.cc
index 52ba97a..d89c945 100644
--- a/src/heap/scavenge-job.cc
+++ b/src/heap/scavenge-job.cc
@@ -23,8 +23,8 @@
       static_cast<double>(base::Time::kMillisecondsPerSecond);
   double start_ms = heap->MonotonicallyIncreasingTimeInMs();
   double idle_time_in_ms = deadline_in_ms - start_ms;
-  size_t scavenge_speed_in_bytes_per_ms =
-      static_cast<size_t>(heap->tracer()->ScavengeSpeedInBytesPerMillisecond());
+  double scavenge_speed_in_bytes_per_ms =
+      heap->tracer()->ScavengeSpeedInBytesPerMillisecond();
   size_t new_space_size = heap->new_space()->Size();
   size_t new_space_capacity = heap->new_space()->Capacity();
 
@@ -42,9 +42,8 @@
   }
 }
 
-
 bool ScavengeJob::ReachedIdleAllocationLimit(
-    size_t scavenge_speed_in_bytes_per_ms, size_t new_space_size,
+    double scavenge_speed_in_bytes_per_ms, size_t new_space_size,
     size_t new_space_capacity) {
   if (scavenge_speed_in_bytes_per_ms == 0) {
     scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
@@ -52,27 +51,24 @@
 
   // Set the allocation limit to the number of bytes we can scavenge in an
   // average idle task.
-  size_t allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
+  double allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
 
   // Keep the limit smaller than the new space capacity.
   allocation_limit =
-      Min(allocation_limit,
-          static_cast<size_t>(new_space_capacity *
-                              kMaxAllocationLimitAsFractionOfNewSpace));
+      Min<double>(allocation_limit,
+                  new_space_capacity * kMaxAllocationLimitAsFractionOfNewSpace);
   // Adjust the limit to take into account bytes that will be allocated until
-  // the next check.
-  allocation_limit = allocation_limit < kBytesAllocatedBeforeNextIdleTask
-                         ? 0
-                         : allocation_limit - kBytesAllocatedBeforeNextIdleTask;
-  // Keep the limit large enough to avoid scavenges in tiny new space.
-  allocation_limit = Max(allocation_limit, kMinAllocationLimit);
+  // the next check and keep the limit large enough to avoid scavenges in tiny
+  // new space.
+  allocation_limit =
+      Max<double>(allocation_limit - kBytesAllocatedBeforeNextIdleTask,
+                  kMinAllocationLimit);
 
   return allocation_limit <= new_space_size;
 }
 
-
 bool ScavengeJob::EnoughIdleTimeForScavenge(
-    double idle_time_in_ms, size_t scavenge_speed_in_bytes_per_ms,
+    double idle_time_in_ms, double scavenge_speed_in_bytes_per_ms,
     size_t new_space_size) {
   if (scavenge_speed_in_bytes_per_ms == 0) {
     scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
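
Note: with the switch from size_t to double above, ReachedIdleAllocationLimit reduces to a clamp-and-compare on the estimated scavenge throughput. The following self-contained sketch shows that arithmetic; the constant values are placeholders chosen only for illustration (the real ones are declared in scavenge-job.h), and only the shape of the computation mirrors the code above.

#include <algorithm>
#include <cstddef>

// Illustrative constants only; the real values live in scavenge-job.h.
constexpr double kAverageIdleTimeMs = 5;
constexpr double kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
constexpr size_t kBytesAllocatedBeforeNextIdleTask = 512 * 1024;
constexpr size_t kMinAllocationLimit = 512 * 1024;
constexpr double kInitialScavengeSpeedInBytesPerMs = 256 * 1024;

bool ReachedIdleAllocationLimitSketch(double scavenge_speed_in_bytes_per_ms,
                                      size_t new_space_size,
                                      size_t new_space_capacity) {
  if (scavenge_speed_in_bytes_per_ms == 0) {
    scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
  }
  // Bytes we expect to scavenge in one average idle task.
  double allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
  // Never let the limit exceed a fraction of the new-space capacity.
  allocation_limit = std::min(
      allocation_limit,
      new_space_capacity * kMaxAllocationLimitAsFractionOfNewSpace);
  // Account for allocation until the next check, but keep a floor so a tiny
  // new space does not constantly trigger scavenges.
  allocation_limit =
      std::max(allocation_limit - kBytesAllocatedBeforeNextIdleTask,
               static_cast<double>(kMinAllocationLimit));
  return allocation_limit <= new_space_size;
}
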
diff --git a/src/heap/scavenge-job.h b/src/heap/scavenge-job.h
index 56299a1..fadfccd 100644
--- a/src/heap/scavenge-job.h
+++ b/src/heap/scavenge-job.h
@@ -47,12 +47,12 @@
   void NotifyIdleTask() { idle_task_pending_ = false; }
   bool IdleTaskRescheduled() { return idle_task_rescheduled_; }
 
-  static bool ReachedIdleAllocationLimit(size_t scavenge_speed_in_bytes_per_ms,
+  static bool ReachedIdleAllocationLimit(double scavenge_speed_in_bytes_per_ms,
                                          size_t new_space_size,
                                          size_t new_space_capacity);
 
   static bool EnoughIdleTimeForScavenge(double idle_time_ms,
-                                        size_t scavenge_speed_in_bytes_per_ms,
+                                        double scavenge_speed_in_bytes_per_ms,
                                         size_t new_space_size);
 
   // If we haven't recorded any scavenger events yet, we use a conservative
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
index 40aeb74..3f532ea 100644
--- a/src/heap/scavenger.cc
+++ b/src/heap/scavenger.cc
@@ -186,7 +186,9 @@
       *slot = target;
 
       if (object_contents == POINTER_OBJECT) {
-        heap->promotion_queue()->insert(target, object_size);
+        heap->promotion_queue()->insert(
+            target, object_size,
+            Marking::IsBlack(Marking::MarkBitFrom(object)));
       }
       heap->IncrementPromotedObjectsSize(object_size);
       return true;
@@ -236,7 +238,7 @@
     if (Marking::IsBlack(mark_bit)) {
       // This object is black and it might not be rescanned by marker.
       // We should explicitly record code entry slot for compaction because
-      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
+      // promotion queue processing (IteratePromotedObjectPointers) will
       // miss it as it is not HeapObject-tagged.
       Address code_entry_slot =
           target->address() + JSFunction::kCodeEntryOffset;
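
Note: the promotion queue entry now also records whether the promoted object was already marked black, so later queue processing can treat black-promoted objects accordingly. A toy model of such an entry is sketched below; PromotionQueueEntry and this vector-backed queue are illustrative only, the real queue stores packed entries inside to-space.

#include <cstdint>
#include <vector>

// Toy promotion queue entry: besides target address and size, it carries the
// "was already marked black" bit that the scavenger change above records.
struct PromotionQueueEntry {
  uintptr_t target;
  int size;
  bool was_marked_black;
};

class ToyPromotionQueue {
 public:
  void insert(uintptr_t target, int size, bool was_marked_black) {
    entries_.push_back({target, size, was_marked_black});
  }
  bool remove(PromotionQueueEntry* entry) {
    if (entries_.empty()) return false;
    *entry = entries_.back();
    entries_.pop_back();
    return true;
  }

 private:
  std::vector<PromotionQueueEntry> entries_;
};
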
diff --git a/src/heap/slot-set.h b/src/heap/slot-set.h
index 6144706..e55ffe9 100644
--- a/src/heap/slot-set.h
+++ b/src/heap/slot-set.h
@@ -7,10 +7,13 @@
 
 #include "src/allocation.h"
 #include "src/base/bits.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
+
 // Data structure for maintaining a set of slots in a standard (non-large)
 // page. The base address of the page must be set with SetPageStart before any
 // operation.
@@ -19,8 +22,6 @@
 // Each bucket is a bitmap with a bit corresponding to a single slot offset.
 class SlotSet : public Malloced {
  public:
-  enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
-
   SlotSet() {
     for (int i = 0; i < kBuckets; i++) {
       bucket[i] = nullptr;
@@ -213,6 +214,124 @@
   Address page_start_;
 };
 
+enum SlotType {
+  EMBEDDED_OBJECT_SLOT,
+  OBJECT_SLOT,
+  RELOCATED_CODE_OBJECT,
+  CELL_TARGET_SLOT,
+  CODE_TARGET_SLOT,
+  CODE_ENTRY_SLOT,
+  DEBUG_TARGET_SLOT,
+  NUMBER_OF_SLOT_TYPES
+};
+
+// Data structure for maintaining a multiset of typed slots in a page.
+// Typed slots can only appear in Code and JSFunction objects, so
+// the maximum possible offset is limited by LargePage::kMaxCodePageSize.
+// The implementation is a chain of chunks, where each chunk is an array of
+// encoded (slot type, slot offset) pairs.
+// There is no duplicate detection and we do not expect many duplicates because
+// typed slots contain V8 internal pointers that are not directly exposed to JS.
+class TypedSlotSet {
+ public:
+  typedef uint32_t TypedSlot;
+  static const int kMaxOffset = 1 << 29;
+
+  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
+    chunk_ = new Chunk(nullptr, kInitialBufferSize);
+  }
+
+  ~TypedSlotSet() {
+    Chunk* chunk = chunk_;
+    while (chunk != nullptr) {
+      Chunk* next = chunk->next;
+      delete chunk;
+      chunk = next;
+    }
+  }
+
+  // The slot offset specifies a slot at address page_start_ + offset.
+  void Insert(SlotType type, int offset) {
+    TypedSlot slot = ToTypedSlot(type, offset);
+    if (!chunk_->AddSlot(slot)) {
+      chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
+      bool added = chunk_->AddSlot(slot);
+      DCHECK(added);
+      USE(added);
+    }
+  }
+
+  // Iterate over all slots in the set and for each slot invoke the callback.
+  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
+  // Returns the new number of slots.
+  //
+  // Sample usage:
+  // Iterate([](SlotType slot_type, Address slot_address) {
+  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
+  //    else return REMOVE_SLOT;
+  // });
+  template <typename Callback>
+  int Iterate(Callback callback) {
+    STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
+    const TypedSlot kRemovedSlot = TypeField::encode(NUMBER_OF_SLOT_TYPES);
+    Chunk* chunk = chunk_;
+    int new_count = 0;
+    while (chunk != nullptr) {
+      TypedSlot* buffer = chunk->buffer;
+      int count = chunk->count;
+      for (int i = 0; i < count; i++) {
+        TypedSlot slot = buffer[i];
+        if (slot != kRemovedSlot) {
+          SlotType type = TypeField::decode(slot);
+          Address addr = page_start_ + OffsetField::decode(slot);
+          if (callback(type, addr) == KEEP_SLOT) {
+            new_count++;
+          } else {
+            buffer[i] = kRemovedSlot;
+          }
+        }
+      }
+      chunk = chunk->next;
+    }
+    return new_count;
+  }
+
+ private:
+  static const int kInitialBufferSize = 100;
+  static const int kMaxBufferSize = 16 * KB;
+
+  static int NextCapacity(int capacity) {
+    return Min(kMaxBufferSize, capacity * 2);
+  }
+
+  static TypedSlot ToTypedSlot(SlotType type, int offset) {
+    return TypeField::encode(type) | OffsetField::encode(offset);
+  }
+
+  class OffsetField : public BitField<int, 0, 29> {};
+  class TypeField : public BitField<SlotType, 29, 3> {};
+
+  struct Chunk : Malloced {
+    explicit Chunk(Chunk* next_chunk, int capacity)
+        : next(next_chunk), count(0), capacity(capacity) {
+      buffer = NewArray<TypedSlot>(capacity);
+    }
+    bool AddSlot(TypedSlot slot) {
+      if (count == capacity) return false;
+      buffer[count++] = slot;
+      return true;
+    }
+    ~Chunk() { DeleteArray(buffer); }
+    Chunk* next;
+    int count;
+    int capacity;
+    TypedSlot* buffer;
+  };
+
+  Address page_start_;
+  Chunk* chunk_;
+};
+
 }  // namespace internal
 }  // namespace v8
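
Note: TypedSlotSet above packs each (slot type, page offset) pair into a single 32-bit value via BitField, with 29 bits of offset and 3 bits of type, and marks removed entries during Iterate by overwriting them with a value whose type field is NUMBER_OF_SLOT_TYPES. A standalone sketch of that encoding using plain shifts and masks (not V8's BitField template):

#include <cassert>
#include <cstdint>

enum SlotTypeSketch {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  RELOCATED_CODE_OBJECT,
  CELL_TARGET_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  DEBUG_TARGET_SLOT,
  NUMBER_OF_SLOT_TYPES
};

// Offset occupies bits 0..28 and the slot type bits 29..31 (3 bits cover the
// 8 enumerators above), so offsets are limited to 1 << 29 as in the header.
constexpr uint32_t kOffsetBits = 29;
constexpr uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

uint32_t EncodeTypedSlot(SlotTypeSketch type, uint32_t offset) {
  assert(offset < (1u << kOffsetBits));
  return (static_cast<uint32_t>(type) << kOffsetBits) | offset;
}

SlotTypeSketch DecodeType(uint32_t slot) {
  return static_cast<SlotTypeSketch>(slot >> kOffsetBits);
}

uint32_t DecodeOffset(uint32_t slot) { return slot & kOffsetMask; }

In this model, removal in place amounts to buffer[i] = EncodeTypedSlot(NUMBER_OF_SLOT_TYPES, 0), which corresponds to the kRemovedSlot sentinel computed in Iterate above.
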
 
diff --git a/src/heap/slots-buffer.cc b/src/heap/slots-buffer.cc
deleted file mode 100644
index 5a3db28..0000000
--- a/src/heap/slots-buffer.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/slots-buffer.h"
-
-#include "src/assembler.h"
-#include "src/heap/heap.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
-  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
-                        SlotsBuffer** buffer_address, SlotType type,
-                        Address addr, AdditionMode mode) {
-  SlotsBuffer* buffer = *buffer_address;
-  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
-    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-      allocator->DeallocateChain(buffer_address);
-      return false;
-    }
-    buffer = allocator->AllocateBuffer(buffer);
-    *buffer_address = buffer;
-  }
-  DCHECK(buffer->HasSpaceForTypedSlot());
-  buffer->Add(reinterpret_cast<ObjectSlot>(type));
-  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
-  return true;
-}
-
-
-void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
-  // Remove entries by replacing them with an old-space slot containing a smi
-  // that is located in an unmovable page.
-  const ObjectSlot kRemovedEntry = HeapObject::RawField(
-      heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
-  DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
-             ->NeverEvacuate());
-
-  while (buffer != NULL) {
-    SlotsBuffer::ObjectSlot* slots = buffer->slots_;
-    intptr_t slots_count = buffer->idx_;
-
-    for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
-      ObjectSlot slot = slots[slot_idx];
-      if (!IsTypedSlot(slot)) {
-        Object* object = *slot;
-        // Slots are invalid when they currently:
-        // - do not point to a heap object (SMI)
-        // - point to a heap object in new space
-        // - are not within a live heap object on a valid pointer slot
-        // - point to a heap object not on an evacuation candidate
-        // TODO(mlippautz): Move InNewSpace check above IsSlotInLiveObject once
-        //   we filter out unboxed double slots eagerly.
-        if (!object->IsHeapObject() ||
-            !heap->mark_compact_collector()->IsSlotInLiveObject(
-                reinterpret_cast<Address>(slot)) ||
-            heap->InNewSpace(object) ||
-            !Page::FromAddress(reinterpret_cast<Address>(object))
-                 ->IsEvacuationCandidate()) {
-          // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
-          // could shrink the slots buffer in-place.
-          slots[slot_idx] = kRemovedEntry;
-        }
-      } else {
-        ++slot_idx;
-        DCHECK(slot_idx < slots_count);
-      }
-    }
-    buffer = buffer->next();
-  }
-}
-
-
-void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
-                                    Address start_slot, Address end_slot) {
-  // Remove entries by replacing them with an old-space slot containing a smi
-  // that is located in an unmovable page.
-  const ObjectSlot kRemovedEntry = HeapObject::RawField(
-      heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
-  DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
-             ->NeverEvacuate());
-
-  while (buffer != NULL) {
-    SlotsBuffer::ObjectSlot* slots = buffer->slots_;
-    intptr_t slots_count = buffer->idx_;
-    bool is_typed_slot = false;
-
-    for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
-      ObjectSlot slot = slots[slot_idx];
-      if (!IsTypedSlot(slot)) {
-        Address slot_address = reinterpret_cast<Address>(slot);
-        if (slot_address >= start_slot && slot_address < end_slot) {
-          // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
-          // could shrink the slots buffer in-place.
-          slots[slot_idx] = kRemovedEntry;
-          if (is_typed_slot) {
-            slots[slot_idx - 1] = kRemovedEntry;
-          }
-        }
-        is_typed_slot = false;
-      } else {
-        is_typed_slot = true;
-        DCHECK(slot_idx < slots_count);
-      }
-    }
-    buffer = buffer->next();
-  }
-}
-
-
-void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
-  while (buffer != NULL) {
-    SlotsBuffer::ObjectSlot* slots = buffer->slots_;
-    intptr_t slots_count = buffer->idx_;
-
-    for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
-      ObjectSlot slot = slots[slot_idx];
-      if (!IsTypedSlot(slot)) {
-        Object* object = *slot;
-        if (object->IsHeapObject()) {
-          HeapObject* heap_object = HeapObject::cast(object);
-          CHECK(!heap->InNewSpace(object));
-          heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
-              reinterpret_cast<Address>(slot), heap_object);
-        }
-      } else {
-        ++slot_idx;
-        DCHECK(slot_idx < slots_count);
-      }
-    }
-    buffer = buffer->next();
-  }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
-  return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
-  delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
-  SlotsBuffer* buffer = *buffer_address;
-  while (buffer != NULL) {
-    SlotsBuffer* next_buffer = buffer->next();
-    DeallocateBuffer(buffer);
-    buffer = next_buffer;
-  }
-  *buffer_address = NULL;
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/heap/slots-buffer.h b/src/heap/slots-buffer.h
deleted file mode 100644
index dc6c922..0000000
--- a/src/heap/slots-buffer.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_SLOTS_BUFFER_H_
-#define V8_HEAP_SLOTS_BUFFER_H_
-
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class SlotsBuffer;
-
-
-// SlotsBufferAllocator manages the allocation and deallocation of slots buffer
-// chunks and links them together. Slots buffer chunks are always created by the
-// SlotsBufferAllocator.
-class SlotsBufferAllocator {
- public:
-  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
-  void DeallocateBuffer(SlotsBuffer* buffer);
-
-  void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-//    - Untyped slots are expected to contain a tagged object pointer.
-//      They are recorded by an address.
-//    - Typed slots are expected to contain an encoded pointer to a heap
-//      object where the way of encoding depends on the type of the slot.
-//      They are recorded as a pair (SlotType, slot address).
-// We assume that zero-page is never mapped this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
-// is the first element of typed slot's pair.
-class SlotsBuffer {
- public:
-  typedef Object** ObjectSlot;
-
-  explicit SlotsBuffer(SlotsBuffer* next_buffer)
-      : idx_(0), chain_length_(1), next_(next_buffer) {
-    if (next_ != NULL) {
-      chain_length_ = next_->chain_length_ + 1;
-    }
-  }
-
-  ~SlotsBuffer() {}
-
-  void Add(ObjectSlot slot) {
-    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
-#ifdef DEBUG
-    if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
-      DCHECK_NOT_NULL(*slot);
-    }
-#endif
-    slots_[idx_++] = slot;
-  }
-
-  ObjectSlot Get(intptr_t i) {
-    DCHECK(i >= 0 && i < kNumberOfElements);
-    return slots_[i];
-  }
-
-  size_t Size() {
-    DCHECK(idx_ <= kNumberOfElements);
-    return idx_;
-  }
-
-  enum SlotType {
-    EMBEDDED_OBJECT_SLOT,
-    OBJECT_SLOT,
-    RELOCATED_CODE_OBJECT,
-    CELL_TARGET_SLOT,
-    CODE_TARGET_SLOT,
-    CODE_ENTRY_SLOT,
-    DEBUG_TARGET_SLOT,
-    NUMBER_OF_SLOT_TYPES
-  };
-
-  static const char* SlotTypeToString(SlotType type) {
-    switch (type) {
-      case EMBEDDED_OBJECT_SLOT:
-        return "EMBEDDED_OBJECT_SLOT";
-      case OBJECT_SLOT:
-        return "OBJECT_SLOT";
-      case RELOCATED_CODE_OBJECT:
-        return "RELOCATED_CODE_OBJECT";
-      case CELL_TARGET_SLOT:
-        return "CELL_TARGET_SLOT";
-      case CODE_TARGET_SLOT:
-        return "CODE_TARGET_SLOT";
-      case CODE_ENTRY_SLOT:
-        return "CODE_ENTRY_SLOT";
-      case DEBUG_TARGET_SLOT:
-        return "DEBUG_TARGET_SLOT";
-      case NUMBER_OF_SLOT_TYPES:
-        return "NUMBER_OF_SLOT_TYPES";
-    }
-    return "UNKNOWN SlotType";
-  }
-
-  SlotsBuffer* next() { return next_; }
-
-  static int SizeOfChain(SlotsBuffer* buffer) {
-    if (buffer == NULL) return 0;
-    return static_cast<int>(buffer->idx_ +
-                            (buffer->chain_length_ - 1) * kNumberOfElements);
-  }
-
-  inline bool IsFull() { return idx_ == kNumberOfElements; }
-
-  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
-
-  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
-
-  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
-    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
-  }
-
-  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
-                           SlotsBuffer** buffer_address, ObjectSlot slot,
-                           AdditionMode mode)) {
-    SlotsBuffer* buffer = *buffer_address;
-    if (buffer == NULL || buffer->IsFull()) {
-      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-        allocator->DeallocateChain(buffer_address);
-        return false;
-      }
-      buffer = allocator->AllocateBuffer(buffer);
-      *buffer_address = buffer;
-    }
-    buffer->Add(slot);
-    return true;
-  }
-
-  static bool IsTypedSlot(ObjectSlot slot);
-
-  static bool AddTo(SlotsBufferAllocator* allocator,
-                    SlotsBuffer** buffer_address, SlotType type, Address addr,
-                    AdditionMode mode);
-
-  // Eliminates all stale entries from the slots buffer, i.e., slots that
-  // are not part of live objects anymore. This method must be called after
-  // marking, when the whole transitive closure is known and must be called
-  // before sweeping when mark bits are still intact.
-  static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
-
-  // Eliminate all slots that are within the given address range.
-  static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
-                                Address start_slot, Address end_slot);
-
-  // Ensures that there are no invalid slots in the chain of slots buffers.
-  static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
-
-  static const int kNumberOfElements = 1021;
-
- private:
-  static const int kChainLengthThreshold = 15;
-
-  intptr_t idx_;
-  intptr_t chain_length_;
-  SlotsBuffer* next_;
-  ObjectSlot slots_[kNumberOfElements];
-};
-
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_HEAP_SLOTS_BUFFER_H_
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 515a202..135498f 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -25,6 +25,11 @@
   chunk->ResetLiveBytes();
 }
 
+void Bitmap::SetAllBits(MemoryChunk* chunk) {
+  Bitmap* bitmap = chunk->markbits();
+  for (int i = 0; i < bitmap->CellsCount(); i++)
+    bitmap->cells()[i] = 0xffffffff;
+}
 
 // -----------------------------------------------------------------------------
 // PageIterator
@@ -147,6 +152,19 @@
   return NULL;
 }
 
+// -----------------------------------------------------------------------------
+// LargePageIterator
+
+LargePageIterator::LargePageIterator(LargeObjectSpace* space)
+    : next_page_(space->first_page()) {}
+
+LargePage* LargePageIterator::next() {
+  LargePage* result = next_page_;
+  if (next_page_ != nullptr) {
+    next_page_ = next_page_->next_page();
+  }
+  return result;
+}
 
 // -----------------------------------------------------------------------------
 // MemoryAllocator
@@ -233,6 +251,19 @@
   return static_cast<AllocationSpace>(Smi::cast(object_)->value());
 }
 
+NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
+                                       Executability executable,
+                                       SemiSpace* owner) {
+  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+  bool in_to_space = (owner->id() != kFromSpace);
+  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+                             : MemoryChunk::IN_FROM_SPACE);
+  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+                                       : MemoryChunk::IN_TO_SPACE));
+  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  heap->incremental_marking()->SetNewSpacePageFlags(page);
+  return page;
+}
 
 // --------------------------------------------------------------------------
 // PagedSpace
@@ -243,14 +274,23 @@
   page->mutex_ = new base::Mutex();
   DCHECK(page->area_size() <= kAllocatableMemory);
   DCHECK(chunk->owner() == owner);
-  owner->IncreaseCapacity(page->area_size());
-  owner->Free(page->area_start(), page->area_size());
 
+  owner->IncreaseCapacity(page->area_size());
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
+  // Make sure that categories are initialized before freeing the area.
+  page->InitializeFreeListCategories();
+  owner->Free(page->area_start(), page->area_size());
+
   return page;
 }
 
+void Page::InitializeFreeListCategories() {
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
+  }
+}
+
 void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
   MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
 }
@@ -264,6 +304,7 @@
 }
 
 void MemoryChunk::IncrementLiveBytes(int by) {
+  if (IsFlagSet(BLACK_PAGE)) return;
   if (FLAG_trace_live_bytes) {
     PrintIsolate(heap()->isolate(),
                  "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
@@ -295,6 +336,24 @@
   return p->owner() == this;
 }
 
+void PagedSpace::UnlinkFreeListCategories(Page* page) {
+  DCHECK_EQ(this, page->owner());
+  page->ForAllFreeListCategories([this](FreeListCategory* category) {
+    DCHECK_EQ(free_list(), category->owner());
+    free_list()->RemoveCategory(category);
+  });
+}
+
+intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
+  DCHECK_EQ(this, page->owner());
+  intptr_t added = 0;
+  page->ForAllFreeListCategories([&added](FreeListCategory* category) {
+    added += category->available();
+    category->Relink();
+  });
+  return added;
+}
+
 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
   uintptr_t offset = addr - chunk->address();
@@ -308,15 +367,36 @@
   return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
 }
 
+void Page::MarkNeverAllocateForTesting() {
+  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
+  SetFlag(NEVER_ALLOCATE_ON_PAGE);
+  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
+}
 
-PointerChunkIterator::PointerChunkIterator(Heap* heap)
+void Page::MarkEvacuationCandidate() {
+  DCHECK(!IsFlagSet(NEVER_EVACUATE));
+  DCHECK_NULL(old_to_old_slots_);
+  DCHECK_NULL(typed_old_to_old_slots_);
+  SetFlag(EVACUATION_CANDIDATE);
+  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
+}
+
+void Page::ClearEvacuationCandidate() {
+  DCHECK_NULL(old_to_old_slots_);
+  DCHECK_NULL(typed_old_to_old_slots_);
+  ClearFlag(EVACUATION_CANDIDATE);
+  InitializeFreeListCategories();
+}
+
+MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
     : state_(kOldSpaceState),
+      mode_(mode),
       old_iterator_(heap->old_space()),
+      code_iterator_(heap->code_space()),
       map_iterator_(heap->map_space()),
       lo_iterator_(heap->lo_space()) {}
 
-
-MemoryChunk* PointerChunkIterator::next() {
+MemoryChunk* MemoryChunkIterator::next() {
   switch (state_) {
     case kOldSpaceState: {
       if (old_iterator_.has_next()) {
@@ -326,47 +406,59 @@
       // Fall through.
     }
     case kMapState: {
-      if (map_iterator_.has_next()) {
+      if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
         return map_iterator_.next();
       }
+      state_ = kCodeState;
+      // Fall through.
+    }
+    case kCodeState: {
+      if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
+        return code_iterator_.next();
+      }
       state_ = kLargeObjectState;
       // Fall through.
     }
     case kLargeObjectState: {
-      HeapObject* heap_object;
-      do {
-        heap_object = lo_iterator_.Next();
-        if (heap_object == NULL) {
-          state_ = kFinishedState;
-          return NULL;
-        }
-        // Fixed arrays are the only pointer-containing objects in large
-        // object space.
-      } while (!heap_object->IsFixedArray());
-      MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
-      return answer;
+      MemoryChunk* answer = lo_iterator_.next();
+      if (answer != nullptr) {
+        return answer;
+      }
+      state_ = kFinishedState;
+      // Fall through.
     }
     case kFinishedState:
-      return NULL;
+      return nullptr;
     default:
       break;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
-
 void Page::set_next_page(Page* page) {
   DCHECK(page->owner() == owner());
   set_next_chunk(page);
 }
 
-
 void Page::set_prev_page(Page* page) {
   DCHECK(page->owner() == owner());
   set_prev_chunk(page);
 }
 
+Page* FreeListCategory::page() {
+  return Page::FromAddress(reinterpret_cast<Address>(this));
+}
+
+FreeList* FreeListCategory::owner() {
+  return reinterpret_cast<PagedSpace*>(
+             Page::FromAddress(reinterpret_cast<Address>(this))->owner())
+      ->free_list();
+}
+
+bool FreeListCategory::is_linked() {
+  return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
+}
 
 // Try linear allocation in the page of alloc_info's allocation top.  Does
 // not contain slow case logic (e.g. move to the next page or try free list
@@ -420,7 +512,8 @@
 
 
 // Raw allocation.
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(
+    int size_in_bytes, UpdateSkipList update_skip_list) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
 
   if (object == NULL) {
@@ -431,7 +524,7 @@
   }
 
   if (object != NULL) {
-    if (identity() == CODE_SPACE) {
+    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
     MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
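
Note: the free list is now organized as per-page FreeListCategory objects that the owning space links and unlinks (UnlinkFreeListCategories / RelinkFreeListCategories above), so refilling the free list and moving pages into a compaction space works by relinking whole categories instead of splicing individual free-space nodes. A simplified standalone model of that ownership scheme follows; the Toy* types are illustrative, not the V8 ones.

#include <cstdint>
#include <list>

// Toy per-page free list category: tracks how many bytes it holds and whether
// it is currently linked into the owning space's free list.
struct ToyFreeListCategory {
  intptr_t available = 0;
  bool linked = false;
};

struct ToyPage {
  static constexpr int kNumberOfCategories = 6;
  ToyFreeListCategory categories[kNumberOfCategories];
};

class ToyFreeList {
 public:
  void AddCategory(ToyFreeListCategory* category) {
    categories_.push_back(category);
    category->linked = true;
  }
  void RemoveCategory(ToyFreeListCategory* category) {
    categories_.remove(category);  // std::list removes by pointer equality.
    category->linked = false;
  }

 private:
  std::list<ToyFreeListCategory*> categories_;
};

// Mirrors the shape of PagedSpace::RelinkFreeListCategories: re-attach a
// swept page's categories and report how many bytes became available again.
intptr_t RelinkCategories(ToyFreeList* free_list, ToyPage* page) {
  intptr_t added = 0;
  for (ToyFreeListCategory& category : page->categories) {
    added += category.available;
    free_list->AddCategory(&category);
  }
  return added;
}
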
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 6b98fc1..63e7c33 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -8,7 +8,6 @@
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slot-set.h"
-#include "src/heap/slots-buffer.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
 #include "src/snapshot/snapshot.h"
@@ -316,15 +315,18 @@
 
 
 void MemoryAllocator::TearDown() {
+  for (MemoryChunk* chunk : chunk_pool_) {
+    FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
+               NOT_EXECUTABLE);
+  }
   // Check that spaces were torn down before MemoryAllocator.
-  DCHECK(size_.Value() == 0);
+  DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
 }
 
-
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
@@ -336,20 +338,6 @@
 }
 
 
-void MemoryAllocator::FreeNewSpaceMemory(Address addr,
-                                         base::VirtualMemory* reservation,
-                                         Executability executable) {
-  LOG(isolate_, DeleteEvent("NewSpace", addr));
-
-  DCHECK(reservation->IsReserved());
-  const intptr_t size = static_cast<intptr_t>(reservation->size());
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  FreeMemory(reservation, NOT_EXECUTABLE);
-}
-
-
 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
@@ -434,26 +422,6 @@
   set_next_page(this);
 }
 
-
-NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
-                                       SemiSpace* semi_space) {
-  Address area_start = start + NewSpacePage::kObjectStartOffset;
-  Address area_end = start + Page::kPageSize;
-
-  MemoryChunk* chunk =
-      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
-                              area_end, NOT_EXECUTABLE, semi_space, nullptr);
-  bool in_to_space = (semi_space->id() != kFromSpace);
-  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
-                             : MemoryChunk::IN_FROM_SPACE);
-  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
-                                       : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
-  heap->incremental_marking()->SetNewSpacePageFlags(page);
-  return page;
-}
-
-
 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
   set_owner(semi_space);
   set_next_chunk(this);
@@ -478,15 +446,14 @@
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
-  chunk->slots_buffer_ = nullptr;
   chunk->old_to_new_slots_ = nullptr;
   chunk->old_to_old_slots_ = nullptr;
+  chunk->typed_old_to_old_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
-  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = nullptr;
   chunk->available_in_free_list_ = 0;
   chunk->wasted_memory_ = 0;
@@ -717,21 +684,16 @@
   available_in_free_list_ = 0;
 }
 
-
-Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
-                                    Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
-  if (chunk == NULL) return NULL;
-  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
-}
-
-
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
   MemoryChunk* chunk =
       AllocateChunk(object_size, object_size, executable, owner);
   if (chunk == NULL) return NULL;
+  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+    FATAL("Code page is too large.");
+  }
   return LargePage::Initialize(isolate_->heap(), chunk);
 }
 
@@ -780,12 +742,75 @@
   }
 }
 
-
+template <MemoryAllocator::AllocationMode mode>
 void MemoryAllocator::Free(MemoryChunk* chunk) {
-  PreFreeMemory(chunk);
-  PerformFreeMemory(chunk);
+  if (mode == kRegular) {
+    PreFreeMemory(chunk);
+    PerformFreeMemory(chunk);
+  } else {
+    DCHECK_EQ(mode, kPooled);
+    FreePooled(chunk);
+  }
 }
 
+template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+    MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+    MemoryChunk* chunk);
+
+template <typename PageType, MemoryAllocator::AllocationMode mode,
+          typename SpaceType>
+PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+                                        Executability executable) {
+  MemoryChunk* chunk = nullptr;
+  if (mode == kPooled) {
+    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
+    DCHECK_EQ(executable, NOT_EXECUTABLE);
+    chunk = AllocatePagePooled(owner);
+  }
+  if (chunk == nullptr) {
+    chunk = AllocateChunk(size, size, executable, owner);
+  }
+  if (chunk == nullptr) return nullptr;
+  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
+                                             PagedSpace>(intptr_t, PagedSpace*,
+                                                         Executability);
+
+template NewSpacePage* MemoryAllocator::AllocatePage<
+    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
+                                                       Executability);
+
+template <typename SpaceType>
+MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+  if (chunk_pool_.is_empty()) return nullptr;
+  const int size = MemoryChunk::kPageSize;
+  MemoryChunk* chunk = chunk_pool_.RemoveLast();
+  const Address start = reinterpret_cast<Address>(chunk);
+  const Address area_start = start + MemoryChunk::kObjectStartOffset;
+  const Address area_end = start + size;
+  CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE);
+  base::VirtualMemory reservation(start, size);
+  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
+                          NOT_EXECUTABLE, owner, &reservation);
+  size_.Increment(size);
+  return chunk;
+}
+
+void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
+  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+  chunk_pool_.Add(chunk);
+  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
+  if (chunk->executable() == EXECUTABLE) {
+    size_executable_.Increment(-chunk_size);
+  }
+  size_.Increment(-chunk_size);
+  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
+}
 
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
@@ -932,8 +957,6 @@
 // MemoryChunk implementation
 
 void MemoryChunk::ReleaseAllocatedMemory() {
-  delete slots_buffer_;
-  slots_buffer_ = nullptr;
   delete skip_list_;
   skip_list_ = nullptr;
   delete mutex_;
@@ -972,6 +995,15 @@
   old_to_old_slots_ = nullptr;
 }
 
+void MemoryChunk::AllocateTypedOldToOldSlots() {
+  DCHECK(nullptr == typed_old_to_old_slots_);
+  typed_old_to_old_slots_ = new TypedSlotSet(address());
+}
+
+void MemoryChunk::ReleaseTypedOldToOldSlots() {
+  delete typed_old_to_old_slots_;
+  typed_old_to_old_slots_ = nullptr;
+}
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
@@ -1021,79 +1053,46 @@
   accounting_stats_.Clear();
 }
 
-
-void PagedSpace::AddMemory(Address start, intptr_t size) {
-  accounting_stats_.ExpandSpace(static_cast<int>(size));
-  Free(start, static_cast<int>(size));
-}
-
-
 void PagedSpace::RefillFreeList() {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  FreeList* free_list = nullptr;
-  if (this == heap()->old_space()) {
-    free_list = collector->free_list_old_space().get();
-  } else if (this == heap()->code_space()) {
-    free_list = collector->free_list_code_space().get();
-  } else if (this == heap()->map_space()) {
-    free_list = collector->free_list_map_space().get();
-  } else {
-    // Any PagedSpace might invoke RefillFreeList. We filter all but our old
-    // generation spaces out.
+  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
+  // generation spaces out.
+  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
+      identity() != MAP_SPACE) {
     return;
   }
-  DCHECK(free_list != nullptr);
-  intptr_t added = free_list_.Concatenate(free_list);
-  accounting_stats_.IncreaseCapacity(added);
-}
-
-
-void CompactionSpace::RefillFreeList() {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  FreeList* free_list = nullptr;
-  if (identity() == OLD_SPACE) {
-    free_list = collector->free_list_old_space().get();
-  } else if (identity() == CODE_SPACE) {
-    free_list = collector->free_list_code_space().get();
-  } else {
-    // Compaction spaces only represent old or code space.
-    UNREACHABLE();
+  List<Page*>* swept_pages = collector->swept_pages(identity());
+  intptr_t added = 0;
+  {
+    base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
+    for (int i = swept_pages->length() - 1; i >= 0; --i) {
+      Page* p = (*swept_pages)[i];
+      // Only during compaction can pages actually change ownership. This is
+      // safe because there exists no other competing action on the page links
+      // during compaction.
+      if (is_local() && (p->owner() != this)) {
+        if (added > kCompactionMemoryWanted) break;
+        base::LockGuard<base::Mutex> guard(
+            reinterpret_cast<PagedSpace*>(p->owner())->mutex());
+        p->Unlink();
+        p->set_owner(this);
+        p->InsertAfter(anchor_.prev_page());
+      }
+      added += RelinkFreeListCategories(p);
+      added += p->wasted_memory();
+      swept_pages->Remove(i);
+    }
   }
-  DCHECK(free_list != nullptr);
-  intptr_t refilled = 0;
-  while (refilled < kCompactionMemoryWanted) {
-    FreeSpace* node =
-        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
-    if (node == nullptr) return;
-    refilled += node->size();
-    AddMemory(node->address(), node->size());
-  }
-}
-
-void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
-  DCHECK(identity() == other->identity());
-  // Destroy the linear allocation space of {other}. This is needed to
-  //   (a) not waste the memory and
-  //   (b) keep the rest of the chunk in an iterable state (filler is needed).
-  other->EmptyAllocationInfo();
-
-  // Move over the free list. Concatenate makes sure that the source free list
-  // gets properly reset after moving over all nodes.
-  intptr_t added = free_list_.Concatenate(other->free_list());
-
-  // Moved memory is not recorded as allocated memory, but rather increases and
-  // decreases capacity of the corresponding spaces.
-  other->accounting_stats_.DecreaseCapacity(added);
   accounting_stats_.IncreaseCapacity(added);
 }
 
-
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+  DCHECK(identity() == other->identity());
   // Unmerged fields:
   //   area_size_
   //   anchor_
 
-  MoveOverFreeMemory(other);
+  other->EmptyAllocationInfo();
 
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
@@ -1110,9 +1109,14 @@
   Page* p = nullptr;
   while (it.has_next()) {
     p = it.next();
+
+    // Relinking requires the category to be unlinked.
+    other->UnlinkFreeListCategories(p);
+
     p->Unlink();
     p->set_owner(this);
     p->InsertAfter(anchor_.prev_page());
+    RelinkFreeListCategories(p);
   }
 }
 
@@ -1178,8 +1182,8 @@
 
   if (!CanExpand(size)) return false;
 
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
-                                                                executable());
+  Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
+      size, this, executable());
   if (p == NULL) return false;
 
   AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1187,6 +1191,17 @@
   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
 
+  // During incremental marking with black allocation, old space pages are
+  // allocated black.
+  if (heap()->incremental_marking()->black_allocation() &&
+      identity() == OLD_SPACE) {
+    Bitmap::SetAllBits(p);
+    p->SetFlag(Page::BLACK_PAGE);
+    if (FLAG_trace_incremental_marking) {
+      PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
+    }
+  }
+
   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
 
   p->InsertAfter(anchor_.prev_page());
@@ -1219,17 +1234,12 @@
   accounting_stats_.ExpandSpace(size);
 }
 
+void PagedSpace::ReleasePage(Page* page) {
+  DCHECK_EQ(page->LiveBytes(), 0);
+  DCHECK_EQ(AreaSize(), page->area_size());
+  DCHECK_EQ(page->owner(), this);
 
-void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
-  DCHECK(page->LiveBytes() == 0);
-  DCHECK(AreaSize() == page->area_size());
-
-  if (evict_free_list_items) {
-    intptr_t size = free_list_.EvictFreeListItems(page);
-    accounting_stats_.AllocateBytes(size);
-    DCHECK_EQ(AreaSize(), static_cast<int>(size));
-  }
-
+  free_list_.EvictFreeListItems(page);
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@@ -1249,7 +1259,6 @@
   accounting_stats_.ShrinkSpace(AreaSize());
 }
 
-
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif
@@ -1288,7 +1297,8 @@
       // All the interior pointers should be contained in the heap.
       int size = object->Size();
       object->IterateBody(map->instance_type(), size, visitor);
-      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+      if (!page->IsFlagSet(Page::BLACK_PAGE) &&
+          Marking::IsBlack(Marking::MarkBitFrom(object))) {
         black_size += size;
       }
 
@@ -1304,55 +1314,28 @@
 // -----------------------------------------------------------------------------
 // NewSpace implementation
 
-
-bool NewSpace::SetUp(int reserved_semispace_capacity,
+bool NewSpace::SetUp(int initial_semispace_capacity,
                      int maximum_semispace_capacity) {
-  // Set up new space based on the preallocated memory block defined by
-  // start and size. The provided space is divided into two semi-spaces.
-  // To support fast containment testing in the new space, the size of
-  // this chunk must be a power of two and it must be aligned to its size.
-  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-
-  size_t size = 2 * reserved_semispace_capacity;
-  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
-      size, size, &reservation_);
-  if (base == NULL) return false;
-
-  chunk_base_ = base;
-  chunk_size_ = static_cast<uintptr_t>(size);
-  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
-
   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
   DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
 
+  to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+  from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
+  if (!to_space_.Commit()) {
+    return false;
+  }
+  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
+  ResetAllocationInfo();
+
   // Allocate and set up the histogram arrays if necessary.
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-
 #define SET_NAME(name)                        \
   allocated_histogram_[name].set_name(#name); \
   promoted_histogram_[name].set_name(#name);
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
 
-  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
-  DCHECK(static_cast<intptr_t>(chunk_size_) >=
-         2 * heap()->ReservedSemiSpaceSize());
-  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
-
-  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
-                  maximum_semispace_capacity);
-  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
-                    initial_semispace_capacity, maximum_semispace_capacity);
-  if (!to_space_.Commit()) {
-    return false;
-  }
-  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
-
-  start_ = chunk_base_;
-
-  ResetAllocationInfo();
-
   return true;
 }
 
@@ -1367,18 +1350,10 @@
     promoted_histogram_ = NULL;
   }
 
-  start_ = NULL;
   allocation_info_.Reset(nullptr, nullptr);
 
-
   to_space_.TearDown();
   from_space_.TearDown();
-
-  heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
-      chunk_base_, &reservation_, NOT_EXECUTABLE);
-
-  chunk_base_ = NULL;
-  chunk_size_ = 0;
 }
 
 
@@ -1432,7 +1407,8 @@
   if (IsValid()) {
     heap_->CreateFillerObjectAt(
         allocation_info_.top(),
-        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
+        ClearRecordedSlots::kNo);
   }
 }
 
@@ -1443,7 +1419,8 @@
   if (IsValid()) {
     heap_->CreateFillerObjectAt(
         allocation_info_.top(),
-        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
+        ClearRecordedSlots::kNo);
   }
 }
 
@@ -1526,7 +1503,7 @@
   }
 
   int remaining_in_page = static_cast<int>(limit - top);
-  heap()->CreateFillerObjectAt(top, remaining_in_page);
+  heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
   pages_used_++;
   UpdateAllocationInfo();
 
@@ -1662,7 +1639,7 @@
 
       // The object should not be code or a map.
       CHECK(!object->IsMap());
-      CHECK(!object->IsCode());
+      CHECK(!object->IsAbstractCode());
 
       // The object itself should look OK.
       object->ObjectVerify();
@@ -1693,43 +1670,45 @@
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-void SemiSpace::SetUp(Address start, int initial_capacity,
-                      int maximum_capacity) {
+void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
   DCHECK_GE(maximum_capacity, Page::kPageSize);
   minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
   current_capacity_ = minimum_capacity_;
   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   committed_ = false;
-  start_ = start;
-  age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
 }
 
 
 void SemiSpace::TearDown() {
-  start_ = nullptr;
-  current_capacity_ = 0;
+  // Properly uncommit memory to keep the allocator counters in sync.
+  if (is_committed()) Uncommit();
+  current_capacity_ = maximum_capacity_ = 0;
 }
 
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_, current_capacity_, executable())) {
-    return false;
-  }
-  AccountCommitted(current_capacity_);
-
   NewSpacePage* current = anchor();
   const int num_pages = current_capacity_ / Page::kPageSize;
-  for (int i = 0; i < num_pages; i++) {
+  for (int pages_added = 0; pages_added < num_pages; pages_added++) {
     NewSpacePage* new_page =
-        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+        heap()
+            ->isolate()
+            ->memory_allocator()
+            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
+                NewSpacePage::kAllocatableMemory, this, executable());
+    if (new_page == nullptr) {
+      RewindPages(current, pages_added);
+      return false;
+    }
     new_page->InsertAfter(current);
     current = new_page;
   }
   Reset();
-
-  set_current_capacity(current_capacity_);
+  AccountCommitted(current_capacity_);
+  if (age_mark_ == nullptr) {
+    age_mark_ = first_page()->area_start();
+  }
   committed_ = true;
   return true;
 }
@@ -1737,16 +1716,14 @@
 
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
-  Address start = start_ + maximum_capacity_ - current_capacity_;
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-          start, current_capacity_)) {
-    return false;
+  NewSpacePageIterator it(this);
+  while (it.has_next()) {
+    heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+        it.next());
   }
-  AccountUncommitted(current_capacity_);
-
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
-
+  AccountUncommitted(current_capacity_);
   committed_ = false;
   return true;
 }
@@ -1767,27 +1744,25 @@
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
-  int pages_before = current_capacity_ / Page::kPageSize;
-  int pages_after = new_capacity / Page::kPageSize;
-
-  size_t delta = new_capacity - current_capacity_;
-
+  const int delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_ + current_capacity_, delta, executable())) {
-    return false;
-  }
-  AccountCommitted(static_cast<intptr_t>(delta));
-  set_current_capacity(new_capacity);
+  int delta_pages = delta / NewSpacePage::kPageSize;
   NewSpacePage* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
-  for (int i = pages_before; i < pages_after; i++) {
-    Address page_address = start_ + i * Page::kPageSize;
+  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
     NewSpacePage* new_page =
-        NewSpacePage::Initialize(heap(), page_address, this);
+        heap()
+            ->isolate()
+            ->memory_allocator()
+            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
+                NewSpacePage::kAllocatableMemory, this, executable());
+    if (new_page == nullptr) {
+      RewindPages(last_page, pages_added);
+      return false;
+    }
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that were set on the old page.
@@ -1795,34 +1770,46 @@
                        NewSpacePage::kCopyOnFlipFlagsMask);
     last_page = new_page;
   }
+  AccountCommitted(static_cast<intptr_t>(delta));
+  current_capacity_ = new_capacity;
   return true;
 }
 
+void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
+  NewSpacePage* new_last_page = nullptr;
+  NewSpacePage* last_page = start;
+  while (num_pages > 0) {
+    DCHECK_NE(last_page, anchor());
+    new_last_page = last_page->prev_page();
+    last_page->prev_page()->set_next_page(last_page->next_page());
+    last_page->next_page()->set_prev_page(last_page->prev_page());
+    last_page = new_last_page;
+    num_pages--;
+  }
+}
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
-    size_t delta = current_capacity_ - new_capacity;
+    const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-
-    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
-    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
-      return false;
+    int delta_pages = delta / NewSpacePage::kPageSize;
+    NewSpacePage* new_last_page;
+    NewSpacePage* last_page;
+    while (delta_pages > 0) {
+      last_page = anchor()->prev_page();
+      new_last_page = last_page->prev_page();
+      new_last_page->set_next_page(anchor());
+      anchor()->set_prev_page(new_last_page);
+      heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+          last_page);
+      delta_pages--;
     }
     AccountUncommitted(static_cast<intptr_t>(delta));
-
-    int pages_after = new_capacity / Page::kPageSize;
-    NewSpacePage* new_last_page =
-        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
-    new_last_page->set_next_page(anchor());
-    anchor()->set_prev_page(new_last_page);
-    DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
   }
-
-  set_current_capacity(new_capacity);
-
+  current_capacity_ = new_capacity;
   return true;
 }
 
@@ -1869,7 +1856,6 @@
   std::swap(from->current_capacity_, to->current_capacity_);
   std::swap(from->maximum_capacity_, to->maximum_capacity_);
   std::swap(from->minimum_capacity_, to->minimum_capacity_);
-  std::swap(from->start_, to->start_);
   std::swap(from->age_mark_, to->age_mark_);
   std::swap(from->committed_, to->committed_);
   std::swap(from->anchor_, to->anchor_);
@@ -2153,137 +2139,54 @@
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
-intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
-  intptr_t free_bytes = 0;
-  if (category->top() != NULL) {
-    DCHECK(category->end_ != NULL);
-    free_bytes = category->available();
-    if (end_ == NULL) {
-      end_ = category->end();
-    } else {
-      category->end()->set_next(top());
-    }
-    set_top(category->top());
-    available_ += category->available();
-    category->Reset();
-  }
-  return free_bytes;
-}
-
 
 void FreeListCategory::Reset() {
   set_top(nullptr);
-  set_end(nullptr);
+  set_prev(nullptr);
+  set_next(nullptr);
   available_ = 0;
 }
 
-
-intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
-  intptr_t sum = 0;
-  FreeSpace* prev_node = nullptr;
-  for (FreeSpace* cur_node = top(); cur_node != nullptr;
-       cur_node = cur_node->next()) {
-    Page* page_for_node = Page::FromAddress(cur_node->address());
-    if (page_for_node == p) {
-      // FreeSpace node on eviction page found, unlink it.
-      int size = cur_node->size();
-      sum += size;
-      DCHECK((prev_node != nullptr) || (top() == cur_node));
-      if (cur_node == top()) {
-        set_top(cur_node->next());
-      }
-      if (cur_node == end()) {
-        set_end(prev_node);
-      }
-      if (prev_node != nullptr) {
-        prev_node->set_next(cur_node->next());
-      }
-      continue;
-    }
-    prev_node = cur_node;
-  }
-  p->add_available_in_free_list(-sum);
-  available_ -= sum;
-  return sum;
-}
-
-
-bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
-  FreeSpace* node = top();
-  while (node != NULL) {
-    if (Page::FromAddress(node->address()) == p) return true;
-    node = node->next();
-  }
-  return false;
-}
-
-
 FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+  DCHECK(page()->CanAllocate());
+
   FreeSpace* node = top();
   if (node == nullptr) return nullptr;
-
-  Page* page = Page::FromAddress(node->address());
-  while ((node != nullptr) && !page->CanAllocate()) {
-    available_ -= node->size();
-    page->add_available_in_free_list(-(node->Size()));
-    node = node->next();
-  }
-
-  if (node != nullptr) {
-    set_top(node->next());
-    *node_size = node->Size();
-    available_ -= *node_size;
-  } else {
-    set_top(nullptr);
-  }
-
-  if (top() == nullptr) {
-    set_end(nullptr);
-  }
-
+  set_top(node->next());
+  *node_size = node->Size();
+  available_ -= *node_size;
   return node;
 }
 
+FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
+                                                 int* node_size) {
+  DCHECK(page()->CanAllocate());
 
-FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
-                                              int* node_size) {
   FreeSpace* node = PickNodeFromList(node_size);
-  if ((node != nullptr) && (*node_size < size_in_bytes)) {
-    Free(node, *node_size);
+  if ((node != nullptr) && (*node_size < minimum_size)) {
+    Free(node, *node_size, kLinkCategory);
     *node_size = 0;
     return nullptr;
   }
   return node;
 }
 
-
-FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
+FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
                                                  int* node_size) {
+  DCHECK(page()->CanAllocate());
+
   FreeSpace* prev_non_evac_node = nullptr;
   for (FreeSpace* cur_node = top(); cur_node != nullptr;
        cur_node = cur_node->next()) {
     int size = cur_node->size();
-    Page* page_for_node = Page::FromAddress(cur_node->address());
-
-    if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
-      // The node is either large enough or contained in an evacuation
-      // candidate. In both cases we need to unlink it from the list.
+    if (size >= minimum_size) {
       available_ -= size;
       if (cur_node == top()) {
         set_top(cur_node->next());
       }
-      if (cur_node == end()) {
-        set_end(prev_non_evac_node);
-      }
       if (prev_non_evac_node != nullptr) {
         prev_non_evac_node->set_next(cur_node->next());
       }
-      // For evacuation candidates we continue.
-      if (!page_for_node->CanAllocate()) {
-        page_for_node->add_available_in_free_list(-size);
-        continue;
-      }
-      // Otherwise we have a large enough node and can return.
       *node_size = size;
       return cur_node;
     }
@@ -2293,14 +2196,17 @@
   return nullptr;
 }
 
+bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
+                            FreeMode mode) {
+  if (!page()->CanAllocate()) return false;
 
-void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
   free_space->set_next(top());
   set_top(free_space);
-  if (end_ == NULL) {
-    end_ = free_space;
-  }
   available_ += size_in_bytes;
+  if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
+    owner()->AddCategory(this);
+  }
+  return true;
 }
 
 
@@ -2317,59 +2223,46 @@
   }
 }
 
+void FreeListCategory::Relink() {
+  DCHECK(!is_linked());
+  owner()->AddCategory(this);
+}
+
+void FreeListCategory::Invalidate() {
+  page()->add_available_in_free_list(-available());
+  Reset();
+  type_ = kInvalidCategory;
+}
+
 FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-    category_[i].Initialize(this, static_cast<FreeListCategoryType>(i));
+    categories_[i] = nullptr;
   }
   Reset();
 }
 
 
-intptr_t FreeList::Concatenate(FreeList* other) {
-  intptr_t usable_bytes = 0;
-  intptr_t wasted_bytes = 0;
-
-  // This is safe (not going to deadlock) since Concatenate operations
-  // are never performed on the same free lists at the same time in
-  // reverse order. Furthermore, we only lock if the PagedSpace containing
-  // the free list is know to be globally available, i.e., not local.
-  if (!owner()->is_local()) mutex_.Lock();
-  if (!other->owner()->is_local()) other->mutex()->Lock();
-
-  wasted_bytes = other->wasted_bytes_;
-  wasted_bytes_ += wasted_bytes;
-  other->wasted_bytes_ = 0;
-
-  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-    usable_bytes += category_[i].Concatenate(
-        other->GetFreeListCategory(static_cast<FreeListCategoryType>(i)));
-  }
-
-  if (!other->owner()->is_local()) other->mutex()->Unlock();
-  if (!owner()->is_local()) mutex_.Unlock();
-  return usable_bytes + wasted_bytes;
-}
-
-
 void FreeList::Reset() {
+  ForAllFreeListCategories(
+      [](FreeListCategory* category) { category->Reset(); });
   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-    category_[i].Reset();
+    categories_[i] = nullptr;
   }
   ResetStats();
 }
 
-
-int FreeList::Free(Address start, int size_in_bytes) {
+int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
   if (size_in_bytes == 0) return 0;
 
-  owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
+  owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
+                                        ClearRecordedSlots::kNo);
 
   Page* page = Page::FromAddress(start);
 
-  // Early return to drop too-small blocks on the floor.
-  if (size_in_bytes <= kSmallListMin) {
+  // Blocks have to be a minimum size to hold free list items.
+  if (size_in_bytes < kMinBlockSize) {
     page->add_wasted_memory(size_in_bytes);
-    wasted_bytes_ += size_in_bytes;
+    wasted_bytes_.Increment(size_in_bytes);
     return size_in_bytes;
   }
 
@@ -2377,16 +2270,34 @@
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
   FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
-  category_[type].Free(free_space, size_in_bytes);
-  page->add_available_in_free_list(size_in_bytes);
-
-  DCHECK(IsVeryLong() || Available() == SumFreeLists());
+  if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
+    page->add_available_in_free_list(size_in_bytes);
+  }
   return 0;
 }
 
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
+  FreeListCategoryIterator it(this, type);
+  FreeSpace* node = nullptr;
+  while (it.HasNext()) {
+    FreeListCategory* current = it.Next();
+    node = current->PickNodeFromList(node_size);
+    if (node != nullptr) {
+      Page::FromAddress(node->address())
+          ->add_available_in_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || Available() == SumFreeLists());
+      return node;
+    }
+    RemoveCategory(current);
+  }
+  return node;
+}
 
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
-  FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
+                                   int minimum_size) {
+  if (categories_[type] == nullptr) return nullptr;
+  FreeSpace* node =
+      categories_[type]->TryPickNodeFromList(minimum_size, node_size);
   if (node != nullptr) {
     Page::FromAddress(node->address())
         ->add_available_in_free_list(-(*node_size));
@@ -2395,10 +2306,25 @@
   return node;
 }
 
+FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
+                                         int* node_size, int minimum_size) {
+  FreeListCategoryIterator it(this, type);
+  FreeSpace* node = nullptr;
+  while (it.HasNext()) {
+    FreeListCategory* current = it.Next();
+    node = current->SearchForNodeInList(minimum_size, node_size);
+    if (node != nullptr) {
+      Page::FromAddress(node->address())
+          ->add_available_in_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || Available() == SumFreeLists());
+      return node;
+    }
+  }
+  return node;
+}
 
 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   FreeSpace* node = nullptr;
-  Page* page = nullptr;
 
   // First try the allocation fast path: try to allocate the minimum element
   // size of a free list category. This operation is constant time.
@@ -2411,10 +2337,8 @@
 
   // Next search the huge list for free list nodes. This takes linear time in
   // the number of huge elements.
-  node = category_[kHuge].SearchForNodeInList(size_in_bytes, node_size);
+  node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
   if (node != nullptr) {
-    page = Page::FromAddress(node->address());
-    page->add_available_in_free_list(-(*node_size));
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
     return node;
   }
@@ -2424,51 +2348,14 @@
   if (type == kHuge) return nullptr;
 
   // Now search the best fitting free list for a node that has at least the
-  // requested size. This takes linear time in the number of elements.
+  // requested size.
   type = SelectFreeListCategoryType(size_in_bytes);
-  node = category_[type].PickNodeFromList(size_in_bytes, node_size);
-  if (node != nullptr) {
-    DCHECK(size_in_bytes <= *node_size);
-    page = Page::FromAddress(node->address());
-    page->add_available_in_free_list(-(*node_size));
-  }
+  node = TryFindNodeIn(type, node_size, size_in_bytes);
 
   DCHECK(IsVeryLong() || Available() == SumFreeLists());
   return node;
 }
 
-
-FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
-  hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
-  base::LockGuard<base::Mutex> guard(&mutex_);
-  FreeSpace* node = nullptr;
-  int node_size = 0;
-  // Try to find a node that fits exactly.
-  node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
-  // If no node could be found get as much memory as possible.
-  if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
-  if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
-  if (node != nullptr) {
-    // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
-    // size larger then the minimum size required for FreeSpace, and (b) to get
-    // a block that can actually be freed into some FreeList later on.
-    if (hint_size_in_bytes <= kSmallListMin) {
-      hint_size_in_bytes = kSmallListMin + kPointerSize;
-    }
-    // Give back left overs that were not required by {size_in_bytes}.
-    intptr_t left_over = node_size - hint_size_in_bytes;
-
-    // Do not bother to return anything below {kSmallListMin} as it would be
-    // immediately discarded anyways.
-    if (left_over > kSmallListMin) {
-      Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
-      node->set_size(static_cast<int>(hint_size_in_bytes));
-    }
-  }
-  return node;
-}
-
-
 // Allocation on the old space free list.  If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space.  If
 // the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2542,32 +2429,76 @@
   return new_node;
 }
 
-
-intptr_t FreeList::EvictFreeListItems(Page* p) {
-  intptr_t sum = category_[kHuge].EvictFreeListItemsInList(p);
-  if (sum < p->area_size()) {
-    for (int i = kFirstCategory; i <= kLarge; i++) {
-      sum += category_[i].EvictFreeListItemsInList(p);
-    }
-  }
+intptr_t FreeList::EvictFreeListItems(Page* page) {
+  intptr_t sum = 0;
+  page->ForAllFreeListCategories(
+      [this, &sum, page](FreeListCategory* category) {
+        DCHECK_EQ(this, category->owner());
+        sum += category->available();
+        RemoveCategory(category);
+        category->Invalidate();
+      });
   return sum;
 }
 
-
-bool FreeList::ContainsPageFreeListItems(Page* p) {
-  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-    if (category_[i].EvictFreeListItemsInList(p)) {
-      return true;
-    }
-  }
-  return false;
+bool FreeList::ContainsPageFreeListItems(Page* page) {
+  bool contained = false;
+  page->ForAllFreeListCategories(
+      [this, &contained](FreeListCategory* category) {
+        if (category->owner() == this && category->is_linked()) {
+          contained = true;
+        }
+      });
+  return contained;
 }
 
-
 void FreeList::RepairLists(Heap* heap) {
-  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-    category_[i].RepairFreeList(heap);
+  ForAllFreeListCategories(
+      [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
+}
+
+bool FreeList::AddCategory(FreeListCategory* category) {
+  FreeListCategoryType type = category->type_;
+  FreeListCategory* top = categories_[type];
+
+  if (category->is_empty()) return false;
+  if (top == category) return false;
+
+  // Common double-linked list insertion.
+  if (top != nullptr) {
+    top->set_prev(category);
   }
+  category->set_next(top);
+  categories_[type] = category;
+  return true;
+}
+
+void FreeList::RemoveCategory(FreeListCategory* category) {
+  FreeListCategoryType type = category->type_;
+  FreeListCategory* top = categories_[type];
+
+  // Common double-linked list removal.
+  if (top == category) {
+    categories_[type] = category->next();
+  }
+  if (category->prev() != nullptr) {
+    category->prev()->set_next(category->next());
+  }
+  if (category->next() != nullptr) {
+    category->next()->set_prev(category->prev());
+  }
+  category->set_next(nullptr);
+  category->set_prev(nullptr);
+}
+
+void FreeList::PrintCategories(FreeListCategoryType type) {
+  FreeListCategoryIterator it(this, type);
+  PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
+  while (it.HasNext()) {
+    FreeListCategory* current = it.Next();
+    PrintF("%p -> ", current);
+  }
+  PrintF("null\n");
 }
 
 
@@ -2583,7 +2514,6 @@
   return sum;
 }
 
-
 int FreeListCategory::FreeListLength() {
   int length = 0;
   FreeSpace* cur = top();
@@ -2595,16 +2525,13 @@
   return length;
 }
 
-
-bool FreeListCategory::IsVeryLong() {
-  return FreeListLength() == kVeryLongFreeList;
-}
-
-
 bool FreeList::IsVeryLong() {
+  int len = 0;
   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-    if (category_[i].IsVeryLong()) {
-      return true;
+    FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
+    while (it.HasNext()) {
+      len += it.Next()->FreeListLength();
+      if (len >= FreeListCategory::kVeryLongFreeList) return true;
     }
   }
   return false;
@@ -2616,9 +2543,8 @@
 // kVeryLongFreeList.
 intptr_t FreeList::SumFreeLists() {
   intptr_t sum = 0;
-  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-    sum += category_[i].SumFreeList();
-  }
+  ForAllFreeListCategories(
+      [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
   return sum;
 }
 #endif
@@ -2660,7 +2586,7 @@
     int size = static_cast<int>(page->wasted_memory());
     if (size == 0) continue;
     Address address = page->OffsetToAddress(Page::kPageSize - size);
-    heap()->CreateFillerObjectAt(address, size);
+    heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
   }
 }
 
@@ -2672,7 +2598,8 @@
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
-    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
+    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
+                                 ClearRecordedSlots::kNo);
     allocation_info_.Reset(nullptr, nullptr);
   }
 }
@@ -2855,9 +2782,14 @@
   Isolate* isolate = heap()->isolate();
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsCode()) {
-      Code* code = Code::cast(obj);
+    if (obj->IsAbstractCode()) {
+      AbstractCode* code = AbstractCode::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
+    }
+    if (obj->IsCode()) {
+      // TODO(mythria): Also enable this for BytecodeArray when it supports
+      // RelocInformation.
+      Code* code = Code::cast(obj);
       RelocIterator it(code);
       int delta = 0;
       const byte* prev_pc = code->instruction_start();
@@ -2998,7 +2930,6 @@
   }
 
   HeapObject* object = page->GetObject();
-
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
 
   if (Heap::ShouldZapGarbage()) {
@@ -3088,8 +3019,6 @@
       }
 
       // Free the chunk.
-      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
-                                                             heap()->isolate());
       size_ -= static_cast<int>(page->size());
       AccountUncommitted(static_cast<intptr_t>(page->size()));
       objects_size_ -= object->Size();
@@ -3146,7 +3075,7 @@
     // (sequential strings that have been morphed into external
     // strings), fixed arrays, byte arrays, and constant pool arrays in the
     // large object space.
-    CHECK(object->IsCode() || object->IsSeqString() ||
+    CHECK(object->IsAbstractCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsFixedDoubleArray() || object->IsByteArray());
 
@@ -3154,7 +3083,7 @@
     object->ObjectVerify();
 
     // Byte arrays and strings don't have interior pointers.
-    if (object->IsCode()) {
+    if (object->IsAbstractCode()) {
       VerifyPointersVisitor code_visitor;
       object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
     } else if (object->IsFixedArray()) {
@@ -3205,8 +3134,8 @@
   Isolate* isolate = heap()->isolate();
   LargeObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsCode()) {
-      Code* code = Code::cast(obj);
+    if (obj->IsAbstractCode()) {
+      AbstractCode* code = AbstractCode::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index c0d399f..93a81cc 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -27,11 +27,14 @@
 class Isolate;
 class MemoryAllocator;
 class MemoryChunk;
+class NewSpacePage;
+class Page;
 class PagedSpace;
 class SemiSpace;
 class SkipList;
 class SlotsBuffer;
 class SlotSet;
+class TypedSlotSet;
 class Space;
 
 // -----------------------------------------------------------------------------
@@ -105,10 +108,6 @@
 #define DCHECK_PAGE_OFFSET(offset) \
   DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
 
-#define DCHECK_MAP_PAGE_INDEX(index) \
-  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-
 class MarkBit {
  public:
   typedef uint32_t CellType;
@@ -204,6 +203,8 @@
 
   static inline void Clear(MemoryChunk* chunk);
 
+  static inline void SetAllBits(MemoryChunk* chunk);
+
   static void PrintWord(uint32_t word, uint32_t himask = 0) {
     for (uint32_t mask = 1; mask != 0; mask <<= 1) {
       if ((mask & himask) != 0) PrintF("[");
@@ -288,6 +289,113 @@
   }
 };
 
+enum FreeListCategoryType {
+  kTiniest,
+  kTiny,
+  kSmall,
+  kMedium,
+  kLarge,
+  kHuge,
+
+  kFirstCategory = kTiniest,
+  kLastCategory = kHuge,
+  kNumberOfCategories = kLastCategory + 1,
+  kInvalidCategory
+};
+
+enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+
+// A free list category maintains a linked list of free memory blocks.
+class FreeListCategory {
+ public:
+  static const int kSize = kIntSize +      // FreeListCategoryType type_
+                           kIntSize +      // int available_
+                           kPointerSize +  // FreeSpace* top_
+                           kPointerSize +  // FreeListCategory* prev_
+                           kPointerSize;   // FreeListCategory* next_
+
+  FreeListCategory()
+      : type_(kInvalidCategory),
+        available_(0),
+        top_(nullptr),
+        prev_(nullptr),
+        next_(nullptr) {}
+
+  void Initialize(FreeListCategoryType type) {
+    type_ = type;
+    available_ = 0;
+    top_ = nullptr;
+    prev_ = nullptr;
+    next_ = nullptr;
+  }
+
+  void Invalidate();
+
+  void Reset();
+
+  void ResetStats() { Reset(); }
+
+  void RepairFreeList(Heap* heap);
+
+  // Relinks the category into the currently owning free list. Requires that the
+  // category is currently unlinked.
+  void Relink();
+
+  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
+
+  // Picks a node from the list and stores its size in |node_size|. Returns
+  // nullptr if the category is empty.
+  FreeSpace* PickNodeFromList(int* node_size);
+
+  // Performs a single try to pick a node of at least |minimum_size| from the
+  // category. Stores the actual size in |node_size|. Returns nullptr if no
+  // node is found.
+  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
+
+  // Picks a node of at least |minimum_size| from the category. Stores the
+  // actual size in |node_size|. Returns nullptr if no node is found.
+  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
+
+  inline FreeList* owner();
+  inline bool is_linked();
+  bool is_empty() { return top() == nullptr; }
+  int available() const { return available_; }
+
+#ifdef DEBUG
+  intptr_t SumFreeList();
+  int FreeListLength();
+#endif
+
+ private:
+  // For debug builds we accurately compute free list lengths up until
+  // {kVeryLongFreeList} by manually walking the list.
+  static const int kVeryLongFreeList = 500;
+
+  inline Page* page();
+
+  FreeSpace* top() { return top_; }
+  void set_top(FreeSpace* top) { top_ = top; }
+  FreeListCategory* prev() { return prev_; }
+  void set_prev(FreeListCategory* prev) { prev_ = prev; }
+  FreeListCategory* next() { return next_; }
+  void set_next(FreeListCategory* next) { next_ = next; }
+
+  // |type_|: The type of this free list category.
+  FreeListCategoryType type_;
+
+  // |available_|: Total available bytes in all blocks of this free list
+  // category.
+  int available_;
+
+  // |top_|: Points to the top FreeSpace* in the free list category.
+  FreeSpace* top_;
+
+  FreeListCategory* prev_;
+  FreeListCategory* next_;
+
+  friend class FreeList;
+  friend class PagedSpace;
+};
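
A minimal standalone sketch (illustrative only, not part of this patch) of the ownership structure the class above introduces: each page owns one category per size class, and a category only takes part in allocation while it is linked into the owning free list's per-type doubly-linked list, mirroring the AddCategory/RemoveCategory logic added to spaces.cc earlier in this patch. The names and types below are simplified stand-ins, not V8's.

#include <cassert>
#include <cstddef>

// Simplified stand-in for a page-owned free list category.
struct Category {
  size_t available = 0;
  Category* prev = nullptr;
  Category* next = nullptr;
  bool is_linked() const { return prev != nullptr || next != nullptr; }
};

// Simplified stand-in for the per-type list head kept by the free list.
struct CategoryList {
  Category* top = nullptr;

  // Mirrors AddCategory: empty or already-top categories are not added.
  bool Add(Category* category) {
    if (category->available == 0 || top == category) return false;
    if (top != nullptr) top->prev = category;
    category->next = top;
    top = category;
    return true;
  }

  // Mirrors RemoveCategory: unlink and clear the category's own links.
  void Remove(Category* category) {
    if (top == category) top = category->next;
    if (category->prev != nullptr) category->prev->next = category->next;
    if (category->next != nullptr) category->next->prev = category->prev;
    category->prev = nullptr;
    category->next = nullptr;
  }
};

int main() {
  Category a, b;
  a.available = 64;
  b.available = 128;
  CategoryList list;
  list.Add(&a);     // list: a
  list.Add(&b);     // list: b -> a
  list.Remove(&a);  // list: b
  assert(list.top == &b && !a.is_linked());
  return 0;
}
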
 
 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
@@ -303,9 +411,7 @@
     IN_TO_SPACE,    // All pages in new space have one of these two set.
     NEW_SPACE_BELOW_AGE_MARK,
     EVACUATION_CANDIDATE,
-    RESCAN_ON_EVACUATION,
     NEVER_EVACUATE,  // May contain immortal immutables.
-    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.
 
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
@@ -313,6 +419,11 @@
     // to grey transition is performed in the value.
     HAS_PROGRESS_BAR,
 
+    // A black page has all mark bits set to 1 (black). A black page currently
+    // cannot be iterated because it is not swept. Moreover, live bytes are also
+    // not updated.
+    BLACK_PAGE,
+
     // This flag is intended to be used for testing. Works only when both
     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
     // are set. It forces the page to become an evacuation candidate at next
@@ -334,19 +445,6 @@
     NUM_MEMORY_CHUNK_FLAGS
   };
 
-  // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
-  // |kCompactingInProgress|:  Parallel compaction is currently in progress.
-  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
-  //   be finalized.
-  // |kCompactingAborted|: Parallel compaction has been aborted, which should
-  //   for now only happen in OOM scenarios.
-  enum ParallelCompactingState {
-    kCompactingDone,
-    kCompactingInProgress,
-    kCompactingFinalize,
-    kCompactingAborted,
-  };
-
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   //   not be performed on that page. Sweeper threads that are done with their
   //   work will set this value and not touch the page anymore.
@@ -372,8 +470,7 @@
   static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
 
   static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
-      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
 
   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -382,6 +479,8 @@
 
   static const intptr_t kSizeOffset = 0;
 
+  static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
+
   static const intptr_t kLiveBytesOffset =
       kSizeOffset + kPointerSize  // size_t size
       + kIntptrSize               // intptr_t flags_
@@ -392,25 +491,26 @@
       + kPointerSize              // Heap* heap_
       + kIntSize;                 // int progress_bar_
 
-  static const size_t kSlotsBufferOffset =
+  static const size_t kOldToNewSlotsOffset =
       kLiveBytesOffset + kIntSize;  // int live_byte_count_
 
   static const size_t kWriteBarrierCounterOffset =
-      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
-      + kPointerSize                     // SlotSet* old_to_new_slots_;
-      + kPointerSize                     // SlotSet* old_to_old_slots_;
-      + kPointerSize;                    // SkipList* skip_list_;
+      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
+      + kPointerSize                       // SlotSet* old_to_old_slots_;
+      + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
+      + kPointerSize;  // SkipList* skip_list_;
 
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
-      + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + kPointerSize      // AtomicValue parallel_compaction_
+      + kPointerSize      // base::AtomicWord concurrent_sweeping_
       + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
-      + kPointerSize;     // AtomicValue prev_chunk_
+      + kPointerSize      // AtomicValue prev_chunk_
+      // FreeListCategory categories_[kNumberOfCategories]
+      + FreeListCategory::kSize * kNumberOfCategories;
 
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
@@ -428,7 +528,11 @@
       kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
 
-  static const int kFlagsOffset = kPointerSize;
+  // Page size in bytes.  This must be a multiple of the OS page size.
+  static const int kPageSize = 1 << kPageSizeBits;
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
 
   static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
   static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
@@ -474,20 +578,18 @@
     return concurrent_sweeping_;
   }
 
-  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
-    return parallel_compaction_;
-  }
-
   // Manage live byte count, i.e., count of bytes in black objects.
   inline void ResetLiveBytes();
   inline void IncrementLiveBytes(int by);
 
   int LiveBytes() {
-    DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
+    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+    DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
     return live_byte_count_;
   }
 
   void SetLiveBytes(int live_bytes) {
+    if (IsFlagSet(BLACK_PAGE)) return;
     DCHECK_GE(live_bytes, 0);
     DCHECK_LE(static_cast<size_t>(live_bytes), size_);
     live_byte_count_ = live_bytes;
@@ -509,17 +611,18 @@
 
   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
 
-  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
-
-  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
-
   inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
   inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+  inline TypedSlotSet* typed_old_to_old_slots() {
+    return typed_old_to_old_slots_;
+  }
 
   void AllocateOldToNewSlots();
   void ReleaseOldToNewSlots();
   void AllocateOldToOldSlots();
   void ReleaseOldToOldSlots();
+  void AllocateTypedOldToOldSlots();
+  void ReleaseTypedOldToOldSlots();
 
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
@@ -591,17 +694,6 @@
     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
   }
 
-  void MarkEvacuationCandidate() {
-    DCHECK(!IsFlagSet(NEVER_EVACUATE));
-    DCHECK_NULL(slots_buffer_);
-    SetFlag(EVACUATION_CANDIDATE);
-  }
-
-  void ClearEvacuationCandidate() {
-    DCHECK(slots_buffer_ == NULL);
-    ClearFlag(EVACUATION_CANDIDATE);
-  }
-
   bool ShouldSkipEvacuationSlotRecording() {
     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
   }
@@ -683,13 +775,12 @@
   // Count of bytes marked black on page.
   int live_byte_count_;
 
-  SlotsBuffer* slots_buffer_;
-
   // A single slot set for small pages (of size kPageSize) or an array of
   // slot sets for large pages. In the latter case, the number of entries in
   // the array is ceil(size() / kPageSize).
   SlotSet* old_to_new_slots_;
   SlotSet* old_to_old_slots_;
+  TypedSlotSet* typed_old_to_old_slots_;
 
   SkipList* skip_list_;
 
@@ -702,7 +793,6 @@
   base::Mutex* mutex_;
 
   AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
-  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_free_list_;
@@ -713,6 +803,8 @@
   // prev_chunk_ holds a pointer of type MemoryChunk
   AtomicValue<MemoryChunk*> prev_chunk_;
 
+  FreeListCategory categories_[kNumberOfCategories];
+
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }
 
@@ -720,17 +812,6 @@
   friend class MemoryChunkValidator;
 };
 
-enum FreeListCategoryType {
-  kSmall,
-  kMedium,
-  kLarge,
-  kHuge,
-
-  kFirstCategory = kSmall,
-  kLastCategory = kHuge,
-  kNumberOfCategories = kLastCategory + 1
-};
-
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
@@ -790,9 +871,6 @@
 
   // ---------------------------------------------------------------------
 
-  // Page size in bytes.  This must be a multiple of the OS page size.
-  static const int kPageSize = 1 << kPageSizeBits;
-
   // Maximum object size that gets allocated into regular pages. Objects larger
   // than that size are allocated in large object space and are never moved in
   // memory. This also applies to new space allocation, since objects are never
@@ -802,11 +880,6 @@
   // short living objects >256K.
   static const int kMaxRegularHeapObjectSize = 600 * KB;
 
-  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
-  // Page size mask.
-  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
   inline void ClearGCFields();
 
   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
@@ -834,6 +907,17 @@
                             available_in_free_list());
   }
 
+  template <typename Callback>
+  inline void ForAllFreeListCategories(Callback callback) {
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      callback(&categories_[i]);
+    }
+  }
+
+  FreeListCategory* free_list_category(FreeListCategoryType type) {
+    return &categories_[type];
+  }
+
 #define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
   type name() { return name##_.Value(); }                \
   void set_##name(type name) { name##_.SetValue(name); } \
@@ -848,6 +932,13 @@
   void Print();
 #endif  // DEBUG
 
+  inline void MarkNeverAllocateForTesting();
+  inline void MarkEvacuationCandidate();
+  inline void ClearEvacuationCandidate();
+
+ private:
+  inline void InitializeFreeListCategories();
+
   friend class MemoryAllocator;
 };
 
@@ -862,6 +953,12 @@
 
   inline void set_next_page(LargePage* page) { set_next_chunk(page); }
 
+  // A limit to guarantee that we do not overflow the typed slot offset in
+  // the old-to-old remembered set.
+  // Note that this limit is higher than what the assembler already imposes
+  // on x64 and ia32 architectures.
+  static const int kMaxCodePageSize = 512 * MB;
+
  private:
   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
 
@@ -977,8 +1074,8 @@
   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
   STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                 offsetof(MemoryChunk, live_byte_count_));
-  STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
-                offsetof(MemoryChunk, slots_buffer_));
+  STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
+                offsetof(MemoryChunk, old_to_new_slots_));
   STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                 offsetof(MemoryChunk, write_barrier_counter_));
 
@@ -1104,7 +1201,14 @@
     int start_region = RegionNumber(addr);
     int end_region = RegionNumber(addr + size - kPointerSize);
     for (int idx = start_region; idx <= end_region; idx++) {
-      if (starts_[idx] > addr) starts_[idx] = addr;
+      if (starts_[idx] > addr) {
+        starts_[idx] = addr;
+      } else {
+        // In the first region, there may already be an object closer to the
+        // start of the region. Do not change the start in that case. If this
+        // is not the first region, you probably added overlapping objects.
+        DCHECK_EQ(start_region, idx);
+      }
     }
   }
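
A small worked model (illustrative only, not part of this patch) of the region-start update above: only the first region an object covers may already have an earlier start recorded; for any later region, a recorded start below the new object's address would mean the objects overlap, which is what the added DCHECK catches. Region size, word size, and addresses below are arbitrary stand-ins.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

// Arbitrary model parameters; the real code derives these from page layout.
constexpr uint64_t kRegionSize = 256;
constexpr uint64_t kWordSize = 8;
constexpr int kRegionCount = 16;

uint64_t starts[kRegionCount];

int RegionNumber(uint64_t addr) { return static_cast<int>(addr / kRegionSize); }

void AddObject(uint64_t addr, uint64_t size) {
  int start_region = RegionNumber(addr);
  int end_region = RegionNumber(addr + size - kWordSize);
  for (int idx = start_region; idx <= end_region; idx++) {
    if (starts[idx] > addr) {
      starts[idx] = addr;
    } else {
      // Only the object's first region may already record an earlier start.
      assert(idx == start_region);
    }
  }
}

int main() {
  std::fill(std::begin(starts), std::end(starts), UINT64_MAX);
  AddObject(0x010, 0x040);  // region 0 now starts at 0x010
  AddObject(0x080, 0x200);  // spans regions 0..2; region 0 keeps 0x010
  assert(starts[0] == 0x010);
  assert(starts[1] == 0x080 && starts[2] == 0x080);
  return 0;
}
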
 
@@ -1143,6 +1247,11 @@
 //
 class MemoryAllocator {
  public:
+  enum AllocationMode {
+    kRegular,
+    kPooled,
+  };
+
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
@@ -1151,8 +1260,13 @@
 
   void TearDown();
 
-  Page* AllocatePage(intptr_t size, PagedSpace* owner,
-                     Executability executable);
+  // Allocates either Page or NewSpacePage from the allocator. AllocationMode
+  // is used to indicate whether pooled allocation, which only works for
+  // MemoryChunk::kPageSize, should be tried first.
+  template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
+            typename SpaceType>
+  PageType* AllocatePage(intptr_t size, SpaceType* owner,
+                         Executability executable);
 
   LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                                Executability executable);
@@ -1164,8 +1278,9 @@
   // FreeMemory can be called concurrently when PreFree was executed before.
   void PerformFreeMemory(MemoryChunk* chunk);
 
-  // Free is a wrapper method, which calls PreFree and PerformFreeMemory
-  // together.
+  // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
+  // PerformFreeMemory together. For kPooled it dispatches to the pooled free.
+  template <MemoryAllocator::AllocationMode mode = kRegular>
   void Free(MemoryChunk* chunk);
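
A minimal standalone sketch (illustrative only, not part of this patch) of the pooling behaviour the two comments above describe: a kPooled allocation first tries to reuse a previously pooled chunk of the fixed page size, and a kPooled free keeps the chunk around for reuse instead of releasing it. The pool type, malloc-based backing, and names below are assumptions for the sketch, not V8's implementation.

#include <cstddef>
#include <cstdlib>
#include <vector>

// The Page comment in this header puts a page at 1MB; assumed here.
constexpr std::size_t kPageSize = std::size_t{1} << 20;

enum AllocationMode { kRegular, kPooled };

struct ChunkAllocator {
  std::vector<void*> chunk_pool;

  // kPooled: reuse a pooled chunk if one is available; otherwise fall back to
  // a regular allocation of exactly kPageSize bytes.
  void* Allocate(AllocationMode mode) {
    if (mode == kPooled && !chunk_pool.empty()) {
      void* chunk = chunk_pool.back();
      chunk_pool.pop_back();
      return chunk;
    }
    return std::malloc(kPageSize);
  }

  // kPooled: keep the memory for a later allocation instead of releasing it;
  // kRegular: release it immediately.
  void Free(void* chunk, AllocationMode mode) {
    if (mode == kPooled) {
      chunk_pool.push_back(chunk);
    } else {
      std::free(chunk);
    }
  }
};

int main() {
  ChunkAllocator allocator;
  void* page = allocator.Allocate(kPooled);    // pool empty: regular path
  allocator.Free(page, kPooled);               // chunk goes back into the pool
  void* reused = allocator.Allocate(kPooled);  // reuses the pooled chunk
  allocator.Free(reused, kRegular);            // actually released
  return 0;
}
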
 
   // Returns allocated spaces in bytes.
@@ -1219,8 +1334,6 @@
 
   bool CommitMemory(Address addr, size_t size, Executability executable);
 
-  void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
-                          Executability executable);
   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
@@ -1273,6 +1386,14 @@
                                               size_t reserved_size);
 
  private:
+  // See AllocatePage for public interface. Note that currently we only support
+  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
+  template <typename SpaceType>
+  MemoryChunk* AllocatePagePooled(SpaceType* owner);
+
+  // Frees the given chunk by returning it to the pool.
+  void FreePooled(MemoryChunk* chunk);
+
   Isolate* isolate_;
 
   // Maximum space size in bytes.
@@ -1326,6 +1447,8 @@
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
 
+  List<MemoryChunk*> chunk_pool_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
@@ -1481,12 +1604,6 @@
 
   void ClearSize() { size_ = capacity_; }
 
-  // Reset the allocation statistics (i.e., available = capacity with no wasted
-  // or allocated bytes).
-  void Reset() {
-    size_ = 0;
-  }
-
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
   intptr_t MaxCapacity() { return max_capacity_; }
@@ -1513,13 +1630,13 @@
   void ShrinkSpace(int size_in_bytes) {
     capacity_ -= size_in_bytes;
     size_ -= size_in_bytes;
-    CHECK(size_ >= 0);
+    CHECK_GE(size_, 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
     size_ += size_in_bytes;
-    CHECK(size_ >= 0);
+    CHECK_GE(size_, 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
@@ -1558,80 +1675,6 @@
   intptr_t size_;
 };
 
-
-// A free list category maintains a linked list of free memory blocks.
-class FreeListCategory {
- public:
-  FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
-
-  void Initialize(FreeList* owner, FreeListCategoryType type) {
-    owner_ = owner;
-    type_ = type;
-  }
-
-  // Concatenates {category} into {this}.
-  //
-  // Note: Thread-safe.
-  intptr_t Concatenate(FreeListCategory* category);
-
-  void Reset();
-
-  void Free(FreeSpace* node, int size_in_bytes);
-
-  // Pick a node from the list.
-  FreeSpace* PickNodeFromList(int* node_size);
-
-  // Pick a node from the list and compare it against {size_in_bytes}. If the
-  // node's size is greater or equal return the node and null otherwise.
-  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
-
-  // Search for a node of size {size_in_bytes}.
-  FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
-
-  intptr_t EvictFreeListItemsInList(Page* p);
-  bool ContainsPageFreeListItemsInList(Page* p);
-
-  void RepairFreeList(Heap* heap);
-
-  bool IsEmpty() { return top() == nullptr; }
-
-  FreeList* owner() { return owner_; }
-  int available() const { return available_; }
-
-#ifdef DEBUG
-  intptr_t SumFreeList();
-  int FreeListLength();
-  bool IsVeryLong();
-#endif
-
- private:
-  // For debug builds we accurately compute free lists lengths up until
-  // {kVeryLongFreeList} by manually walking the list.
-  static const int kVeryLongFreeList = 500;
-
-  FreeSpace* top() { return top_.Value(); }
-  void set_top(FreeSpace* top) { top_.SetValue(top); }
-
-  FreeSpace* end() const { return end_; }
-  void set_end(FreeSpace* end) { end_ = end; }
-
-  // |type_|: The type of this free list category.
-  FreeListCategoryType type_;
-
-  // |top_|: Points to the top FreeSpace* in the free list category.
-  AtomicValue<FreeSpace*> top_;
-
-  // |end_|: Points to the end FreeSpace* in the free list category.
-  FreeSpace* end_;
-
-  // |available_|: Total available bytes in all blocks of this free list
-  //   category.
-  int available_;
-
-  // |owner_|: The owning free list of this category.
-  FreeList* owner_;
-};
-
 // A free list maintaining free blocks of memory. The free list is organized in
 // a way to encourage objects allocated around the same time to be near each
 // other. The normal way to allocate is intended to be by bumping a 'top'
@@ -1641,9 +1684,10 @@
 // categories would scatter allocation more.
 
 // The free list is organized in categories as follows:
-// 1-31 words (too small): Such small free areas are discarded for efficiency
-//   reasons. They can be reclaimed by the compactor. However the distance
-//   between top and limit may be this small.
+// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
+//   allocation when categories >= small do not have entries anymore.
+// 11-31 words (tiny): The tiny blocks are only used for allocation when
+//   categories >= small do not have entries anymore.
 // 32-255 words (small): Used for allocating free space between 1-31 words in
 //   size.
 // 256-2047 words (medium): Used for allocating free space between 32-255 words
@@ -1657,8 +1701,12 @@
   // This method returns how much memory can be allocated after freeing
   // maximum_freed memory.
   static inline int GuaranteedAllocatable(int maximum_freed) {
-    if (maximum_freed <= kSmallListMin) {
+    if (maximum_freed <= kTiniestListMax) {
+      // Since we are not iterating over all list entries, we cannot guarantee
+      // that we can find the maximum freed block in that free list.
       return 0;
+    } else if (maximum_freed <= kTinyListMax) {
+      return kTinyAllocationMax;
     } else if (maximum_freed <= kSmallListMax) {
       return kSmallAllocationMax;
     } else if (maximum_freed <= kMediumListMax) {
@@ -1671,19 +1719,13 @@
 
   explicit FreeList(PagedSpace* owner);
 
-  // The method concatenates {other} into {this} and returns the added bytes,
-  // including waste.
-  //
-  // Note: Thread-safe.
-  intptr_t Concatenate(FreeList* other);
-
   // Adds a node on the free list. The block of size {size_in_bytes} starting
   // at {start} is placed on the free list. The return value is the number of
   // bytes that were not added to the free list because the freed memory block
   // was too small. Bookkeeping information will be written to the block, i.e.,
   // its contents will be destroyed. The start address should be word aligned,
   // and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
+  int Free(Address start, int size_in_bytes, FreeMode mode);
 
   // Allocate a block of size {size_in_bytes} from the free list. The block is
   // uninitialized. A failure is returned if no block is available. The size
@@ -1693,70 +1735,118 @@
   // Clear the free list.
   void Reset();
 
-  void ResetStats() { wasted_bytes_ = 0; }
+  void ResetStats() {
+    wasted_bytes_.SetValue(0);
+    ForAllFreeListCategories(
+        [](FreeListCategory* category) { category->ResetStats(); });
+  }
 
   // Return the number of bytes available on the free list.
   intptr_t Available() {
     intptr_t available = 0;
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      available += category_[i].available();
-    }
+    ForAllFreeListCategories([&available](FreeListCategory* category) {
+      available += category->available();
+    });
     return available;
   }
 
-  // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
-  // size in the free list category exactly matching the size. If no suitable
-  // node could be found, the method falls back to retrieving a {FreeSpace}
-  // from the large or huge free list category.
-  //
-  // Can be used concurrently.
-  MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
-
   bool IsEmpty() {
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      if (!category_[i].IsEmpty()) return false;
-    }
-    return true;
+    bool empty = true;
+    ForAllFreeListCategories([&empty](FreeListCategory* category) {
+      if (!category->is_empty()) empty = false;
+    });
+    return empty;
   }
 
   // Used after booting the VM.
   void RepairLists(Heap* heap);
 
-  intptr_t EvictFreeListItems(Page* p);
-  bool ContainsPageFreeListItems(Page* p);
+  intptr_t EvictFreeListItems(Page* page);
+  bool ContainsPageFreeListItems(Page* page);
 
   PagedSpace* owner() { return owner_; }
-  intptr_t wasted_bytes() { return wasted_bytes_; }
-  base::Mutex* mutex() { return &mutex_; }
+  intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
+
+  template <typename Callback>
+  void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+    FreeListCategory* current = categories_[type];
+    while (current != nullptr) {
+      FreeListCategory* next = current->next();
+      callback(current);
+      current = next;
+    }
+  }
+
+  template <typename Callback>
+  void ForAllFreeListCategories(Callback callback) {
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+    }
+  }
+
+  bool AddCategory(FreeListCategory* category);
+  void RemoveCategory(FreeListCategory* category);
+  void PrintCategories(FreeListCategoryType type);
 
 #ifdef DEBUG
-  void Zap();
   intptr_t SumFreeLists();
   bool IsVeryLong();
 #endif
 
  private:
+  class FreeListCategoryIterator {
+   public:
+    FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+        : current_(free_list->categories_[type]) {}
+
+    bool HasNext() { return current_ != nullptr; }
+
+    FreeListCategory* Next() {
+      DCHECK(HasNext());
+      FreeListCategory* tmp = current_;
+      current_ = current_->next();
+      return tmp;
+    }
+
+   private:
+    FreeListCategory* current_;
+  };
+
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kAllocatableMemory;
 
-  static const int kSmallListMin = 0x1f * kPointerSize;
+  static const int kTiniestListMax = 0xa * kPointerSize;
+  static const int kTinyListMax = 0x1f * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
   static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kSmallAllocationMax = kSmallListMin;
+  static const int kTinyAllocationMax = kTiniestListMax;
+  static const int kSmallAllocationMax = kTinyListMax;
   static const int kMediumAllocationMax = kSmallListMax;
   static const int kLargeAllocationMax = kMediumListMax;
 
   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
-  FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
 
-  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
-    return &category_[category];
-  }
+  // Walks all available categories for a given |type| and tries to retrieve
+  // a node. Returns nullptr if the category is empty.
+  FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
+
+  // Tries to retrieve a node from the first category in a given |type|.
+  // Returns nullptr if the category is empty.
+  FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
+                           int minimum_size);
+
+  // Searches a given |type| for a node of at least |minimum_size|.
+  FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
+                                 int minimum_size);
 
   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
-    if (size_in_bytes <= kSmallListMax) {
+    if (size_in_bytes <= kTiniestListMax) {
+      return kTiniest;
+    } else if (size_in_bytes <= kTinyListMax) {
+      return kTiny;
+    } else if (size_in_bytes <= kSmallListMax) {
       return kSmall;
     } else if (size_in_bytes <= kMediumListMax) {
       return kMedium;
@@ -1766,6 +1856,7 @@
     return kHuge;
   }
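
A standalone sketch (illustrative only, not part of this patch) of the mapping SelectFreeListCategoryType implements above, spelled out in bytes using the word-based limits defined earlier in this class and assuming 8-byte pointers. As the comment in GuaranteedAllocatable notes, requests at or below the tiniest limit carry no allocation guarantee because those lists are not searched exhaustively.

#include <cstddef>
#include <iostream>

// Word-based limits from this class, assuming 8-byte pointers.
constexpr std::size_t kPointerSize = 8;
constexpr std::size_t kTiniestListMax = 0xa * kPointerSize;   // 10 words
constexpr std::size_t kTinyListMax = 0x1f * kPointerSize;     // 31 words
constexpr std::size_t kSmallListMax = 0xff * kPointerSize;    // 255 words
constexpr std::size_t kMediumListMax = 0x7ff * kPointerSize;  // 2047 words
constexpr std::size_t kLargeListMax = 0x3fff * kPointerSize;  // 16383 words

const char* SelectCategory(std::size_t size_in_bytes) {
  if (size_in_bytes <= kTiniestListMax) return "tiniest";
  if (size_in_bytes <= kTinyListMax) return "tiny";
  if (size_in_bytes <= kSmallListMax) return "small";
  if (size_in_bytes <= kMediumListMax) return "medium";
  if (size_in_bytes <= kLargeListMax) return "large";
  return "huge";
}

int main() {
  // 64 B -> tiniest, 1 KB -> small, 64 KB -> large, 256 KB -> huge.
  const std::size_t sizes[] = {64, 1024, 64 * 1024, 256 * 1024};
  for (std::size_t size : sizes) {
    std::cout << size << " bytes -> " << SelectCategory(size) << "\n";
  }
  return 0;
}
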
 
+  // The tiny categories are not used for fast allocation.
   FreeListCategoryType SelectFastAllocationFreeListCategoryType(
       size_t size_in_bytes) {
     if (size_in_bytes <= kSmallAllocationMax) {
@@ -1778,10 +1869,13 @@
     return kHuge;
   }
 
+  FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
+
   PagedSpace* owner_;
-  base::Mutex mutex_;
-  intptr_t wasted_bytes_;
-  FreeListCategory category_[kNumberOfCategories];
+  AtomicNumber<intptr_t> wasted_bytes_;
+  FreeListCategory* categories_[kNumberOfCategories];
+
+  friend class FreeListCategory;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
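
A small standalone sketch (illustrative only, not part of this patch) of the traversal pattern used by FreeListCategoryIterator and ForAllFreeListCategories above: the next pointer is read before the callback runs, so a callback, like the eviction path earlier in this patch, can unlink the current category without derailing the walk. The types below are simplified stand-ins.

#include <cassert>

struct Category {
  int available = 0;
  Category* next = nullptr;
};

// Walks a singly-linked chain of categories. `next` is captured before the
// callback runs, so the callback may unlink the current node.
template <typename Callback>
void ForAllCategories(Category* top, Callback callback) {
  Category* current = top;
  while (current != nullptr) {
    Category* next = current->next;
    callback(current);
    current = next;
  }
}

int main() {
  Category a, b, c;
  a.available = 10;
  b.available = 20;
  c.available = 30;
  a.next = &b;
  b.next = &c;

  int evicted = 0;
  ForAllCategories(&a, [&evicted](Category* category) {
    evicted += category->available;
    category->next = nullptr;  // unlink, as eviction would
  });
  assert(evicted == 60);  // the walk still visited all three categories
  return 0;
}
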
@@ -1883,7 +1977,6 @@
   AllocationInfo allocation_info_;
 };
 
-
 class PagedSpace : public Space {
  public:
   static const intptr_t kCompactionMemoryWanted = 500 * KB;
@@ -1940,11 +2033,6 @@
     ResetFreeListStatistics();
   }
 
-  // Increases the number of available bytes of that space.
-  void AddToAccountingStats(intptr_t bytes) {
-    accounting_stats_.DeallocateBytes(bytes);
-  }
-
   // Available bytes without growing.  These are the bytes on the free list.
   // The bytes in the linear allocation area are not included in this total
   // because updating the stats would slow down allocation.  New pages are
@@ -1977,10 +2065,13 @@
     return allocation_info_.limit_address();
   }
 
+  enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
+
   // Allocate the requested number of bytes in the space if possible, return a
-  // failure object if not.
+  // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
+  // to be manually updated later.
   MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
-      int size_in_bytes);
+      int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
 
   MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
       int size_in_bytes);
@@ -2000,11 +2091,16 @@
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
   int Free(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes);
+    int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
     accounting_stats_.DeallocateBytes(size_in_bytes);
     return size_in_bytes - wasted;
   }
 
+  int UnaccountedFree(Address start, int size_in_bytes) {
+    int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+    return size_in_bytes - wasted;
+  }
+
   void ResetFreeList() { free_list_.Reset(); }
 
   // Set space allocation info.
@@ -2029,7 +2125,7 @@
   void IncreaseCapacity(int size);
 
   // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page, bool evict_free_list_items);
+  void ReleasePage(Page* page);
 
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
@@ -2085,17 +2181,18 @@
   // sweeper.
   virtual void RefillFreeList();
 
+  FreeList* free_list() { return &free_list_; }
+
+  base::Mutex* mutex() { return &space_mutex_; }
+
+  inline void UnlinkFreeListCategories(Page* page);
+  inline intptr_t RelinkFreeListCategories(Page* page);
+
  protected:
-  void AddMemory(Address start, intptr_t size);
-
-  void MoveOverFreeMemory(PagedSpace* other);
-
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }
 
-  FreeList* free_list() { return &free_list_; }
-
   bool HasPages() { return anchor_.next_page() != &anchor_; }
 
   // Cleans up the space, frees all pages in this space except those belonging
@@ -2143,6 +2240,7 @@
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
 
+  friend class IncrementalMarking;
   friend class MarkCompactCollector;
   friend class PageIterator;
 
@@ -2191,29 +2289,9 @@
 
 class NewSpacePage : public MemoryChunk {
  public:
-  // GC related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-  static const int kAreaSize = Page::kAllocatableMemory;
-
-  inline NewSpacePage* next_page() {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
-  inline NewSpacePage* prev_page() {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
-  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
-  bool is_anchor() { return !this->InNewSpace(); }
+  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
+                                         Executability executable,
+                                         SemiSpace* owner);
 
   static bool IsAtStart(Address addr) {
     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
@@ -2224,8 +2302,6 @@
     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
   }
 
-  Address address() { return reinterpret_cast<Address>(this); }
-
   // Finds the NewSpacePage containing the given address.
   static inline NewSpacePage* FromAddress(Address address_in_page) {
     Address page_start =
@@ -2247,14 +2323,33 @@
            NewSpacePage::FromAddress(address2);
   }
 
+  inline NewSpacePage* next_page() {
+    return static_cast<NewSpacePage*>(next_chunk());
+  }
+
+  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
+
+  inline NewSpacePage* prev_page() {
+    return static_cast<NewSpacePage*>(prev_chunk());
+  }
+
+  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
+
+  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
+
+  bool is_anchor() { return !this->InNewSpace(); }
+
  private:
+  // GC related flags copied from from-space to to-space when
+  // flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
   // Create a NewSpacePage object that is only used as anchor
   // for the doubly-linked list of real pages.
   explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
 
-  static NewSpacePage* Initialize(Heap* heap, Address start,
-                                  SemiSpace* semi_space);
-
   // Initialize a fake NewSpacePage used as a sentinel at the ends
   // of a doubly-linked list of real NewSpacePages.
   // Only uses the prev/next links, and sets flags to not be in new-space.
@@ -2280,7 +2375,6 @@
         current_capacity_(0),
         maximum_capacity_(0),
         minimum_capacity_(0),
-        start_(nullptr),
         age_mark_(nullptr),
         committed_(false),
         id_(semispace),
@@ -2291,39 +2385,38 @@
   inline bool Contains(Object* o);
   inline bool ContainsSlow(Address a);
 
-  // Creates a space in the young generation. The constructor does not
-  // allocate memory from the OS.
-  void SetUp(Address start, int initial_capacity, int maximum_capacity);
-
-  // Tear down the space.  Heap memory was not allocated by the space, so it
-  // is not deallocated here.
+  void SetUp(int initial_capacity, int maximum_capacity);
   void TearDown();
+  bool HasBeenSetUp() { return maximum_capacity_ != 0; }
 
-  // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() { return start_ != nullptr; }
+  bool Commit();
+  bool Uncommit();
+  bool is_committed() { return committed_; }
 
-  // Grow the semispace to the new capacity.  The new capacity
-  // requested must be larger than the current capacity and less than
-  // the maximum capacity.
+  // Grow the semispace to the new capacity.  The new capacity requested must
+  // be larger than the current capacity and less than the maximum capacity.
   bool GrowTo(int new_capacity);
 
-  // Shrinks the semispace to the new capacity.  The new capacity
-  // requested must be more than the amount of used memory in the
-  // semispace and less than the current capacity.
+  // Shrinks the semispace to the new capacity.  The new capacity requested
+  // must be more than the amount of used memory in the semispace and less
+  // than the current capacity.
   bool ShrinkTo(int new_capacity);
 
   // Returns the start address of the first page of the space.
   Address space_start() {
-    DCHECK_NE(anchor_.next_page(), &anchor_);
+    DCHECK_NE(anchor_.next_page(), anchor());
     return anchor_.next_page()->area_start();
   }
 
-  // Returns the start address of the current page of the space.
-  Address page_low() { return current_page_->area_start(); }
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
 
+  // Returns the start address of the current page of the space.
+  Address page_low() { return current_page_->area_start(); }
+
   // Returns one past the end address of the current page of the space.
   Address page_high() { return current_page_->area_end(); }
 
@@ -2341,17 +2434,10 @@
   Address age_mark() { return age_mark_; }
   void set_age_mark(Address mark);
 
-  bool is_committed() { return committed_; }
-  bool Commit();
-  bool Uncommit();
-
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
-
-  // Returns the current total capacity of the semispace.
+  // Returns the current capacity of the semispace.
   int current_capacity() { return current_capacity_; }
 
-  // Returns the maximum total capacity of the semispace.
+  // Returns the maximum capacity of the semispace.
   int maximum_capacity() { return maximum_capacity_; }
 
   // Returns the initial capacity of the semispace.
@@ -2393,11 +2479,9 @@
 #endif
 
  private:
-  NewSpacePage* anchor() { return &anchor_; }
+  void RewindPages(NewSpacePage* start, int num_pages);
 
-  void set_current_capacity(int new_capacity) {
-    current_capacity_ = new_capacity;
-  }
+  inline NewSpacePage* anchor() { return &anchor_; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2408,11 +2492,9 @@
   // The maximum capacity that can be used by this space.
   int maximum_capacity_;
 
-  // The mimnimum capacity for the space. A space cannot shrink below this size.
+  // The minimum capacity for the space. A space cannot shrink below this size.
   int minimum_capacity_;
 
-  // The start address of the space.
-  Address start_;
   // Used to govern object promotion during mark-compact collection.
   Address age_mark_;
 
@@ -2488,20 +2570,21 @@
 
 class NewSpace : public Space {
  public:
-  // Constructor.
   explicit NewSpace(Heap* heap)
       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        top_on_previous_step_(0) {}
+        pages_used_(0),
+        top_on_previous_step_(0),
+        allocated_histogram_(nullptr),
+        promoted_histogram_(nullptr) {}
 
   inline bool Contains(HeapObject* o);
   inline bool ContainsSlow(Address a);
   inline bool Contains(Object* o);
 
-  // Sets up the new space using the given chunk.
-  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
+  bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
 
   // Tears down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
@@ -2524,7 +2607,7 @@
 
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * NewSpacePage::kAreaSize +
+    return pages_used_ * NewSpacePage::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }
 
@@ -2537,7 +2620,7 @@
   intptr_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           NewSpacePage::kAreaSize;
+           NewSpacePage::kAllocatableMemory;
   }
 
   // Return the current size of a semispace, allocatable and non-allocatable
@@ -2564,22 +2647,40 @@
   // Return the available bytes without growing.
   intptr_t Available() override { return Capacity() - Size(); }
 
-  intptr_t PagesFromStart(Address addr) {
-    return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
-  }
-
   size_t AllocatedSinceLastGC() {
-    intptr_t allocated = top() - to_space_.age_mark();
-    if (allocated < 0) {
-      // Runtime has lowered the top below the age mark.
+    bool seen_age_mark = false;
+    Address age_mark = to_space_.age_mark();
+    NewSpacePage* current_page = to_space_.first_page();
+    NewSpacePage* age_mark_page = NewSpacePage::FromAddress(age_mark);
+    NewSpacePage* last_page = NewSpacePage::FromAddress(top() - kPointerSize);
+    if (age_mark_page == last_page) {
+      if (top() - age_mark >= 0) {
+        return top() - age_mark;
+      }
+      // Top was reset at some point, invalidating this metric.
       return 0;
     }
-    // Correctly account for non-allocatable regions at the beginning of
-    // each page from the age_mark() to the top().
-    intptr_t pages =
-        PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
-    allocated -= pages * (NewSpacePage::kObjectStartOffset);
-    DCHECK(0 <= allocated && allocated <= Size());
+    while (current_page != last_page) {
+      if (current_page == age_mark_page) {
+        seen_age_mark = true;
+        break;
+      }
+      current_page = current_page->next_page();
+    }
+    if (!seen_age_mark) {
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    intptr_t allocated = age_mark_page->area_end() - age_mark;
+    DCHECK_EQ(current_page, age_mark_page);
+    current_page = age_mark_page->next_page();
+    while (current_page != last_page) {
+      allocated += NewSpacePage::kAllocatableMemory;
+      current_page = current_page->next_page();
+    }
+    allocated += top() - current_page->area_start();
+    DCHECK_LE(0, allocated);
+    DCHECK_LE(allocated, Size());
     return static_cast<size_t>(allocated);
   }
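
As a rough cross-check of the accounting above (not part of the patch; PageInfo and its fields are hypothetical stand-ins for the NewSpacePage accessors), the bytes allocated since the last GC decompose into the tail of the page holding the age mark, the full allocatable area of every page in between, and the used prefix of the page holding the current top:

#include <cstdint>

// Sketch only: a minimal mirror of the to-space page list.
struct PageInfo {
  uintptr_t area_start;
  uintptr_t area_end;
  const PageInfo* next;  // next page in the to-space list
};

// Bytes allocated between |age_mark| (on |age_mark_page|) and |top| (on
// |top_page|), assuming the age-mark page precedes the top page in the list.
inline intptr_t AllocatedSinceAgeMark(const PageInfo* age_mark_page,
                                      uintptr_t age_mark,
                                      const PageInfo* top_page, uintptr_t top,
                                      intptr_t allocatable_per_page) {
  intptr_t allocated = age_mark_page->area_end - age_mark;  // tail of first page
  for (const PageInfo* p = age_mark_page->next; p != top_page; p = p->next) {
    allocated += allocatable_per_page;  // fully used intermediate pages
  }
  allocated += top - top_page->area_start;  // prefix of the page holding top
  return allocated;
}
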
 
@@ -2617,10 +2718,6 @@
   // Set the age mark in the active semispace.
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
-  // The start address of the space and a bit mask. Anding an address in the
-  // new space with the mask will result in the start address.
-  Address start() { return start_; }
-
   // The allocation top and limit address.
   Address* allocation_top_address() { return allocation_info_.top_address(); }
 
@@ -2735,18 +2832,12 @@
 
   base::Mutex mutex_;
 
-  Address chunk_base_;
-  uintptr_t chunk_size_;
-
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
   base::VirtualMemory reservation_;
   int pages_used_;
 
-  // Start address and bit mask for containment testing.
-  Address start_;
-
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
   AllocationInfo allocation_info_;
@@ -2792,8 +2883,6 @@
 
   bool is_local() override { return true; }
 
-  void RefillFreeList() override;
-
  protected:
   // The space is temporary and not included in any snapshots.
   bool snapshotable() override { return false; }
@@ -2856,12 +2945,7 @@
  public:
   // Creates a map space object.
   MapSpace(Heap* heap, AllocationSpace id)
-      : PagedSpace(heap, id, NOT_EXECUTABLE),
-        max_map_space_pages_(kMaxMapPageIndex - 1) {}
-
-  // Given an index, returns the page address.
-  // TODO(1600): this limit is artifical just to keep code compilable
-  static const int kMaxMapPageIndex = 1 << 16;
+      : PagedSpace(heap, id, NOT_EXECUTABLE) {}
 
   int RoundSizeDownToObjectAlignment(int size) override {
     if (base::bits::IsPowerOfTwo32(Map::kSize)) {
@@ -2874,16 +2958,6 @@
 #ifdef VERIFY_HEAP
   void VerifyObject(HeapObject* obj) override;
 #endif
-
- private:
-  static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
-
-  // Do map space compaction if there is a page gap.
-  int CompactionThreshold() {
-    return kMapsPerPage * (max_map_space_pages_ - 1);
-  }
-
-  const int max_map_space_pages_;
 };
 
 
@@ -2950,6 +3024,8 @@
   // Checks whether the space is empty.
   bool IsEmpty() { return first_page_ == NULL; }
 
+  void AdjustLiveBytes(int by) { objects_size_ += by; }
+
   LargePage* first_page() { return first_page_; }
 
 #ifdef VERIFY_HEAP
@@ -2988,25 +3064,42 @@
   LargePage* current_;
 };
 
+class LargePageIterator BASE_EMBEDDED {
+ public:
+  explicit inline LargePageIterator(LargeObjectSpace* space);
+
+  inline LargePage* next();
+
+ private:
+  LargePage* next_page_;
+};
 
 // Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
+// pointers to new space or to evacuation candidates.
+class MemoryChunkIterator BASE_EMBEDDED {
  public:
-  inline explicit PointerChunkIterator(Heap* heap);
+  enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
+  inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
 
   // Return NULL when the iterator is done.
   inline MemoryChunk* next();
 
  private:
-  enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
+  enum State {
+    kOldSpaceState,
+    kMapState,
+    kCodeState,
+    kLargeObjectState,
+    kFinishedState
+  };
   State state_;
+  const Mode mode_;
   PageIterator old_iterator_;
+  PageIterator code_iterator_;
   PageIterator map_iterator_;
-  LargeObjectIterator lo_iterator_;
+  LargePageIterator lo_iterator_;
 };
 
-
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
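
For reference, a hypothetical usage sketch of the renamed iterator (assuming code running inside v8::internal with src/heap/spaces.h included and a fully set-up Heap* named heap); the loop shape follows the declared interface, where next() returns NULL once all chunks have been visited:

// Walk every page of old space and map space plus every large-object page,
// skipping code space, as selected by the Mode argument.
MemoryChunkIterator it(heap, MemoryChunkIterator::ALL_BUT_CODE_SPACE);
while (MemoryChunk* chunk = it.next()) {
  // Inspect or update per-chunk state here (e.g. flags, remembered sets).
}
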
diff --git a/src/heap/store-buffer-inl.h b/src/heap/store-buffer-inl.h
deleted file mode 100644
index 920ec34..0000000
--- a/src/heap/store-buffer-inl.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STORE_BUFFER_INL_H_
-#define V8_STORE_BUFFER_INL_H_
-
-#include "src/heap/heap.h"
-#include "src/heap/remembered-set.h"
-#include "src/heap/spaces-inl.h"
-#include "src/heap/store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-void LocalStoreBuffer::Record(Address addr) {
-  if (top_->is_full()) top_ = new Node(top_);
-  top_->buffer[top_->count++] = addr;
-}
-
-void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
-  Node* current = top_;
-  while (current != nullptr) {
-    for (int i = 0; i < current->count; i++) {
-      Address slot = current->buffer[i];
-      Page* page = Page::FromAnyPointerAddress(heap_, slot);
-      RememberedSet<OLD_TO_NEW>::Insert(page, slot);
-    }
-    current = current->next;
-  }
-}
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
index 21f375b..a982eb3 100644
--- a/src/heap/store-buffer.cc
+++ b/src/heap/store-buffer.cc
@@ -8,7 +8,6 @@
 
 #include "src/counters.h"
 #include "src/heap/incremental-marking.h"
-#include "src/heap/store-buffer-inl.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
 #include "src/v8.h"
@@ -17,17 +16,20 @@
 namespace internal {
 
 StoreBuffer::StoreBuffer(Heap* heap)
-    : heap_(heap), start_(nullptr), limit_(nullptr), virtual_memory_(nullptr) {}
+    : heap_(heap),
+      top_(nullptr),
+      start_(nullptr),
+      limit_(nullptr),
+      virtual_memory_(nullptr) {}
 
 void StoreBuffer::SetUp() {
   // Allocate 2x the buffer size, so that we can start the store buffer
   // aligned to the buffer size.  This lets us use a mask to detect the end
   // of the area.
-  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
+  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
-  start_ =
-      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+  start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);
 
   DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
@@ -38,23 +40,20 @@
   DCHECK(start_ <= vm_limit);
   DCHECK(limit_ <= vm_limit);
   USE(vm_limit);
-  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
-  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
-         0);
+  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0);
 
   if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false)) {  // Not executable.
     V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
-  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
+  top_ = start_;
 }
 
 
 void StoreBuffer::TearDown() {
   delete virtual_memory_;
-  start_ = limit_ = NULL;
-  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
+  top_ = start_ = limit_ = nullptr;
 }
 
 
@@ -64,16 +63,15 @@
 }
 
 void StoreBuffer::MoveEntriesToRememberedSet() {
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-  if (top == start_) return;
-  DCHECK(top <= limit_);
-  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
-  for (Address* current = start_; current < top; current++) {
+  if (top_ == start_) return;
+  DCHECK(top_ <= limit_);
+  for (Address* current = start_; current < top_; current++) {
     DCHECK(!heap_->code_space()->Contains(*current));
     Address addr = *current;
     Page* page = Page::FromAnyPointerAddress(heap_, addr);
     RememberedSet<OLD_TO_NEW>::Insert(page, addr);
   }
+  top_ = start_;
 }
 
 }  // namespace internal
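
A minimal stand-alone illustration of the alignment arithmetic the new SetUp relies on (not part of the patch; the addresses below are made up): reserving 2x kStoreBufferSize and rounding the start up to kStoreBufferSize leaves room for one full buffer and guarantees that both start_ and limit_ sit on kStoreBufferSize boundaries, which is exactly what the new kStoreBufferMask DCHECK asserts.

#include <cassert>
#include <cstdint>

int main() {
  // 1 << (14 + kPointerSizeLog2) on a 64-bit target.
  const uintptr_t kStoreBufferSize = uintptr_t{1} << 17;
  const uintptr_t kStoreBufferMask = kStoreBufferSize - 1;
  const uintptr_t reservation = 0x7f3a1234u;  // arbitrary, unaligned base
  // RoundUp(reservation, kStoreBufferSize).
  const uintptr_t start = (reservation + kStoreBufferMask) & ~kStoreBufferMask;
  const uintptr_t limit = start + kStoreBufferSize;  // limit_, in bytes
  assert((start & kStoreBufferMask) == 0);
  assert((limit & kStoreBufferMask) == 0);
  return 0;
}
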
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index e7e9c98..1b3fcb0 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -18,20 +18,25 @@
 // code. On buffer overflow the slots are moved to the remembered set.
 class StoreBuffer {
  public:
-  explicit StoreBuffer(Heap* heap);
+  static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
+  static const int kStoreBufferMask = kStoreBufferSize - 1;
+
   static void StoreBufferOverflow(Isolate* isolate);
+
+  explicit StoreBuffer(Heap* heap);
   void SetUp();
   void TearDown();
 
-  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
-  static const int kStoreBufferSize = kStoreBufferOverflowBit;
-  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+  // Used to add entries from generated code.
+  inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
 
   void MoveEntriesToRememberedSet();
 
  private:
   Heap* heap_;
 
+  Address* top_;
+
   // The start and the limit of the buffer that contains store slots
   // added from the generated code.
   Address* start_;
@@ -40,41 +45,6 @@
   base::VirtualMemory* virtual_memory_;
 };
 
-
-class LocalStoreBuffer BASE_EMBEDDED {
- public:
-  explicit LocalStoreBuffer(Heap* heap)
-      : top_(new Node(nullptr)), heap_(heap) {}
-
-  ~LocalStoreBuffer() {
-    Node* current = top_;
-    while (current != nullptr) {
-      Node* tmp = current->next;
-      delete current;
-      current = tmp;
-    }
-  }
-
-  inline void Record(Address addr);
-  inline void Process(StoreBuffer* store_buffer);
-
- private:
-  static const int kBufferSize = 16 * KB;
-
-  struct Node : Malloced {
-    explicit Node(Node* next_node) : next(next_node), count(0) {}
-
-    inline bool is_full() { return count == kBufferSize; }
-
-    Node* next;
-    Address buffer[kBufferSize];
-    int count;
-  };
-
-  Node* top_;
-  Heap* heap_;
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/i18n.cc b/src/i18n.cc
index 8de2d29..623de50 100644
--- a/src/i18n.cc
+++ b/src/i18n.cc
@@ -39,7 +39,8 @@
                           const char* key,
                           icu::UnicodeString* setting) {
   Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
-  Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+  Handle<Object> object =
+      JSReceiver::GetProperty(options, str).ToHandleChecked();
   if (object->IsString()) {
     v8::String::Utf8Value utf8_string(
         v8::Utils::ToLocal(Handle<String>::cast(object)));
@@ -55,7 +56,8 @@
                            const char* key,
                            int32_t* value) {
   Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
-  Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+  Handle<Object> object =
+      JSReceiver::GetProperty(options, str).ToHandleChecked();
   if (object->IsNumber()) {
     object->ToInt32(value);
     return true;
@@ -69,7 +71,8 @@
                            const char* key,
                            bool* value) {
   Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
-  Handle<Object> object = Object::GetProperty(options, str).ToHandleChecked();
+  Handle<Object> object =
+      JSReceiver::GetProperty(options, str).ToHandleChecked();
   if (object->IsBoolean()) {
     *value = object->BooleanValue();
     return true;
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index cb6bad8..cafa676 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -81,6 +81,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Memory::Address_at(pc_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -115,6 +119,20 @@
   }
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Memory::Address_at(pc_) = updated_reference;
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+  }
+}
 
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -139,8 +157,8 @@
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
 
@@ -321,6 +339,10 @@
   rmode_ = RelocInfo::NONE32;
 }
 
+Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
+  x_ = reinterpret_cast<int32_t>(x);
+  rmode_ = rmode;
+}
 
 Immediate::Immediate(const ExternalReference& ext) {
   x_ = reinterpret_cast<int32_t>(ext.address());
@@ -429,6 +451,11 @@
   }
 }
 
+void Assembler::emit_b(Immediate x) {
+  DCHECK(x.is_int8() || x.is_uint8());
+  uint8_t value = static_cast<uint8_t>(x.x_);
+  *pc_++ = value;
+}
 
 void Assembler::emit_w(const Immediate& x) {
   DCHECK(RelocInfo::IsNone(x.rmode_));
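
The new update_wasm_memory_reference keeps the embedded pointer's offset while moving it from the old memory block to the new one. A small stand-alone sketch of that rebasing (not part of the patch; plain integers stand in for Address):

#include <cassert>
#include <cstddef>
#include <cstdint>

// An address that pointed into [old_base, old_base + old_size) keeps its
// offset but is relocated into [new_base, new_base + new_size).
inline uintptr_t RebaseWasmMemoryReference(uintptr_t reference,
                                           uintptr_t old_base, size_t old_size,
                                           uintptr_t new_base,
                                           size_t new_size) {
  assert(old_base <= reference && reference < old_base + old_size);
  const uintptr_t updated = new_base + (reference - old_base);
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}
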
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 2ac3088..150131c 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -746,8 +746,8 @@
   emit_operand(src, dst);
 }
 
-
-void Assembler::cmpb(const Operand& op, int8_t imm8) {
+void Assembler::cmpb(const Operand& op, Immediate imm8) {
+  DCHECK(imm8.is_int8() || imm8.is_uint8());
   EnsureSpace ensure_space(this);
   if (op.is_reg(eax)) {
     EMIT(0x3C);
@@ -755,7 +755,7 @@
     EMIT(0x80);
     emit_operand(edi, op);  // edi == 7
   }
-  EMIT(imm8);
+  emit_b(imm8);
 }
 
 
@@ -784,6 +784,19 @@
   emit_w(imm16);
 }
 
+void Assembler::cmpw(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x39);
+  emit_operand(reg, op);
+}
+
+void Assembler::cmpw(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x3B);
+  emit_operand(reg, op);
+}
 
 void Assembler::cmp(Register reg, int32_t imm32) {
   EnsureSpace ensure_space(this);
@@ -1068,19 +1081,26 @@
   emit_operand(edi, dst);
 }
 
-
 void Assembler::sbb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   EMIT(0x1B);
   emit_operand(dst, src);
 }
 
+void Assembler::shld(Register dst, Register src, uint8_t shift) {
+  DCHECK(is_uint5(shift));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xA4);
+  emit_operand(src, Operand(dst));
+  EMIT(shift);
+}
 
-void Assembler::shld(Register dst, const Operand& src) {
+void Assembler::shld_cl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0xA5);
-  emit_operand(dst, src);
+  emit_operand(src, Operand(dst));
 }
 
 
@@ -1104,15 +1124,6 @@
   emit_operand(esp, dst);
 }
 
-
-void Assembler::shrd(Register dst, const Operand& src) {
-  EnsureSpace ensure_space(this);
-  EMIT(0x0F);
-  EMIT(0xAD);
-  emit_operand(dst, src);
-}
-
-
 void Assembler::shr(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   DCHECK(is_uint5(imm8));  // illegal shift count
@@ -1133,6 +1144,21 @@
   emit_operand(ebp, dst);
 }
 
+void Assembler::shrd(Register dst, Register src, uint8_t shift) {
+  DCHECK(is_uint5(shift));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xAC);
+  emit_operand(dst, Operand(src));
+  EMIT(shift);
+}
+
+void Assembler::shrd_cl(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xAD);
+  emit_operand(src, dst);
+}
 
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
@@ -1155,8 +1181,8 @@
 
 
 void Assembler::test(Register reg, const Immediate& imm) {
-  if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
-    test_b(reg, imm.x_);
+  if (imm.is_uint8()) {
+    test_b(reg, imm);
     return;
   }
 
@@ -1193,8 +1219,8 @@
     test(op.reg(), imm);
     return;
   }
-  if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
-    return test_b(op, imm.x_);
+  if (imm.is_uint8()) {
+    return test_b(op, imm);
   }
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
@@ -1202,25 +1228,25 @@
   emit(imm);
 }
 
-
-void Assembler::test_b(Register reg, uint8_t imm8) {
+void Assembler::test_b(Register reg, Immediate imm8) {
+  DCHECK(imm8.is_uint8());
   EnsureSpace ensure_space(this);
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
   if (reg.is(eax)) {
     EMIT(0xA8);
-    EMIT(imm8);
+    emit_b(imm8);
   } else if (reg.is_byte_register()) {
-    emit_arith_b(0xF6, 0xC0, reg, imm8);
+    emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.x_));
   } else {
+    EMIT(0x66);
     EMIT(0xF7);
     EMIT(0xC0 | reg.code());
-    emit(imm8);
+    emit_w(imm8);
   }
 }
 
-
-void Assembler::test_b(const Operand& op, uint8_t imm8) {
+void Assembler::test_b(const Operand& op, Immediate imm8) {
   if (op.is_reg_only()) {
     test_b(op.reg(), imm8);
     return;
@@ -1228,9 +1254,42 @@
   EnsureSpace ensure_space(this);
   EMIT(0xF6);
   emit_operand(eax, op);
-  EMIT(imm8);
+  emit_b(imm8);
 }
 
+void Assembler::test_w(Register reg, Immediate imm16) {
+  DCHECK(imm16.is_int16() || imm16.is_uint16());
+  EnsureSpace ensure_space(this);
+  if (reg.is(eax)) {
+    EMIT(0xA9);
+    emit_w(imm16);
+  } else {
+    EMIT(0x66);
+    EMIT(0xF7);
+    EMIT(0xc0 | reg.code());
+    emit_w(imm16);
+  }
+}
+
+void Assembler::test_w(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x85);
+  emit_operand(reg, op);
+}
+
+void Assembler::test_w(const Operand& op, Immediate imm16) {
+  DCHECK(imm16.is_int16() || imm16.is_uint16());
+  if (op.is_reg_only()) {
+    test_w(op.reg(), imm16);
+    return;
+  }
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0xF7);
+  emit_operand(eax, op);
+  emit_w(imm16);
+}
 
 void Assembler::xor_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
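
For the new 16-bit and funnel-shift emitters, a rough sanity sketch of the byte sequences they are expected to produce (not from the patch; derived from the EMIT calls above together with the standard Intel encodings 66 F7 /0 iw for TEST r16, imm16 and 0F A4 /r ib for SHLD r/m32, r32, imm8, with register codes eax=0, edx=2):

#include <cstdint>
#include <vector>

int main() {
  // __ test_w(edx, Immediate(0x1234)) -> operand-size prefix, F7 /0, imm16 (LE).
  const std::vector<uint8_t> test_w_edx = {0x66, 0xF7, 0xC2, 0x34, 0x12};
  // __ shld(edx, eax, 4) -> 0F A4, ModRM(reg=eax, r/m=edx), shift count.
  const std::vector<uint8_t> shld_edx_eax_4 = {0x0F, 0xA4, 0xC2, 0x04};
  (void)test_w_edx;
  (void)shld_edx_eax_4;
  return 0;
}
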
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index f517c98..5105ff5 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -271,6 +271,7 @@
   inline explicit Immediate(Handle<Object> handle);
   inline explicit Immediate(Smi* value);
   inline explicit Immediate(Address addr);
+  inline explicit Immediate(Address x, RelocInfo::Mode rmode);
 
   static Immediate CodeRelativeOffset(Label* label) {
     return Immediate(label);
@@ -280,9 +281,15 @@
   bool is_int8() const {
     return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
   }
+  bool is_uint8() const {
+    return v8::internal::is_uint8(x_) && RelocInfo::IsNone(rmode_);
+  }
   bool is_int16() const {
     return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
   }
+  bool is_uint16() const {
+    return v8::internal::is_uint16(x_) && RelocInfo::IsNone(rmode_);
+  }
 
  private:
   inline explicit Immediate(Label* value);
@@ -666,13 +673,18 @@
   void and_(const Operand& dst, Register src);
   void and_(const Operand& dst, const Immediate& x);
 
-  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
-  void cmpb(const Operand& op, int8_t imm8);
+  void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
+  void cmpb(const Operand& op, Immediate imm8);
   void cmpb(Register reg, const Operand& op);
   void cmpb(const Operand& op, Register reg);
+  void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
-  void cmpw(const Operand& op, Immediate imm16);
+  void cmpw(const Operand& dst, Immediate src);
+  void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
+  void cmpw(Register dst, const Operand& src);
+  void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
+  void cmpw(const Operand& dst, Register src);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
   void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
@@ -738,21 +750,20 @@
 
   void sbb(Register dst, const Operand& src);
 
-  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
-  void shld(Register dst, const Operand& src);
-
   void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
   void shl(const Operand& dst, uint8_t imm8);
   void shl_cl(Register dst) { shl_cl(Operand(dst)); }
   void shl_cl(const Operand& dst);
-
-  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
-  void shrd(Register dst, const Operand& src);
+  void shld(Register dst, Register src, uint8_t shift);
+  void shld_cl(Register dst, Register src);
 
   void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
   void shr(const Operand& dst, uint8_t imm8);
   void shr_cl(Register dst) { shr_cl(Operand(dst)); }
   void shr_cl(const Operand& dst);
+  void shrd(Register dst, Register src, uint8_t shift);
+  void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
+  void shrd_cl(const Operand& dst, Register src);
 
   void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
@@ -763,10 +774,18 @@
   void test(Register reg, const Immediate& imm);
   void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
-  void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
-  void test_b(Register reg, uint8_t imm8);
-  void test_b(const Operand& op, uint8_t imm8);
+  void test(const Operand& op, Register reg) { test(reg, op); }
+  void test_b(Register reg, const Operand& op);
+  void test_b(Register reg, Immediate imm8);
+  void test_b(const Operand& op, Immediate imm8);
+  void test_b(const Operand& op, Register reg) { test_b(reg, op); }
+  void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
+  void test_w(Register reg, const Operand& op);
+  void test_w(Register reg, Immediate imm16);
+  void test_w(const Operand& op, Immediate imm16);
+  void test_w(const Operand& op, Register reg) { test_w(reg, op); }
+  void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
 
   void xor_(Register dst, int32_t imm32);
   void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
@@ -1435,7 +1454,9 @@
 
   static bool IsNop(Address addr);
 
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
 
   int relocation_writer_size() {
     return (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -1482,6 +1503,7 @@
                    RelocInfo::Mode rmode,
                    TypeFeedbackId id = TypeFeedbackId::None());
   inline void emit(const Immediate& x);
+  inline void emit_b(Immediate x);
   inline void emit_w(const Immediate& x);
   inline void emit_q(uint64_t x);
 
@@ -1542,8 +1564,8 @@
   // code generation
   RelocInfoWriter reloc_info_writer;
 
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
 };
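
A quick note on the new Immediate predicates: together with the existing is_int8/is_int16 they gate the narrow-encoding fast paths (test dispatching to test_b, cmpb/test_w asserting their operand width), and they only return true when no relocation mode is attached. A trivial sketch of the value ranges involved (assumed to mirror the is_uint8/is_uint16 helpers in src/utils.h):

#include <cstdint>

inline bool FitsUint8(int32_t x) { return 0 <= x && x <= 0xFF; }
inline bool FitsUint16(int32_t x) { return 0 <= x && x <= 0xFFFF; }
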
 
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index c48c74a..b7e33d9 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -123,6 +123,7 @@
                                            bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
+  //  -- esi: context
   //  -- edi: constructor function
   //  -- ebx: allocation site or undefined
   //  -- edx: new target
@@ -134,6 +135,7 @@
 
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(ebx);
+    __ push(esi);
     __ push(ebx);
     __ SmiTag(eax);
     __ push(eax);
@@ -201,7 +203,7 @@
     }
 
     // Restore context from the frame.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -324,9 +326,6 @@
                                              bool is_construct) {
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Clear the context before we push it when entering the internal frame.
-  __ Move(esi, Immediate(0));
-
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -602,27 +601,24 @@
   //           they are to be pushed onto the stack.
   // -----------------------------------
 
-  // Save number of arguments on the stack below where arguments are going
-  // to be pushed.
-  __ mov(ecx, eax);
-  __ neg(ecx);
-  __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
-  __ mov(eax, ecx);
-
   // Pop return address to allow tail-call after pushing arguments.
   __ Pop(ecx);
 
+  // Push edi in the slot meant for receiver. We need an extra register
+  // so store edi temporarily on stack.
+  __ Push(edi);
+
   // Find the address of the last argument.
-  __ shl(eax, kPointerSizeLog2);
-  __ add(eax, ebx);
+  __ mov(edi, eax);
+  __ neg(edi);
+  __ shl(edi, kPointerSizeLog2);
+  __ add(edi, ebx);
 
-  // Push padding for receiver.
-  __ Push(Immediate(0));
+  Generate_InterpreterPushArgs(masm, edi);
 
-  Generate_InterpreterPushArgs(masm, eax);
-
-  // Restore number of arguments from slot on stack.
-  __ mov(eax, Operand(esp, -kPointerSize));
+  // Restore the constructor from slot on stack. It was pushed at the slot
+  // meant for receiver.
+  __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
 
   // Re-push return address.
   __ Push(ecx);
@@ -960,6 +956,28 @@
   }
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : first argument (left-hand side)
+  //  -- esp[8] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ mov(InstanceOfDescriptor::LeftRegister(),
+           Operand(ebp, 2 * kPointerSize));  // Load left-hand side.
+    __ mov(InstanceOfDescriptor::RightRegister(),
+           Operand(ebp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ ret(2 * kPointerSize);
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1007,7 +1025,8 @@
   Label receiver_not_callable;
   __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
   __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
   __ j(zero, &receiver_not_callable, Label::kNear);
 
   // 3. Tail call with no arguments if argArray is null or undefined.
@@ -1130,7 +1149,8 @@
   Label target_not_callable;
   __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
   __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
   __ j(zero, &target_not_callable, Label::kNear);
 
   // 3a. Apply the target to the given argumentsList (passing undefined for
@@ -1146,7 +1166,6 @@
   }
 }
 
-
 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax     : argc
@@ -1195,14 +1214,16 @@
   Label target_not_constructor;
   __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
   __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
   __ j(zero, &target_not_constructor, Label::kNear);
 
   // 3. Make sure the target is actually a constructor.
   Label new_target_not_constructor;
   __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
   __ j(zero, &new_target_not_constructor, Label::kNear);
 
   // 4a. Construct the target with the given new.target and argumentsList.
@@ -1865,18 +1886,20 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ movzx_b(scratch1,
+             Operand::StaticVariable(is_tail_call_elimination_enabled));
   __ cmp(scratch1, Immediate(0));
-  __ j(not_equal, &done, Label::kNear);
+  __ j(equal, &done, Label::kNear);
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+    __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
            Immediate(Smi::FromInt(StackFrame::STUB)));
     __ j(not_equal, &no_interpreter_frame, Label::kNear);
     __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -1884,16 +1907,18 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+  __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(ebp, scratch2);
-  __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(scratch1);
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
   __ jmp(&formal_parameter_count_loaded, Label::kNear);
 
   __ bind(&no_arguments_adaptor);
@@ -1902,57 +1927,15 @@
   __ mov(scratch1,
          FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
   __ mov(
-      scratch1,
+      caller_args_count_reg,
       FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
 
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the destination address where we will put the return address
-  // after we drop current frame.
-  Register new_sp_reg = scratch2;
-  __ sub(scratch1, args_reg);
-  __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
-                             StandardFrameConstants::kCallerPCOffset));
-
-  if (FLAG_debug_code) {
-    __ cmp(esp, new_sp_reg);
-    __ Check(below, kStackAccessBelowStackPointer);
-  }
-
-  // Copy receiver and return address as well.
-  Register count_reg = scratch1;
-  __ lea(count_reg, Operand(args_reg, 2));
-
-  // Copy return address from caller's frame to current frame's return address
-  // to avoid its trashing and let the following loop copy it to the right
-  // place.
-  Register tmp_reg = scratch3;
-  __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
-  __ mov(Operand(esp, 0), tmp_reg);
-
-  // Restore caller's frame pointer now as it could be overwritten by
-  // the copying loop.
-  __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-  Operand src(esp, count_reg, times_pointer_size, 0);
-  Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-  Label loop, entry;
-  __ jmp(&entry, Label::kNear);
-  __ bind(&loop);
-  __ dec(count_reg);
-  __ mov(tmp_reg, src);
-  __ mov(dst, tmp_reg);
-  __ bind(&entry);
-  __ cmp(count_reg, Immediate(0));
-  __ j(not_equal, &loop, Label::kNear);
-
-  // Leave current frame.
-  __ mov(esp, new_sp_reg);
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, 0);
   __ bind(&done);
 }
 }  // namespace
@@ -1972,7 +1955,7 @@
   Label class_constructor;
   __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
-            SharedFunctionInfo::kClassConstructorBitsWithinByte);
+            Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
   __ j(not_zero, &class_constructor);
 
   // Enter the context of the function; ToObject has to run in the function
@@ -1984,8 +1967,8 @@
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
-            (1 << SharedFunctionInfo::kNativeBitWithinByte) |
-                (1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+            Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                      (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
   __ j(not_zero, &done_convert);
   {
     // ----------- S t a t e -------------
@@ -2207,7 +2190,8 @@
        RelocInfo::CODE_TARGET);
 
   // Check if target has a [[Call]] internal method.
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
   __ j(zero, &non_callable);
 
   __ CmpInstanceType(ecx, JS_PROXY_TYPE);
@@ -2343,7 +2327,8 @@
        RelocInfo::CODE_TARGET);
 
   // Check if target has a [[Construct]] internal method.
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
   __ j(zero, &non_constructor, Label::kNear);
 
   // Only dispatch to bound functions after checking whether they are
@@ -2415,27 +2400,6 @@
 
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
-
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrongModeByteOffset),
-              1 << SharedFunctionInfo::kStrongModeBitWithinByte);
-    __ j(equal, &no_strong_error, Label::kNear);
-
-    // What we really care about is the required number of arguments.
-    __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kLengthOffset));
-    __ SmiUntag(ecx);
-    __ cmp(eax, ecx);
-    __ j(greater_equal, &no_strong_error, Label::kNear);
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentsAdaptorStackCheck(masm, &stack_overflow);
 
@@ -2474,7 +2438,7 @@
   // Call the entry point.
   __ bind(&invoke);
   // Restore function pointer.
-  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
   // eax : expected number of arguments
   // edx : new target (passed through to callee)
   // edi : function (passed through to callee)
@@ -2649,24 +2613,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm->isolate());
-  __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &ok, Label::kNear);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
-         RelocInfo::CODE_TARGET);
-
-  __ bind(&ok);
-  __ ret(0);
-}
-
 #undef __
 }  // namespace internal
 }  // namespace v8
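
With the reworked PrepareForTailCall, the generated code only needs to decide where the caller's argument count comes from before handing off to MacroAssembler::PrepareForTailCall. A hedged C++ mirror of that decision (hypothetical helper, not the generated ia32 code):

// If the frame above the callee is an arguments adaptor frame (its
// context-or-frame-type slot holds the ARGUMENTS_ADAPTOR marker), the real
// caller argument count lives in that frame's length slot; otherwise the
// callee function's declared formal parameter count is used.
inline int CallerArgumentCount(bool caller_is_arguments_adaptor,
                               int adaptor_frame_length,
                               int formal_parameter_count) {
  return caller_is_arguments_adaptor ? adaptor_frame_length
                                     : formal_parameter_count;
}
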
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 510b58e..53b35a3 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -4,9 +4,10 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
-#include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/ia32/code-stubs-ia32.h"
 #include "src/ia32/frames-ia32.h"
@@ -84,6 +85,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -275,7 +280,7 @@
             Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
     __ add(result_reg,
            Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
-    __ shrd(result_reg, scratch1);
+    __ shrd_cl(scratch1, result_reg);
     __ shr_cl(result_reg);
     __ test(ecx, Immediate(32));
     __ cmov(not_equal, scratch1, result_reg);
@@ -553,7 +558,8 @@
     __ fstp(1);    // 2^X
     // Bail out to runtime in case of exceptions in the status word.
     __ fnstsw_ax();
-    __ test_b(eax, 0x5F);  // We check for all but precision exception.
+    __ test_b(eax,
+              Immediate(0x5F));  // We check for all but precision exception.
     __ j(not_zero, &fast_power_failed, Label::kNear);
     __ fstp_d(Operand(esp, 0));
     __ movsd(double_result, Operand(esp, 0));
@@ -665,34 +671,6 @@
 }
 
 
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  Register scratch = eax;
-  DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
-  // Check that the key is an array index, that is Uint32.
-  __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
-  __ j(not_zero, &slow);
-
-  // Everything is fine, call runtime.
-  __ pop(scratch);
-  __ push(receiver);  // receiver
-  __ push(key);       // key
-  __ push(scratch);   // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   // Return address is on the stack.
   Label miss;
@@ -1103,7 +1081,7 @@
   if (FLAG_debug_code) {
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
-    __ test_b(ebx, kIsIndirectStringMask);
+    __ test_b(ebx, Immediate(kIsIndirectStringMask));
     __ Assert(zero, kExternalStringExpectedButNotFound);
   }
   __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
@@ -1112,7 +1090,7 @@
   __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   STATIC_ASSERT(kTwoByteStringTag == 0);
   // (8) Is the external string one byte?  If yes, go to (5).
-  __ test_b(ebx, kStringEncodingMask);
+  __ test_b(ebx, Immediate(kStringEncodingMask));
   __ j(not_zero, &seq_one_byte_string);  // Go to (5).
 
   // eax: sequential subject string (or look-alike, external string)
@@ -1237,13 +1215,13 @@
       __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
       __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
       // Call runtime on identical JSObjects.  Otherwise return equal.
-      __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
+      __ cmpb(ecx, Immediate(FIRST_JS_RECEIVER_TYPE));
       __ j(above_equal, &runtime_call, Label::kFar);
       // Call runtime on identical symbols since we need to throw a TypeError.
-      __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
+      __ cmpb(ecx, Immediate(SYMBOL_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
       // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
+      __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
     }
     __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -1389,7 +1367,7 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label return_unequal, undetectable;
+    Label return_equal, return_unequal, undetectable;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -1397,16 +1375,16 @@
     STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
-    __ j(not_zero, &runtime_call, Label::kNear);
+    __ j(not_zero, &runtime_call);
 
     __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
     __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
 
     __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     __ j(not_zero, &undetectable, Label::kNear);
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     __ j(not_zero, &return_unequal, Label::kNear);
 
     __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
@@ -1420,8 +1398,18 @@
 
     __ bind(&undetectable);
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     __ j(zero, &return_unequal, Label::kNear);
+
+    // If both sides are JSReceivers, then the result is false according to
+    // the HTML specification, which says that only comparisons with null or
+    // undefined are affected by special casing for document.all.
+    __ CmpInstanceType(ebx, ODDBALL_TYPE);
+    __ j(zero, &return_equal, Label::kNear);
+    __ CmpInstanceType(ecx, ODDBALL_TYPE);
+    __ j(not_zero, &return_unequal, Label::kNear);
+
+    __ bind(&return_equal);
     __ Move(eax, Immediate(EQUAL));
     __ ret(0);  // eax, edx were pushed
   }
@@ -1980,8 +1968,9 @@
 
   // Push marker in two places.
   int marker = type();
-  __ push(Immediate(Smi::FromInt(marker)));  // context slot
-  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  __ push(Immediate(Smi::FromInt(marker)));  // marker
+  ExternalReference context_address(Isolate::kContextAddress, isolate());
+  __ push(Operand::StaticVariable(context_address));  // context
   // Save callee-saved registers (C calling conventions).
   __ push(edi);
   __ push(esi);
@@ -2110,9 +2099,14 @@
   __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
   __ j(not_equal, &slow_case);
 
+  // Go to the runtime if the function is not a constructor.
+  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &slow_case);
+
   // Ensure that {function} has an instance prototype.
   __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
-            static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
+            Immediate(1 << Map::kHasNonInstancePrototype));
   __ j(not_zero, &slow_case);
 
   // Get the "prototype" (or initial map) of the {function}.
@@ -2146,7 +2140,7 @@
 
   // Check if the object needs to be access checked.
   __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded);
+            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &fast_runtime_fallback, Label::kNear);
   // Check if the current object is a Proxy.
   __ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2181,7 +2175,8 @@
   __ Push(object);
   __ Push(function);
   __ PushReturnAddressFrom(scratch);
-  __ TailCallRuntime(Runtime::kInstanceOf);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -2493,13 +2488,13 @@
   Label two_byte_sequential, runtime_drop_two, sequential_string;
   STATIC_ASSERT(kExternalStringTag != 0);
   STATIC_ASSERT(kSeqStringTag == 0);
-  __ test_b(ebx, kExternalStringTag);
+  __ test_b(ebx, Immediate(kExternalStringTag));
   __ j(zero, &sequential_string);
 
   // Handle external string.
   // Rule out short external strings.
   STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(ebx, kShortExternalStringMask);
+  __ test_b(ebx, Immediate(kShortExternalStringMask));
   __ j(not_zero, &runtime);
   __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
   // Move the pointer so that offset-wise, it looks like a sequential string.
@@ -2512,7 +2507,7 @@
   __ push(edi);
   __ SmiUntag(ecx);
   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ test_b(ebx, kStringEncodingMask);
+  __ test_b(ebx, Immediate(kStringEncodingMask));
   __ j(zero, &two_byte_sequential);
 
   // Sequential one byte string.  Allocate the result.
@@ -2601,23 +2596,21 @@
   __ Ret();
   __ bind(&not_heap_number);
 
-  Label not_string, slow_string;
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in eax.
+  __ AssertNotNumber(eax);
+
+  Label not_string;
   __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
   // eax: object
   // edi: object map
   __ j(above_equal, &not_string, Label::kNear);
-  // Check if string has a cached array index.
-  __ test(FieldOperand(eax, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(not_zero, &slow_string, Label::kNear);
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ IndexFromHash(eax, eax);
-  __ Ret();
-  __ bind(&slow_string);
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kStringToNumber);
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
   __ bind(&not_string);
 
   Label not_oddball;
@@ -2633,26 +2626,26 @@
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in eax.
+  __ AssertString(eax);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes on argument in eax.
-  Label not_smi, positive_smi;
-  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ test(eax, eax);
-  __ j(greater_equal, &positive_smi, Label::kNear);
-  __ xor_(eax, eax);
-  __ bind(&positive_smi);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ test(FieldOperand(eax, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &runtime, Label::kNear);
+  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+  __ IndexFromHash(eax, eax);
   __ Ret();
-  __ bind(&not_smi);
 
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ bind(&runtime);
+  __ PopReturnAddressTo(ecx);     // Pop return address.
+  __ Push(eax);                   // Push argument.
+  __ PushReturnAddressFrom(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in eax.
   Label is_number;
@@ -2849,44 +2842,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edx    : left string
-  //  -- eax    : right string
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ AssertString(edx);
-  __ AssertString(eax);
-
-  Label not_same;
-  __ cmp(edx, eax);
-  __ j(not_equal, &not_same, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both objects are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
-
-  // Compare flat one-byte strings.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
-                                                  edi);
-
-  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
-  // tagged as a small integer.
-  __ bind(&runtime);
-  __ PopReturnAddressTo(ecx);
-  __ Push(edx);
-  __ Push(eax);
-  __ PushReturnAddressFrom(ecx);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- edx    : left
@@ -3217,13 +3172,20 @@
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
-  __ pop(tmp1);  // Return address.
-  __ push(left);
-  __ push(right);
-  __ push(tmp1);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left);
+      __ Push(right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ sub(eax, Immediate(masm->isolate()->factory()->true_value()));
+    __ Ret();
   } else {
+    __ pop(tmp1);  // Return address.
+    __ push(left);
+    __ push(right);
+    __ push(tmp1);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3708,7 +3670,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ mov(ebx, MemOperand(ebp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ pop(ecx);
@@ -4426,7 +4388,7 @@
     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // is the low bit set? If so, we are holey and that is good.
-    __ test_b(edx, 1);
+    __ test_b(edx, Immediate(1));
     __ j(not_zero, &normal_sequence);
   }
 
@@ -4875,7 +4837,7 @@
     __ bind(&loop);
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
     __ j(not_equal, &loop);
   }
 
@@ -4883,7 +4845,7 @@
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
   __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+  __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_rest_parameters, Label::kNear);
 
@@ -5026,7 +4988,7 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
@@ -5263,14 +5225,14 @@
     __ bind(&loop);
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
     __ j(not_equal, &loop);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+  __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &arguments_adaptor, Label::kNear);
   {
@@ -5570,7 +5532,7 @@
   Label profiler_disabled;
   Label end_profiler_check;
   __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
-  __ cmpb(Operand(eax, 0), 0);
+  __ cmpb(Operand(eax, 0), Immediate(0));
   __ j(zero, &profiler_disabled);
 
   // Additional parameter is the address of the actual getter function.
@@ -5693,17 +5655,13 @@
   __ jmp(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- edi                 : callee
   //  -- ebx                 : call_data
   //  -- ecx                 : holder
   //  -- edx                 : api_function_address
   //  -- esi                 : context
-  //  -- eax                 : number of arguments if argc is a register
   //  --
   //  -- esp[0]              : return address
   //  -- esp[4]              : last argument
@@ -5730,17 +5688,9 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || eax.is(argc.reg()));
-
-  if (argc.is_immediate()) {
-    __ pop(return_address);
-    // context save.
-    __ push(context);
-  } else {
-    // pop return address and save context
-    __ xchg(context, Operand(esp, 0));
-    return_address = context;
-  }
+  __ pop(return_address);
+  // context save.
+  __ push(context);
 
   // callee
   __ push(callee);
@@ -5749,7 +5699,7 @@
   __ push(call_data);
 
   Register scratch = call_data;
-  if (!call_data_undefined) {
+  if (!call_data_undefined()) {
     // return value
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
     // return value default
@@ -5770,7 +5720,7 @@
   // push return address
   __ push(return_address);
 
-  if (!is_lazy) {
+  if (!is_lazy()) {
     // load context from callee
     __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
   }
@@ -5789,27 +5739,13 @@
 
   // FunctionCallbackInfo::implicit_args_.
   __ mov(ApiParameterOperand(2), scratch);
-  if (argc.is_immediate()) {
-    __ add(scratch,
-           Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
-    // FunctionCallbackInfo::values_.
-    __ mov(ApiParameterOperand(3), scratch);
-    // FunctionCallbackInfo::length_.
-    __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
-    // FunctionCallbackInfo::is_construct_call_.
-    __ Move(ApiParameterOperand(5), Immediate(0));
-  } else {
-    __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
-                            (FCA::kArgsLength - 1) * kPointerSize));
-    // FunctionCallbackInfo::values_.
-    __ mov(ApiParameterOperand(3), scratch);
-    // FunctionCallbackInfo::length_.
-    __ mov(ApiParameterOperand(4), argc.reg());
-    // FunctionCallbackInfo::is_construct_call_.
-    __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
-                               (FCA::kArgsLength + 1) * kPointerSize));
-    __ mov(ApiParameterOperand(5), argc.reg());
-  }
+  __ add(scratch, Immediate((argc() + FCA::kArgsLength - 1) * kPointerSize));
+  // FunctionCallbackInfo::values_.
+  __ mov(ApiParameterOperand(3), scratch);
+  // FunctionCallbackInfo::length_.
+  __ Move(ApiParameterOperand(4), Immediate(argc()));
+  // FunctionCallbackInfo::is_construct_call_.
+  __ Move(ApiParameterOperand(5), Immediate(0));
 
   // v8::InvocationCallback's argument.
   __ lea(scratch, ApiParameterOperand(2));
@@ -5822,7 +5758,7 @@
                                   (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument
   int return_value_offset = 0;
-  if (return_first_arg) {
+  if (is_store()) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5831,10 +5767,8 @@
   int stack_space = 0;
   Operand is_construct_call_operand = ApiParameterOperand(5);
   Operand* stack_space_operand = &is_construct_call_operand;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_operand = nullptr;
-  }
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_operand = nullptr;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                            ApiParameterOperand(1), stack_space,
                            stack_space_operand, return_value_operand,
@@ -5842,23 +5776,6 @@
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- esp[0]                        : return address
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 2f94f35..2190531 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -704,6 +704,7 @@
   __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
   __ j(equal, &only_change_map);
 
+  __ push(esi);
   __ push(eax);
   __ push(edx);
   __ push(ebx);
@@ -753,10 +754,10 @@
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ pop(ebx);
   __ pop(edx);
   __ pop(eax);
+  __ pop(esi);
   __ jmp(fail);
 
   // Box doubles into heap numbers.
@@ -818,7 +819,7 @@
 
   // Restore registers.
   __ pop(eax);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(esi);
 
   __ bind(&success);
 }
@@ -886,11 +887,11 @@
   }
   // Rule out short external strings.
   STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(result, kShortExternalStringMask);
+  __ test_b(result, Immediate(kShortExternalStringMask));
   __ j(not_zero, call_runtime);
   // Check encoding.
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ test_b(result, kStringEncodingMask);
+  __ test_b(result, Immediate(kStringEncodingMask));
   __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
   __ j(not_equal, &one_byte_external, Label::kNear);
   // Two-byte string.
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index a3756ae..656d3e9 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -186,20 +186,6 @@
   }
 }
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  int parameter_count = shared->internal_formal_parameter_count() + 1;
-  unsigned input_frame_size = input_->GetFrameSize();
-  unsigned alignment_state_offset =
-      input_frame_size - parameter_count * kPointerSize -
-      StandardFrameConstants::kFixedFrameSize -
-      kPointerSize;
-  DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
-      JavaScriptFrameConstants::kLocal0Offset);
-  int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
-  return (alignment_state == kAlignmentPaddingPushed);
-}
-
-
 #define __ masm()->
 
 void Deoptimizer::TableEntryGenerator::Generate() {
@@ -240,7 +226,12 @@
 
   // Allocate a new deoptimizer object.
   __ PrepareCallCFunction(6, eax);
+  __ mov(eax, Immediate(0));
+  Label context_check;
+  __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(edi, &context_check);
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
   __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
   __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
   __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
@@ -311,19 +302,9 @@
   }
   __ pop(eax);
 
-  // If frame was dynamically aligned, pop padding.
-  Label no_padding;
-  __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
-         Immediate(0));
-  __ j(equal, &no_padding);
-  __ pop(ecx);
-  if (FLAG_debug_code) {
-    __ cmp(ecx, Immediate(kAlignmentZapValue));
-    __ Assert(equal, kAlignmentMarkerExpected);
-  }
-  __ bind(&no_padding);
+  __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
 
-  // Replace the current frame with the output frames.
+  // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
   // Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index b11ff97..3cd0ac6 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -29,29 +29,18 @@
 };
 
 static const ByteMnemonic two_operands_instr[] = {
-    {0x01, "add", OPER_REG_OP_ORDER},
-    {0x03, "add", REG_OPER_OP_ORDER},
-    {0x09, "or", OPER_REG_OP_ORDER},
-    {0x0B, "or", REG_OPER_OP_ORDER},
-    {0x1B, "sbb", REG_OPER_OP_ORDER},
-    {0x21, "and", OPER_REG_OP_ORDER},
-    {0x23, "and", REG_OPER_OP_ORDER},
-    {0x29, "sub", OPER_REG_OP_ORDER},
-    {0x2A, "subb", REG_OPER_OP_ORDER},
-    {0x2B, "sub", REG_OPER_OP_ORDER},
-    {0x31, "xor", OPER_REG_OP_ORDER},
-    {0x33, "xor", REG_OPER_OP_ORDER},
-    {0x38, "cmpb", OPER_REG_OP_ORDER},
-    {0x39, "cmp", OPER_REG_OP_ORDER},
-    {0x3A, "cmpb", REG_OPER_OP_ORDER},
-    {0x3B, "cmp", REG_OPER_OP_ORDER},
-    {0x84, "test_b", REG_OPER_OP_ORDER},
-    {0x85, "test", REG_OPER_OP_ORDER},
-    {0x87, "xchg", REG_OPER_OP_ORDER},
-    {0x8A, "mov_b", REG_OPER_OP_ORDER},
-    {0x8B, "mov", REG_OPER_OP_ORDER},
-    {0x8D, "lea", REG_OPER_OP_ORDER},
-    {-1, "", UNSET_OP_ORDER}};
+    {0x01, "add", OPER_REG_OP_ORDER},   {0x03, "add", REG_OPER_OP_ORDER},
+    {0x09, "or", OPER_REG_OP_ORDER},    {0x0B, "or", REG_OPER_OP_ORDER},
+    {0x13, "adc", REG_OPER_OP_ORDER},   {0x1B, "sbb", REG_OPER_OP_ORDER},
+    {0x21, "and", OPER_REG_OP_ORDER},   {0x23, "and", REG_OPER_OP_ORDER},
+    {0x29, "sub", OPER_REG_OP_ORDER},   {0x2A, "subb", REG_OPER_OP_ORDER},
+    {0x2B, "sub", REG_OPER_OP_ORDER},   {0x31, "xor", OPER_REG_OP_ORDER},
+    {0x33, "xor", REG_OPER_OP_ORDER},   {0x38, "cmpb", OPER_REG_OP_ORDER},
+    {0x39, "cmp", OPER_REG_OP_ORDER},   {0x3A, "cmpb", REG_OPER_OP_ORDER},
+    {0x3B, "cmp", REG_OPER_OP_ORDER},   {0x84, "test_b", REG_OPER_OP_ORDER},
+    {0x85, "test", REG_OPER_OP_ORDER},  {0x87, "xchg", REG_OPER_OP_ORDER},
+    {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
+    {0x8D, "lea", REG_OPER_OP_ORDER},   {-1, "", UNSET_OP_ORDER}};
 
 static const ByteMnemonic zero_operands_instr[] = {
   {0xC3, "ret", UNSET_OP_ORDER},
@@ -1218,20 +1207,34 @@
   switch (f0byte) {
     case 0x0B:
       return "ud2";
-    case 0x18: return "prefetch";
-    case 0xA2: return "cpuid";
-    case 0xBE: return "movsx_b";
-    case 0xBF: return "movsx_w";
-    case 0xB6: return "movzx_b";
-    case 0xB7: return "movzx_w";
-    case 0xAF: return "imul";
-    case 0xA5: return "shld";
-    case 0xAD: return "shrd";
-    case 0xAC: return "shrd";  // 3-operand version.
-    case 0xAB: return "bts";
+    case 0x18:
+      return "prefetch";
+    case 0xA2:
+      return "cpuid";
+    case 0xBE:
+      return "movsx_b";
+    case 0xBF:
+      return "movsx_w";
+    case 0xB6:
+      return "movzx_b";
+    case 0xB7:
+      return "movzx_w";
+    case 0xAF:
+      return "imul";
+    case 0xA4:
+      return "shld";
+    case 0xA5:
+      return "shld";
+    case 0xAD:
+      return "shrd";
+    case 0xAC:
+      return "shrd";  // 3-operand version.
+    case 0xAB:
+      return "bts";
     case 0xBC:
       return "bsf";
-    case 0xBD: return "bsr";
+    case 0xBD:
+      return "bsr";
     default: return NULL;
   }
 }
@@ -1470,8 +1473,18 @@
             data += SetCC(data);
           } else if ((f0byte & 0xF0) == 0x40) {
             data += CMov(data);
+          } else if (f0byte == 0xA4 || f0byte == 0xAC) {
+            // shld, shrd
+            data += 2;
+            AppendToBuffer("%s ", f0mnem);
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            int8_t imm8 = static_cast<int8_t>(data[1]);
+            data += 2;
+            AppendToBuffer("%s,%s,%d", NameOfCPURegister(rm),
+                           NameOfCPURegister(regop), static_cast<int>(imm8));
           } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
-            // shrd, shld, bts
+            // shrd_cl, shld_cl, bts
             data += 2;
             AppendToBuffer("%s ", f0mnem);
             int mod, regop, rm;
@@ -1608,6 +1621,13 @@
           int imm = *reinterpret_cast<int16_t*>(data);
           AppendToBuffer(",0x%x", imm);
           data += 2;
+        } else if (*data == 0xF7) {
+          data++;
+          AppendToBuffer("%s ", "test_w");
+          data += PrintRightOperand(data);
+          int imm = *reinterpret_cast<int16_t*>(data);
+          AppendToBuffer(",0x%x", imm);
+          data += 2;
         } else if (*data == 0x0F) {
           data++;
           if (*data == 0x38) {
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 609dfec..2d7cd02 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -28,10 +28,6 @@
 // Number of registers for which space is reserved in safepoints.
 const int kNumSafepointRegisters = 8;
 
-const int kNoAlignmentPadding = 0;
-const int kAlignmentPaddingPushed = 2;
-const int kAlignmentZapValue = 0x12345678;  // Not heap object tagged.
-
 // ----------------------------------------------------
 
 
@@ -46,13 +42,11 @@
   static const int kArgvOffset          = +6 * kPointerSize;
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize      = 2 * kPointerSize;
-
-  static const int kCodeOffset     = -2 * kPointerSize;
-  static const int kSPOffset       = -1 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 
   static const int kCallerFPOffset =  0 * kPointerSize;
   static const int kCallerPCOffset = +1 * kPointerSize;
@@ -70,13 +64,11 @@
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
   static const int kLastParameterOffset = +2 * kPointerSize;
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 
   // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
   static const int kReceiverOffset = -1 * kPointerSize;
-
-  static const int kDynamicAlignmentStateOffset = kLocal0Offset;
 };
 
 
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index b36cf63..2748f90 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -114,36 +114,8 @@
 }
 
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // ToNumberStub invokes a function, and therefore needs a context.
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -272,6 +244,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -316,6 +295,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // The stack parameter count (the argument count) is passed in eax.
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -324,20 +309,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {edx, eax};
@@ -397,21 +368,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {
-      edi,  // callee
-      ebx,  // call_data
-      ecx,  // holder
-      edx,  // api_function_address
-      eax,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       edi,  // callee
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 12daec8..f9fd8d6 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -204,15 +204,15 @@
   mov(Operand::StaticVariable(store_buffer), scratch);
   // Call stub on end of buffer.
   // Check for end of buffer.
-  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
   if (and_then == kReturnAtEnd) {
     Label buffer_overflowed;
-    j(not_equal, &buffer_overflowed, Label::kNear);
+    j(equal, &buffer_overflowed, Label::kNear);
     ret(0);
     bind(&buffer_overflowed);
   } else {
     DCHECK(and_then == kFallThroughAtEnd);
-    j(equal, &done, Label::kNear);
+    j(not_equal, &done, Label::kNear);
   }
   StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
   CallStub(&store_buffer_overflow);
@@ -454,7 +454,7 @@
   lea(dst, FieldOperand(object, offset));
   if (emit_debug_code()) {
     Label ok;
-    test_b(dst, (1 << kPointerSizeLog2) - 1);
+    test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
     j(zero, &ok, Label::kNear);
     int3();
     bind(&ok);
@@ -487,7 +487,7 @@
   if (emit_debug_code()) {
     Label ok;
     lea(address, FieldOperand(object, HeapObject::kMapOffset));
-    test_b(address, (1 << kPointerSizeLog2) - 1);
+    test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
     j(zero, &ok, Label::kNear);
     int3();
     bind(&ok);
@@ -682,7 +682,6 @@
   call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
 }
 
-
 void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
   xorps(dst, dst);
   cvtsi2sd(dst, src);
@@ -707,6 +706,71 @@
   bind(&jmp_return);
 }
 
+void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+  if (shift >= 32) {
+    mov(high, low);
+    shl(high, shift - 32);
+    xor_(low, low);
+  } else {
+    shld(high, low, shift);
+    shl(low, shift);
+  }
+}
+
+void MacroAssembler::ShlPair_cl(Register high, Register low) {
+  shld_cl(high, low);
+  shl_cl(low);
+  Label done;
+  test(ecx, Immediate(0x20));
+  j(equal, &done, Label::kNear);
+  mov(high, low);
+  xor_(low, low);
+  bind(&done);
+}
+
+void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+  if (shift >= 32) {
+    mov(low, high);
+    shr(low, shift - 32);
+    xor_(high, high);
+  } else {
+    shrd(high, low, shift);
+    shr(high, shift);
+  }
+}
+
+void MacroAssembler::ShrPair_cl(Register high, Register low) {
+  shrd_cl(low, high);
+  shr_cl(high);
+  Label done;
+  test(ecx, Immediate(0x20));
+  j(equal, &done, Label::kNear);
+  mov(low, high);
+  xor_(high, high);
+  bind(&done);
+}
+
+void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
+  if (shift >= 32) {
+    mov(low, high);
+    sar(low, shift - 32);
+    sar(high, 31);
+  } else {
+    shrd(high, low, shift);
+    sar(high, shift);
+  }
+}
+
+void MacroAssembler::SarPair_cl(Register high, Register low) {
+  shrd_cl(low, high);
+  sar_cl(high);
+  Label done;
+  test(ecx, Immediate(0x20));
+  j(equal, &done, Label::kNear);
+  mov(low, high);
+  sar(high, 31);
+  bind(&done);
+}
 
 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
   static const int kMaxImmediateBits = 17;
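
The ShlPair/ShrPair/SarPair helpers added above implement 64-bit shifts on a high/low register pair for ia32: counts of 32 or more move one word across the pair and fill the vacated half, smaller counts use shld/shrd plus a plain shift, and the *_cl variants make the same >=32 decision at run time by testing bit 5 of ecx. A minimal reference model of the left-shift case (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

// Models what MacroAssembler::ShlPair computes on a 64-bit value split into
// 32-bit halves.
void ShlPairModel(uint32_t& high, uint32_t& low, unsigned shift) {
  if (shift >= 32) {
    high = low << (shift - 32);  // mov high, low; shl high, shift-32
    low = 0;                     // xor low, low
  } else if (shift > 0) {        // guard: low >> 32 would be undefined in C++
    high = (high << shift) | (low >> (32 - shift));  // shld high, low, shift
    low <<= shift;                                   // shl low, shift
  }
}

int main() {
  uint32_t high = 0x00000001, low = 0x80000000;  // the 64-bit value 0x1'80000000
  ShlPairModel(high, low, 1);
  assert(high == 0x00000003 && low == 0x00000000);
  return 0;
}
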
@@ -744,8 +808,7 @@
 
 
 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
-  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
-       static_cast<int8_t>(type));
+  cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
 }
 
 
@@ -757,7 +820,7 @@
   STATIC_ASSERT(FAST_ELEMENTS == 2);
   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleyElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   j(above, fail, distance);
 }
 
@@ -770,10 +833,10 @@
   STATIC_ASSERT(FAST_ELEMENTS == 2);
   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleySmiElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   j(below_equal, fail, distance);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleyElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   j(above, fail, distance);
 }
 
@@ -784,7 +847,7 @@
   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleySmiElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   j(above, fail, distance);
 }
 
@@ -873,7 +936,7 @@
                                            Register instance_type) {
   mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
-  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
+  cmpb(instance_type, Immediate(LAST_NAME_TYPE));
   return below_equal;
 }
 
@@ -895,6 +958,15 @@
   }
 }
 
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsANumber);
+    cmp(FieldOperand(object, HeapObject::kMapOffset),
+        isolate()->factory()->heap_number_map());
+    Check(not_equal, kOperandIsANumber);
+  }
+}
 
 void MacroAssembler::AssertSmi(Register object) {
   if (emit_debug_code()) {
@@ -988,15 +1060,12 @@
   }
 }
 
-
-void MacroAssembler::StubPrologue() {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
   push(ebp);  // Caller's frame pointer.
   mov(ebp, esp);
-  push(esi);  // Callee's context.
-  push(Immediate(Smi::FromInt(StackFrame::STUB)));
+  push(Immediate(Smi::FromInt(type)));
 }
 
-
 void MacroAssembler::Prologue(bool code_pre_aging) {
   PredictableCodeSizeScope predictible_code_size_scope(this,
       kNoCodeAgeSequenceLength);
@@ -1031,9 +1100,10 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, esp);
-  push(esi);
   push(Immediate(Smi::FromInt(type)));
-  push(Immediate(CodeObject()));
+  if (type == StackFrame::INTERNAL) {
+    push(Immediate(CodeObject()));
+  }
   if (emit_debug_code()) {
     cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
     Check(not_equal, kCodeObjectNotProperlyPatched);
@@ -1043,7 +1113,7 @@
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   if (emit_debug_code()) {
-    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+    cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
         Immediate(Smi::FromInt(type)));
     Check(equal, kStackFrameTypesMustMatch);
   }
@@ -1053,15 +1123,17 @@
 
 void MacroAssembler::EnterExitFramePrologue() {
   // Set up the frame structure on the stack.
-  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
-  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
-  DCHECK(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+  DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   push(ebp);
   mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
-  DCHECK(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  push(Immediate(Smi::FromInt(StackFrame::EXIT)));
+  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
   push(Immediate(0));  // Saved entry sp, patched before call.
+  DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
   push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
@@ -1080,7 +1152,7 @@
     int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
                 argc * kPointerSize;
     sub(esp, Immediate(space));
-    const int offset = -2 * kPointerSize;
+    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
     for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
@@ -1123,7 +1195,7 @@
 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
   // Optionally restore all XMM registers.
   if (save_doubles) {
-    const int offset = -2 * kPointerSize;
+    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
     for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
@@ -1206,8 +1278,18 @@
   DCHECK(!holder_reg.is(scratch2));
   DCHECK(!scratch1.is(scratch2));
 
-  // Load current lexical context from the stack frame.
-  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  mov(scratch2, ebp);
+  bind(&load_context);
+  mov(scratch1,
+      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  JumpIfNotSmi(scratch1, &has_context);
+  mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
+  jmp(&load_context);
+  bind(&has_context);
 
   // When generating debug code, make sure the lexical context is set.
   if (emit_debug_code()) {
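
CheckAccessGlobalProxy can no longer read the lexical context straight off the frame, because STUB frames now store a Smi frame-type marker in that slot instead of a context; the loop added above therefore walks caller frame pointers until the slot holds a heap object. A toy model of that walk (not V8 code; on ia32 a Smi has a clear low tag bit):

#include <cassert>
#include <cstdint>

// STUB frames hold a Smi frame-type marker where JavaScript frames hold their
// Context, so walk caller frames until the slot holds a heap object (odd tag).
struct Frame {
  Frame* caller_fp;
  uintptr_t context_or_marker;
};

bool IsSmi(uintptr_t word) { return (word & 1) == 0; }

uintptr_t FindContext(const Frame* fp) {
  while (IsSmi(fp->context_or_marker)) fp = fp->caller_fp;
  return fp->context_or_marker;
}

int main() {
  Frame js_frame = {nullptr, 0x1001};   // odd word: stands in for a Context*
  Frame stub_frame = {&js_frame, 0x4};  // even word: Smi frame-type marker
  assert(FindContext(&stub_frame) == 0x1001);
  return 0;
}
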
@@ -1920,7 +2002,7 @@
   int byte_index = bit_index / kBitsPerByte;
   int byte_bit_index = bit_index & (kBitsPerByte - 1);
   test_b(FieldOperand(object, field_offset + byte_index),
-         static_cast<byte>(1 << byte_bit_index));
+         Immediate(1 << byte_bit_index));
 }
 
 
@@ -2086,6 +2168,87 @@
   jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
 }
 
+void MacroAssembler::PrepareForTailCall(
+    const ParameterCount& callee_args_count, Register caller_args_count_reg,
+    Register scratch0, Register scratch1, ReturnAddressState ra_state,
+    int number_of_temp_values_after_return_address) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+  DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
+         number_of_temp_values_after_return_address == 0);
+#endif
+
+  // Calculate the destination address where we will put the return address
+  // after we drop the current frame.
+  Register new_sp_reg = scratch0;
+  if (callee_args_count.is_reg()) {
+    sub(caller_args_count_reg, callee_args_count.reg());
+    lea(new_sp_reg,
+        Operand(ebp, caller_args_count_reg, times_pointer_size,
+                StandardFrameConstants::kCallerPCOffset -
+                    number_of_temp_values_after_return_address * kPointerSize));
+  } else {
+    lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
+                            StandardFrameConstants::kCallerPCOffset -
+                                (callee_args_count.immediate() +
+                                 number_of_temp_values_after_return_address) *
+                                    kPointerSize));
+  }
+
+  if (FLAG_debug_code) {
+    cmp(esp, new_sp_reg);
+    Check(below, kStackAccessBelowStackPointer);
+  }
+
+  // Copy the return address from the caller's frame into the current frame's
+  // return address slot so it is not clobbered, and let the following loop
+  // move it to its final place.
+  Register tmp_reg = scratch1;
+  if (ra_state == ReturnAddressState::kOnStack) {
+    mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+    mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
+        tmp_reg);
+  } else {
+    DCHECK(ReturnAddressState::kNotOnStack == ra_state);
+    DCHECK_EQ(0, number_of_temp_values_after_return_address);
+    Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+  }
+
+  // Restore caller's frame pointer now as it could be overwritten by
+  // the copying loop.
+  mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // +2 here is to copy both receiver and return address.
+  Register count_reg = caller_args_count_reg;
+  if (callee_args_count.is_reg()) {
+    lea(count_reg, Operand(callee_args_count.reg(),
+                           2 + number_of_temp_values_after_return_address));
+  } else {
+    mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
+                             number_of_temp_values_after_return_address));
+    // TODO(ishell): Unroll copying loop for small immediate values.
+  }
+
+  // Now copy the callee arguments to the caller frame, going backwards to
+  // avoid corrupting them (the source and destination areas could overlap).
+  Label loop, entry;
+  jmp(&entry, Label::kNear);
+  bind(&loop);
+  dec(count_reg);
+  mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
+  mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+  bind(&entry);
+  cmp(count_reg, Immediate(0));
+  j(not_equal, &loop, Label::kNear);
+
+  // Leave current frame.
+  mov(esp, new_sp_reg);
+}
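
PrepareForTailCall above drops the current frame and slides the callee's arguments (plus the return address) up over the caller's frame. Because the source and destination slot ranges may overlap and the destination lies at higher addresses, the copy runs from the last slot downward, just like the dec/cmp loop at the end of the function. A tiny standalone model of that copy (illustrative, not V8 code):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Slide `count` stack slots from `src` up to `dst` (dst > src, ranges may
// overlap), copying the highest slot first so nothing is overwritten before
// it is read -- the same order as the assembly loop above.
void SlideSlotsUp(uintptr_t* dst, const uintptr_t* src, size_t count) {
  for (size_t i = count; i > 0; --i) {
    dst[i - 1] = src[i - 1];
  }
}

int main() {
  uintptr_t stack[6] = {10, 11, 12, 13, 14, 15};
  SlideSlotsUp(stack + 2, stack, 4);  // move 4 slots up by 2, overlapping
  assert(stack[2] == 10 && stack[5] == 13);
  return 0;
}
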
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -2160,7 +2323,7 @@
   Label skip_flooding;
   ExternalReference step_in_enabled =
       ExternalReference::debug_step_in_enabled_address(isolate());
-  cmpb(Operand::StaticVariable(step_in_enabled), 0);
+  cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
   j(equal, &skip_flooding);
   {
     FrameScope frame(this,
@@ -2850,7 +3013,7 @@
   Label succeed;
   test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
   j(zero, &succeed);
-  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+  cmpb(operand, Immediate(SYMBOL_TYPE));
   j(not_equal, not_unique_name, distance);
 
   bind(&succeed);
@@ -2998,8 +3161,7 @@
     and_(scratch, object);
   }
   if (mask < (1 << kBitsPerByte)) {
-    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
-           static_cast<uint8_t>(mask));
+    test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
   } else {
     test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
   }
@@ -3022,7 +3184,7 @@
   DCHECK(!isolate()->heap()->mark_compact_collector()->
          IsOnEvacuationCandidate(*map));
   if (mask < (1 << kBitsPerByte)) {
-    test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+    test_b(Operand::StaticVariable(reference), Immediate(mask));
   } else {
     test(Operand::StaticVariable(reference), Immediate(mask));
   }
@@ -3062,7 +3224,8 @@
   jmp(&other_color, Label::kNear);
 
   bind(&word_boundary);
-  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
+         Immediate(1));
 
   j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
   bind(&other_color);
@@ -3164,19 +3327,40 @@
     Register receiver_reg,
     Register scratch_reg,
     Label* no_memento_found) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
 
-  lea(scratch_reg, Operand(receiver_reg,
-      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
-  cmp(scratch_reg, Immediate(new_space_start));
-  j(less, no_memento_found);
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+  j(zero, &top_check);
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  xor_(scratch_reg, receiver_reg);
+  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+  j(not_zero, no_memento_found);
+  // Continue with the actual map check.
+  jmp(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
   cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
   j(greater, no_memento_found);
-  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
-      Immediate(isolate()->factory()->allocation_memento_map()));
+  // Memento map check.
+  bind(&map_check);
+  mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
+  cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
 }
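
The rewritten FindAllocationMemento above replaces the old new-space-start comparison with page-based reasoning: the word after the JSArray may only be read if it lies on the same page as the object itself, or is below the current allocation top on top's page. The xor-and-mask sequence it uses is simply a same-page test; a standalone model (the page size here is an arbitrary choice for the example, not V8's):

#include <cassert>
#include <cstdint>

// Two addresses are on the same page iff they agree on every bit above the
// page-offset bits; xor cancels the shared high bits and the mask drops the
// in-page offset, which is exactly what the assembly above tests.
const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;  // example: 1 MiB pages

bool OnSamePage(uintptr_t a, uintptr_t b) {
  return ((a ^ b) & ~kPageAlignmentMask) == 0;
}

int main() {
  assert(OnSamePage(0x40000010u, 0x400FFFF0u));
  assert(!OnSamePage(0x40000010u, 0x40100000u));
  return 0;
}
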
 
 
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 9ebae1f..be11f66 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -44,6 +44,8 @@
 
 enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
 
+enum class ReturnAddressState { kOnStack, kNotOnStack };
+
 #ifdef DEBUG
 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                 Register reg4 = no_reg, Register reg5 = no_reg,
@@ -234,7 +236,7 @@
   void DebugBreak();
 
   // Generates function and stub prologue code.
-  void StubPrologue();
+  void StubPrologue(StackFrame::Type type);
   void Prologue(bool code_pre_aging);
 
   // Enter specific kind of exit frame. Expects the number of
@@ -323,6 +325,20 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Removes the current frame and its arguments from the stack, preserving
+  // the arguments and a return address pushed to the stack for the next call.
+  // |ra_state| defines whether the return address is already pushed to the
+  // stack. Neither |callee_args_count| nor |caller_args_count_reg| includes
+  // the receiver. |callee_args_count| is not modified; |caller_args_count_reg|
+  // is trashed. |number_of_temp_values_after_return_address| specifies the
+  // number of words pushed to the stack after the return address, which allows
+  // "allocation" of scratch registers that this function requires by saving
+  // their values on the stack.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1, ReturnAddressState ra_state,
+                          int number_of_temp_values_after_return_address);
+
   // Invoke the JavaScript function code by either calling or jumping.
 
   void InvokeFunctionCode(Register function, Register new_target,
@@ -358,6 +374,13 @@
 
   void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
 
+  void ShlPair(Register high, Register low, uint8_t imm8);
+  void ShlPair_cl(Register high, Register low);
+  void ShrPair(Register high, Register low, uint8_t imm8);
+  void ShrPair_cl(Register high, Register src);
+  void SarPair(Register high, Register low, uint8_t imm8);
+  void SarPair_cl(Register high, Register low);
+
   // Support for constant splitting.
   bool IsUnsafeImmediate(const Immediate& x);
   void SafeMove(Register dst, const Immediate& x);
@@ -522,6 +545,7 @@
 
   // Abort execution if argument is not a number, enabled via --debug-code.
   void AssertNumber(Register object);
+  void AssertNotNumber(Register object);
 
   // Abort execution if argument is not a smi, enabled via --debug-code.
   void AssertSmi(Register object);
@@ -777,12 +801,6 @@
   void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
   void Popcnt(Register dst, const Operand& src);
 
-  // Emit call to the code we are currently generating.
-  void CallSelf() {
-    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
-    call(self, RelocInfo::CODE_TARGET);
-  }
-
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
@@ -796,6 +814,7 @@
   void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
   void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
 
+  void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
   void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
 
   // Push a handle value.
diff --git a/src/ic/access-compiler.cc b/src/ic/access-compiler.cc
index 0f1b7b9..c992192 100644
--- a/src/ic/access-compiler.cc
+++ b/src/ic/access-compiler.cc
@@ -18,7 +18,8 @@
   if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_print_code_stubs) {
-    OFStream os(stdout);
+    CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
+    OFStream os(trace_scope.file());
     code->Disassemble(name, os);
   }
 #endif
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index 5f2b861..a3f23d3 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -28,6 +29,9 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(cp);
+
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
       DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@
         receiver = scratch;
       }
       __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(r1, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ mov(r0, Operand(0));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -51,7 +55,7 @@
     }
 
     // Restore context register.
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ pop(cp);
   }
   __ Ret();
 }
@@ -66,6 +70,8 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(cp);
     // Save value register, so we can restore it later.
     __ push(value());
 
@@ -81,11 +87,11 @@
         receiver = scratch;
       }
       __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(r1, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ mov(r0, Operand(1));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -96,7 +102,7 @@
     __ pop(r0);
 
     // Restore context register.
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ pop(cp);
   }
   __ Ret();
 }
@@ -248,7 +254,7 @@
   }
   DCHECK(optimization.is_simple_api_call());
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = r0;
   Register data = r4;
   Register holder = r2;
@@ -317,7 +323,7 @@
   __ mov(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -742,18 +748,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index 51ae3b5..a704492 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -160,7 +161,7 @@
 
   DCHECK(optimization.is_simple_api_call());
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = x0;
   Register data = x4;
   Register holder = x2;
@@ -229,7 +230,7 @@
   __ Mov(api_function_address, ref);
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -245,6 +246,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ Push(cp);
     // Save value register, so we can restore it later.
     __ Push(value());
 
@@ -260,11 +263,11 @@
         receiver = scratch;
       }
       __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(x1, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Mov(x0, 1);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -275,7 +278,7 @@
     __ Pop(x0);
 
     // Restore context register.
-    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ Pop(cp);
   }
   __ Ret();
 }
@@ -287,6 +290,9 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ Push(cp);
+
     if (accessor_index >= 0) {
       DCHECK(!AreAliased(holder, scratch));
       DCHECK(!AreAliased(receiver, scratch));
@@ -298,11 +304,11 @@
         receiver = scratch;
       }
       __ Push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(x1, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Mov(x0, 0);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -310,7 +316,7 @@
     }
 
     // Restore context register.
-    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ Pop(cp);
   }
   __ Ret();
 }
@@ -380,22 +386,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  Label miss;
-
-  ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
-
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index 803281e..714888c 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -81,7 +81,8 @@
                                               Handle<Name> name) {
   Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
   Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG, *code, *name));
+  PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG,
+                                     AbstractCode::cast(*code), *name));
 #ifdef DEBUG
   code->VerifyEmbeddedObjects();
 #endif
@@ -581,7 +582,9 @@
           (is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
            *receiver_map == isolate()->get_initial_js_array_map(elements_kind));
 
-      if (receiver_map->has_indexed_interceptor()) {
+      if (receiver_map->has_indexed_interceptor() &&
+          !receiver_map->GetIndexedInterceptor()->getter()->IsUndefined() &&
+          !receiver_map->GetIndexedInterceptor()->non_masking()) {
         cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
       } else if (IsSloppyArgumentsElements(elements_kind)) {
         cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index 45d7d73..76036a2 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -231,7 +231,6 @@
   Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
                                      int accessor_index,
                                      int expected_arguments);
-  Handle<Code> CompileStoreInterceptor(Handle<Name> name);
 
   static void GenerateStoreViaSetter(MacroAssembler* masm, Handle<Map> map,
                                      Register receiver, Register holder,
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index 3bdddf9..132090d 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -23,6 +24,9 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(esi);
+
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
       DCHECK(!receiver.is(scratch));
@@ -34,11 +38,11 @@
         receiver = scratch;
       }
       __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Set(eax, 0);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -46,7 +50,7 @@
     }
 
     // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ pop(esi);
   }
   __ ret(0);
 }
@@ -90,7 +94,7 @@
 
   // Bail out if the receiver has a named interceptor or requires access checks.
   __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
-            kInterceptorOrAccessCheckNeededMask);
+            Immediate(kInterceptorOrAccessCheckNeededMask));
   __ j(not_zero, miss_label);
 
   // Check that receiver is a JSObject.
@@ -158,7 +162,7 @@
   // Stack now matches JSFunction abi.
   DCHECK(optimization.is_simple_api_call());
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = edi;
   Register data = ebx;
   Register holder = ecx;
@@ -220,7 +224,7 @@
   __ mov(api_function_address, Immediate(function_address));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -252,6 +256,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(esi);
     // Save value register, so we can restore it later.
     __ push(value());
 
@@ -267,11 +273,11 @@
       }
       __ push(receiver);
       __ push(value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Set(eax, 1);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -280,9 +286,8 @@
 
     // We have to return the passed value, not the return value of the setter.
     __ pop(eax);
-
     // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ pop(esi);
   }
   __ ret(0);
 }
@@ -758,22 +763,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(this->name());
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
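
The handler-compiler hunks above replace the InvokeFunction-based accessor calls with a direct call to the CallFunction builtin: the argument count register is set explicitly (0 for getters, 1 for setters) and the mode is ConvertReceiverMode::kNotNullOrUndefined because the receiver has already been type-checked, while the context register is now preserved with an explicit push/pop. The following self-contained C++ sketch shows what that receiver mode communicates; only the names mirror V8, the logic is illustrative and not the real builtin.

#include <iostream>
#include <string>

// Simplified model of receiver handling behind a Call builtin (assumption:
// names mirror V8's ConvertReceiverMode, behaviour is illustrative only).
enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

struct JSValue {
  bool is_null_or_undefined;
  std::string description;
};

// A generic call maps a null/undefined receiver to the global proxy in sloppy
// mode. The rewritten stubs have already checked the receiver, so passing
// kNotNullOrUndefined lets the callee skip that substitution entirely.
JSValue PrepareReceiver(JSValue receiver, ConvertReceiverMode mode,
                        const JSValue& global_proxy) {
  if (mode != ConvertReceiverMode::kNotNullOrUndefined &&
      receiver.is_null_or_undefined) {
    return global_proxy;
  }
  return receiver;
}

int main() {
  JSValue global{false, "global proxy"};
  JSValue undef{true, "undefined"};
  // Generic call path: undefined receiver is replaced by the global proxy.
  std::cout << PrepareReceiver(undef, ConvertReceiverMode::kAny, global)
                   .description << "\n";
  // Stub path: the guarantee makes the substitution unnecessary.
  std::cout << PrepareReceiver(undef, ConvertReceiverMode::kNotNullOrUndefined,
                               global).description << "\n";
}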
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index 0eba427..e66716f 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -150,8 +150,9 @@
   __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
 
   // Check bit field.
-  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+  __ test_b(
+      FieldOperand(map, Map::kBitFieldOffset),
+      Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
   __ j(not_zero, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type. In
   // the case that the object is a value-wrapper object, we enter the runtime
@@ -202,9 +203,9 @@
   // scratch2: map of current prototype
   __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
   __ j(below, slow);
-  __ test_b(
-      FieldOperand(scratch2, Map::kBitFieldOffset),
-      (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor));
+  __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
+            Immediate((1 << Map::kIsAccessCheckNeeded) |
+                      (1 << Map::kHasIndexedInterceptor)));
   __ j(not_zero, slow);
   __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
   __ j(not_equal, slow);
@@ -251,7 +252,7 @@
   // bit test is enough.
   STATIC_ASSERT(kNotInternalizedTag != 0);
   __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
-            kIsNotInternalizedMask);
+            Immediate(kIsNotInternalizedMask));
   __ j(not_zero, not_unique);
 
   __ bind(&unique);
@@ -521,7 +522,7 @@
   // Check that the receiver does not require access checks and is not observed.
   // The generic stub does not perform map checks or handle observed objects.
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+            Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
   __ j(not_zero, &slow);
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &maybe_name_key);
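
The ic-ia32.cc hunks above are mechanical: the assembler's test_b now takes its mask as an explicit Immediate operand. The underlying operation is a single-byte bit test against flags in the map's bit field, as in this minimal sketch (bit positions are assumed for illustration only, they are not V8's real values):

#include <cstdint>
#include <iostream>

// Assumed bit positions, chosen purely for illustration.
constexpr int kIsAccessCheckNeeded = 1;
constexpr int kHasIndexedInterceptor = 2;

// Equivalent of: test_b(bit_field, Immediate(mask)); j(not_zero, slow).
bool NeedsSlowPath(uint8_t map_bit_field) {
  constexpr uint8_t mask = static_cast<uint8_t>(
      (1 << kIsAccessCheckNeeded) | (1 << kHasIndexedInterceptor));
  return (map_bit_field & mask) != 0;
}

int main() {
  std::cout << NeedsSlowPath(0x00) << " "                          // 0: fast
            << NeedsSlowPath(1 << kHasIndexedInterceptor) << "\n";  // 1: slow
}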
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
index f74c69e..9f1d87a 100644
--- a/src/ic/ic-compiler.cc
+++ b/src/ic/ic-compiler.cc
@@ -132,29 +132,6 @@
 }
 
 
-Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
-                                                   CompareNilICStub* stub) {
-  Isolate* isolate = receiver_map->GetIsolate();
-  Handle<String> name(isolate->heap()->empty_string());
-  if (!receiver_map->is_dictionary_map()) {
-    Handle<Code> cached_ic =
-        Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState());
-    if (!cached_ic.is_null()) return cached_ic;
-  }
-
-  Code::FindAndReplacePattern pattern;
-  Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-  pattern.Add(isolate->factory()->meta_map(), cell);
-  Handle<Code> ic = stub->GetCodeCopy(pattern);
-
-  if (!receiver_map->is_dictionary_map()) {
-    Map::UpdateCodeCache(receiver_map, name, ic);
-  }
-
-  return ic;
-}
-
-
 void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
     MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
     CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
@@ -175,7 +152,8 @@
 Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
   LoadIC::GenerateInitialize(masm());
   Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
-  PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
+  PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG,
+                                     AbstractCode::cast(*code), 0));
   return code;
 }
 
@@ -183,7 +161,8 @@
 Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
   StoreIC::GenerateInitialize(masm());
   Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG,
+                                     AbstractCode::cast(*code), 0));
   return code;
 }
 
@@ -191,8 +170,8 @@
 Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
   StoreIC::GeneratePreMonomorphic(masm());
   Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG,
+                                     AbstractCode::cast(*code), 0));
   return code;
 }
 
@@ -202,7 +181,8 @@
   LanguageMode language_mode = StoreICState::GetLanguageMode(extra_state);
   GenerateRuntimeSetProperty(masm(), language_mode);
   Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG,
+                                     AbstractCode::cast(*code), 0));
   return code;
 }
 
@@ -210,7 +190,8 @@
 Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
   StoreIC::GenerateMegamorphic(masm());
   Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG,
+                                     AbstractCode::cast(*code), 0));
   return code;
 }
 
@@ -221,7 +202,8 @@
   Code::Flags flags =
       Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
   Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+  PROFILE(isolate(),
+          CodeCreateEvent(log_kind(code), AbstractCode::cast(*code), *name));
 #ifdef DEBUG
   code->VerifyEmbeddedObjects();
 #endif
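
The profiler calls above now pass AbstractCode::cast(*code) instead of a raw Code pointer; around this V8 version AbstractCode acts as the common view over compiled Code and interpreter bytecode, so CodeCreateEvent takes the shared type. A small sketch of that pattern with stand-in types (not V8's) follows.

#include <iostream>
#include <string>

// Stand-in for a common interface over the two kinds of executable code.
struct AbstractCode {
  virtual ~AbstractCode() = default;
  virtual size_t size() const = 0;
};
struct MachineCode : AbstractCode {
  size_t size() const override { return 128; }
};
struct Bytecode : AbstractCode {
  size_t size() const override { return 16; }
};

// The logging call site only needs the shared view.
void CodeCreateEvent(const std::string& tag, const AbstractCode& code) {
  std::cout << tag << ": " << code.size() << " bytes\n";
}

int main() {
  MachineCode stub;
  Bytecode interpreted;
  CodeCreateEvent("LOAD_INITIALIZE_TAG", stub);
  CodeCreateEvent("InterpretedFunction", interpreted);
}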
diff --git a/src/ic/ic-compiler.h b/src/ic/ic-compiler.h
index 08444df..3a5aecc 100644
--- a/src/ic/ic-compiler.h
+++ b/src/ic/ic-compiler.h
@@ -33,10 +33,6 @@
       CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
       LanguageMode language_mode);
 
-  // Compare nil
-  static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
-                                        CompareNilICStub* stub);
-
   // Helpers
   // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
   // and make the helpers private.
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index 933803c..bf1e45f 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -404,7 +404,9 @@
       if (value->IsInternalizedString()) return INTERNALIZED_STRING;
       if (value->IsString()) return STRING;
       if (value->IsSymbol()) return UNIQUE_NAME;
-      if (value->IsJSReceiver()) return RECEIVER;
+      if (value->IsJSReceiver() && !value->IsUndetectable()) {
+        return RECEIVER;
+      }
       break;
     case BOOLEAN:
       if (value->IsBoolean()) return BOOLEAN;
@@ -428,7 +430,9 @@
       if (value->IsUniqueName()) return UNIQUE_NAME;
       break;
     case RECEIVER:
-      if (value->IsJSReceiver()) return RECEIVER;
+      if (value->IsJSReceiver() && !value->IsUndetectable()) {
+        return RECEIVER;
+      }
       break;
     case GENERIC:
       break;
@@ -464,6 +468,9 @@
       }
       if (x->IsString() && y->IsString()) return STRING;
       if (x->IsJSReceiver() && y->IsJSReceiver()) {
+        if (x->IsUndetectable() || y->IsUndetectable()) {
+          return GENERIC;
+        }
         if (Handle<JSReceiver>::cast(x)->map() ==
             Handle<JSReceiver>::cast(y)->map()) {
           return KNOWN_RECEIVER;
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index c0b3e49..c5835e4 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -6,8 +6,10 @@
 
 #include "src/accessors.h"
 #include "src/api.h"
+#include "src/api-arguments.h"
 #include "src/arguments.h"
 #include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/codegen.h"
 #include "src/conversions.h"
 #include "src/execution.h"
@@ -465,8 +467,6 @@
       return;
     case Code::COMPARE_IC:
       return CompareIC::Clear(isolate, address, target, constant_pool);
-    case Code::COMPARE_NIL_IC:
-      return CompareNilIC::Clear(address, target, constant_pool);
     case Code::CALL_IC:  // CallICs are vector-based and cleared differently.
     case Code::BINARY_OP_IC:
     case Code::TO_BOOLEAN_IC:
@@ -1106,9 +1106,8 @@
 
   // TODO(mvstanton): we'd only like to cache code on the map when it's custom
   // code compiled for this map, otherwise it's already cached in the global
-  // code
-  // cache. We are also guarding against installing code with flags that don't
-  // match the desired CacheHolderFlag computed above, which would lead to
+  // code cache. We are also guarding against installing code with flags that
+  // don't match the desired CacheHolderFlag computed above, which would lead to
   // invalid lookups later.
   if (code->type() != Code::NORMAL &&
       Code::ExtractCacheHolderFromFlags(code->flags()) == flag) {
@@ -1208,6 +1207,7 @@
             break;
           }
           if (!holder->HasFastProperties()) break;
+          if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
           NamedLoadHandlerCompiler compiler(isolate(), map, holder,
                                             cache_holder);
           return compiler.CompileLoadCallback(lookup->name(), info);
@@ -1297,10 +1297,10 @@
 Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
   Handle<Code> null_handle;
   Handle<Map> receiver_map(receiver->map(), isolate());
+  DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE);  // Checked by caller.
   MapHandleList target_receiver_maps;
   TargetMaps(&target_receiver_maps);
 
-
   if (target_receiver_maps.length() == 0) {
     Handle<Code> handler =
         PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
@@ -1309,6 +1309,14 @@
     return null_handle;
   }
 
+  for (int i = 0; i < target_receiver_maps.length(); i++) {
+    if (!target_receiver_maps.at(i).is_null() &&
+        target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
+      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSValue");
+      return megamorphic_stub();
+    }
+  }
+
   // The first time a receiver is seen that is a transitioned version of the
   // previous monomorphic receiver type, assume the new ElementsKind is the
   // monomorphic type. This benefits global arrays that only transition
@@ -1422,7 +1430,8 @@
         Handle<JSObject> holder = it->GetHolder<JSObject>();
         InterceptorInfo* info = holder->GetNamedInterceptor();
         if (it->HolderIsReceiverOrHiddenPrototype()) {
-          if (!info->setter()->IsUndefined()) return true;
+          return !info->non_masking() && receiver.is_identical_to(holder) &&
+                 !info->setter()->IsUndefined();
         } else if (!info->getter()->IsUndefined() ||
                    !info->query()->IsUndefined()) {
           return false;
@@ -1722,8 +1731,7 @@
 
     case LookupIterator::INTERCEPTOR: {
       DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
-      NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-      return compiler.CompileStoreInterceptor(lookup->name());
+      return CodeFactory::StoreInterceptor(isolate()).code();
     }
 
     case LookupIterator::ACCESSOR: {
@@ -1749,6 +1757,7 @@
           TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
           break;
         }
+        if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
         return compiler.CompileStoreCallback(receiver, lookup->name(), info,
                                              language_mode());
@@ -2722,57 +2731,6 @@
 }
 
 
-void CompareNilIC::Clear(Address address, Code* target, Address constant_pool) {
-  if (IsCleared(target)) return;
-  ExtraICState state = target->extra_ic_state();
-
-  CompareNilICStub stub(target->GetIsolate(), state,
-                        HydrogenCodeStub::UNINITIALIZED);
-  stub.ClearState();
-
-  Code* code = NULL;
-  CHECK(stub.FindCodeInCache(&code));
-
-  SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
-  ExtraICState extra_ic_state = target()->extra_ic_state();
-
-  CompareNilICStub stub(isolate(), extra_ic_state);
-
-  // Extract the current supported types from the patched IC and calculate what
-  // types must be supported as a result of the miss.
-  bool already_monomorphic = stub.IsMonomorphic();
-
-  stub.UpdateStatus(object);
-
-  // Find or create the specialized stub to support the new set of types.
-  Handle<Code> code;
-  if (stub.IsMonomorphic()) {
-    Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
-                                    ? FirstTargetMap()
-                                    : HeapObject::cast(*object)->map());
-    code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub);
-  } else {
-    code = stub.GetCode();
-  }
-  set_target(*code);
-  return isolate()->factory()->ToBoolean(object->IsUndetectableObject());
-}
-
-
-RUNTIME_FUNCTION(Runtime_CompareNilIC_Miss) {
-  TimerEventScope<TimerEventIcMiss> timer(isolate);
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
-  HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);
-  CompareNilIC ic(isolate);
-  return *ic.CompareNil(object);
-}
-
-
 RUNTIME_FUNCTION(Runtime_Unreachable) {
   UNREACHABLE();
   CHECK(false);
@@ -2781,7 +2739,7 @@
 
 
 Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
-  ToBooleanStub stub(isolate(), target()->extra_ic_state());
+  ToBooleanICStub stub(isolate(), target()->extra_ic_state());
   bool to_boolean_value = stub.UpdateStatus(object);
   Handle<Code> code = stub.GetCode();
   set_target(*code);
@@ -2821,12 +2779,11 @@
       FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
   DCHECK(fun != NULL);
 
-  LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
   Object::ShouldThrow should_throw =
       is_sloppy(language_mode) ? Object::DONT_THROW : Object::THROW_ON_ERROR;
   PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
                                         *holder, should_throw);
-  custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+  custom_args.Call(fun, name, value);
   RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
   return *value;
 }
@@ -2843,17 +2800,29 @@
   DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
   Handle<Name> name =
       args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
-  Handle<JSObject> receiver =
-      args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+  Handle<Object> receiver =
+      args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
   Handle<JSObject> holder =
       args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
   HandleScope scope(isolate);
-  LookupIterator it(receiver, name, holder, LookupIterator::OWN);
-  bool done;
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, JSObject::GetPropertyWithInterceptor(&it, &done));
-  if (done) return *result;
+
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, receiver, Object::ConvertReceiver(isolate, receiver));
+  }
+
+  InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+  PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+                                      *holder, Object::DONT_THROW);
+
+  v8::GenericNamedPropertyGetterCallback getter =
+      v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+          interceptor->getter());
+  Handle<Object> result = arguments.Call(getter, name);
+
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+
+  if (!result.is_null()) return *result;
   return isolate->heap()->no_interceptor_result_sentinel();
 }
 
@@ -2867,21 +2836,42 @@
   DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
   Handle<Name> name =
       args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
-  Handle<JSObject> receiver =
-      args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+  Handle<Object> receiver =
+      args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
   Handle<JSObject> holder =
       args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
 
-  Handle<Object> result;
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, receiver, Object::ConvertReceiver(isolate, receiver));
+  }
+
+  InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+  PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+                                      *holder, Object::DONT_THROW);
+
+  v8::GenericNamedPropertyGetterCallback getter =
+      v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+          interceptor->getter());
+  Handle<Object> result = arguments.Call(getter, name);
+
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+
+  if (!result.is_null()) return *result;
+
   LookupIterator it(receiver, name, holder);
-  // TODO(conradw): Investigate strong mode semantics for this.
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSObject::GetProperty(&it));
+  // Skip any lookup work until we hit the (possibly non-masking) interceptor.
+  while (it.state() != LookupIterator::INTERCEPTOR ||
+         !it.GetHolder<JSObject>().is_identical_to(holder)) {
+    DCHECK(it.state() != LookupIterator::ACCESS_CHECK || it.HasAccess());
+    it.Next();
+  }
+  // Skip past the interceptor.
+  it.Next();
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
 
   if (it.IsFound()) return *result;
 
-  // Return the undefined result if the reference error should not be thrown.
-  // Note that both keyed and non-keyed loads may end up here.
   LoadICNexus nexus(isolate);
   LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
   if (!ic.ShouldThrowReferenceError(it.GetReceiver())) {
@@ -2902,26 +2892,34 @@
   Handle<JSObject> receiver = args.at<JSObject>(0);
   Handle<Name> name = args.at<Name>(1);
   Handle<Object> value = args.at<Object>(2);
-#ifdef DEBUG
-  PrototypeIterator iter(isolate, receiver,
-                         PrototypeIterator::START_AT_RECEIVER,
-                         PrototypeIterator::END_AT_NON_HIDDEN);
-  bool found = false;
-  for (; !iter.IsAtEnd(); iter.Advance()) {
-    Handle<Object> current = PrototypeIterator::GetCurrent(iter);
-    if (current->IsJSObject() &&
-        Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
-      found = true;
-      break;
-    }
+
+  DCHECK(receiver->HasNamedInterceptor());
+  InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
+  DCHECK(!interceptor->non_masking());
+  PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+                                      *receiver, Object::DONT_THROW);
+
+  v8::GenericNamedPropertySetterCallback setter =
+      v8::ToCData<v8::GenericNamedPropertySetterCallback>(
+          interceptor->setter());
+  Handle<Object> result = arguments.Call(setter, name, value);
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+  if (!result.is_null()) return *value;
+
+  LookupIterator it(receiver, name, receiver);
+  // Skip past any access check on the receiver.
+  if (it.state() == LookupIterator::ACCESS_CHECK) {
+    DCHECK(it.HasAccess());
+    it.Next();
   }
-  DCHECK(found);
-#endif
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSObject::SetProperty(receiver, name, value, ic.language_mode()));
-  return *result;
+  // Skip past the interceptor on the receiver.
+  DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
+  it.Next();
+
+  MAYBE_RETURN(Object::SetProperty(&it, value, ic.language_mode(),
+                                   JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED),
+               isolate->heap()->exception());
+  return *value;
 }
 
 
@@ -2931,9 +2929,25 @@
   Handle<JSObject> receiver = args.at<JSObject>(0);
   DCHECK(args.smi_at(1) >= 0);
   uint32_t index = args.smi_at(1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::GetElement(isolate, receiver, index));
+
+  InterceptorInfo* interceptor = receiver->GetIndexedInterceptor();
+  PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
+                                      *receiver, Object::DONT_THROW);
+
+  v8::IndexedPropertyGetterCallback getter =
+      v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+  Handle<Object> result = arguments.Call(getter, index);
+
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+
+  if (result.is_null()) {
+    LookupIterator it(isolate, receiver, index, receiver);
+    DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
+    it.Next();
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                       Object::GetProperty(&it));
+  }
+
   return *result;
 }
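
The ic.cc hunks above rewrite the *PropertyWithInterceptor runtime entries to invoke the interceptor callbacks directly through PropertyCallbackArguments and, when the callback declines (returns a null result), to resume the lookup just past the interceptor so it is not entered a second time. The standalone sketch below models that control flow with hypothetical types (Getter, LookupStep); it is illustrative only, not V8 code.

#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

using Getter = std::function<std::optional<int>(const std::string&)>;

struct LookupStep {
  bool is_interceptor;
  Getter handler;  // ordinary lookup step; unused for the interceptor entry
};

std::optional<int> LoadWithInterceptor(const std::vector<LookupStep>& chain,
                                       const Getter& interceptor,
                                       const std::string& name) {
  // Give the interceptor callback the first chance to produce the value.
  if (auto result = interceptor(name)) return result;
  // Advance to the interceptor's position, then resume the lookup *past* it.
  size_t i = 0;
  while (i < chain.size() && !chain[i].is_interceptor) ++i;
  for (++i; i < chain.size(); ++i) {
    if (auto result = chain[i].handler(name)) return result;
  }
  return std::nullopt;  // caller decides whether to throw a ReferenceError
}

int main() {
  std::vector<LookupStep> chain = {
      {true, Getter()},
      {false, [](const std::string& n) -> std::optional<int> {
         if (n == "x") return 42;
         return std::nullopt;
       }}};
  Getter declining = [](const std::string&) -> std::optional<int> {
    return std::nullopt;
  };
  std::cout << LoadWithInterceptor(chain, declining, "x").value_or(-1)
            << "\n";  // 42, produced by the post-interceptor lookup
}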
 
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 0a324a8..8bd2f44 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -606,18 +606,6 @@
 };
 
 
-class CompareNilIC : public IC {
- public:
-  explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
-  Handle<Object> CompareNil(Handle<Object> object);
-
-  static Handle<Code> GetUninitialized();
-
-  static void Clear(Address address, Code* target, Address constant_pool);
-};
-
-
 class ToBooleanIC : public IC {
  public:
   explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index f3af1cf..b924bda 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -28,6 +29,9 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(cp);
+
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
       DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@
         receiver = scratch;
       }
       __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ li(a0, Operand(0));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -51,7 +55,7 @@
     }
 
     // Restore context register.
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ pop(cp);
   }
   __ Ret();
 }
@@ -66,8 +70,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Save value register, so we can restore it later.
-    __ push(value());
+    // Save context and value registers, so we can restore them later.
+    __ Push(cp, value());
 
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
@@ -81,11 +85,11 @@
         receiver = scratch;
       }
       __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ li(a0, Operand(1));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -93,10 +97,8 @@
     }
 
     // We have to return the passed value, not the return value of the setter.
-    __ pop(v0);
-
     // Restore context register.
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ Pop(cp, v0);
   }
   __ Ret();
 }
@@ -241,7 +243,7 @@
   }
   DCHECK(optimization.is_simple_api_call());
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = a0;
   Register data = t0;
   Register holder = a2;
@@ -308,7 +310,7 @@
   __ li(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -739,18 +741,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 968effd..52260ee 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -28,6 +29,9 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(cp);
+
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
       DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@
         receiver = scratch;
       }
       __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ li(a0, Operand(V8_INT64_C(0)));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -51,7 +55,7 @@
     }
 
     // Restore context register.
-    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ pop(cp);
   }
   __ Ret();
 }
@@ -66,8 +70,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Save value register, so we can restore it later.
-    __ push(value());
+    // Save context and value registers, so we can restore them later.
+    __ Push(cp, value());
 
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
@@ -81,11 +85,11 @@
         receiver = scratch;
       }
       __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ li(a0, Operand(1));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -93,10 +97,8 @@
     }
 
     // We have to return the passed value, not the return value of the setter.
-    __ pop(v0);
-
     // Restore context register.
-    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ Pop(cp, v0);
   }
   __ Ret();
 }
@@ -241,7 +243,7 @@
   }
   DCHECK(optimization.is_simple_api_call());
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = a0;
   Register data = a4;
   Register holder = a2;
@@ -308,7 +310,7 @@
   __ li(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -739,18 +741,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index 6e7d78a..832c25a 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -28,6 +29,9 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(cp);
+
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
       DCHECK(!receiver.is(scratch));
@@ -39,11 +43,11 @@
         receiver = scratch;
       }
       __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ li(r3, Operand::Zero());
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -51,7 +55,7 @@
     }
 
     // Restore context register.
-    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ pop(cp);
   }
   __ Ret();
 }
@@ -66,8 +70,9 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
     // Save value register, so we can restore it later.
-    __ push(value());
+    __ Push(cp, value());
 
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
@@ -81,11 +86,11 @@
         receiver = scratch;
       }
       __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ li(r3, Operand(1));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -93,10 +98,8 @@
     }
 
     // We have to return the passed value, not the return value of the setter.
-    __ pop(r3);
-
     // Restore context register.
-    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ Pop(cp, r3);
   }
   __ Ret();
 }
@@ -246,7 +249,7 @@
   }
   DCHECK(optimization.is_simple_api_call());
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = r3;
   Register data = r7;
   Register holder = r5;
@@ -315,7 +318,7 @@
   __ mov(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -738,18 +741,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
diff --git a/src/ic/s390/OWNERS b/src/ic/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/ic/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/ic/s390/access-compiler-s390.cc b/src/ic/s390/access-compiler-s390.cc
new file mode 100644
index 0000000..316be71
--- /dev/null
+++ b/src/ic/s390/access-compiler-s390.cc
@@ -0,0 +1,41 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, r5, r2, r6, r7};
+  return registers;
+}
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, r5, r6, r7};
+  return registers;
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/s390/handler-compiler-s390.cc b/src/ic/s390/handler-compiler-s390.cc
new file mode 100644
index 0000000..1b39782
--- /dev/null
+++ b/src/ic/s390/handler-compiler-s390.cc
@@ -0,0 +1,750 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/handler-compiler.h"
+
+#include "src/api-arguments.h"
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+    int accessor_index, int expected_arguments, Register scratch) {
+  // ----------- S t a t e -------------
+  //  -- r2    : receiver
+  //  -- r4    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save context register
+    __ push(cp);
+
+    if (accessor_index >= 0) {
+      DCHECK(!holder.is(scratch));
+      DCHECK(!receiver.is(scratch));
+      // Call the JavaScript getter with the receiver on the stack.
+      if (map->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ LoadP(scratch,
+                 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+        receiver = scratch;
+      }
+      __ Push(receiver);
+      __ LoadAccessor(r3, holder, accessor_index, ACCESSOR_GETTER);
+      __ LoadImmP(r2, Operand::Zero());
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ pop(cp);
+  }
+  __ Ret();
+}
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+    int accessor_index, int expected_arguments, Register scratch) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save context register
+    // Save value register, so we can restore it later.
+    __ Push(cp, value());
+
+    if (accessor_index >= 0) {
+      DCHECK(!holder.is(scratch));
+      DCHECK(!receiver.is(scratch));
+      DCHECK(!value().is(scratch));
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (map->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ LoadP(scratch,
+                 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+        receiver = scratch;
+      }
+      __ Push(receiver, value());
+      __ LoadAccessor(r3, holder, accessor_index, ACCESSOR_SETTER);
+      __ LoadImmP(r2, Operand(1));
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    // Restore context register.
+    __ Pop(cp, r2);
+  }
+  __ Ret();
+}
+
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector, slot);
+}
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(vector, slot);
+}
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ la(sp, MemOperand(sp, 2 * kPointerSize));
+}
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ LoadlB(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ AndP(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ bne(miss_label);
+
+  // Check that receiver is a JSObject.
+  // TODO(joransiu): Merge into SI compare
+  __ LoadlB(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ CmpP(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ blt(miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  __ CompareRoot(map, Heap::kHashTableMapRootIndex);
+  __ bne(miss_label);
+
+  // Restore the temporarily used register.
+  __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register result, Label* miss) {
+  __ LoadNativeContextSlot(index, result);
+  // Load its initial map. The global functions all have initial maps.
+  __ LoadP(result,
+           FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ LoadRR(r2, scratch1);
+  __ Ret();
+}
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+  __ LoadWeakValue(scratch, weak_cell, miss);
+  __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+  __ bne(miss);
+}
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name);
+  __ Push(receiver);
+  __ Push(holder);
+}
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallRuntime(id);
+}
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateApiAccessorCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, Register store_parameter, Register accessor_holder,
+    int accessor_index) {
+  DCHECK(!accessor_holder.is(scratch_in));
+  DCHECK(!receiver.is(scratch_in));
+  __ Push(receiver);
+  // Write the arguments to stack frame.
+  if (is_store) {
+    DCHECK(!receiver.is(store_parameter));
+    DCHECK(!scratch_in.is(store_parameter));
+    __ Push(store_parameter);
+  }
+  DCHECK(optimization.is_simple_api_call());
+
+  // Abi for CallApiCallbackStub.
+  Register callee = r2;
+  Register data = r6;
+  Register holder = r4;
+  Register api_function_address = r3;
+
+  // Put callee in place.
+  __ LoadAccessor(callee, accessor_holder, accessor_index,
+                  is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  int holder_depth = 0;
+  optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
+                                          &holder_depth);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+      for (int i = 1; i < holder_depth; i++) {
+        __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+        __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+      }
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  bool call_data_undefined = false;
+  // Put call data in place.
+  if (api_call_info->data()->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
+  } else {
+    if (optimization.is_constant_call()) {
+      __ LoadP(data,
+               FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ LoadP(data,
+               FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ LoadP(data,
+               FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ LoadP(data,
+               FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
+    __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
+  }
+
+  if (api_call_info->fast_handler()->IsCode()) {
+    // Just tail call into the fast handler if present.
+    __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+            RelocInfo::CODE_TARGET);
+    return;
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+  __ mov(api_function_address, Operand(ref));
+
+  // Jump to stub.
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
+  __ TailCallStub(&stub);
+}
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister(),
+          VectorStoreICDescriptor::SlotRegister(),
+          VectorStoreICDescriptor::VectorRegister());
+}
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kStoreIC_Slow);
+}
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
+}
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ mov(this->name(), Operand(name));
+  }
+}
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
+  __ mov(this->name(), Operand(name));
+}
+
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+    Register current_map, Register destination_map) {
+  DCHECK(false);  // Not implemented.
+}
+
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register map_reg,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ LoadlW(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+    __ DecodeField<Map::Deprecated>(r0, scratch);
+    __ bne(miss);
+  }
+}
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
+                                                      Register value_reg,
+                                                      Register scratch,
+                                                      Label* miss_label) {
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ CmpP(value_reg, FieldMemOperand(
+                         scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ bne(miss_label);
+}
+
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
+                                                        Register value_reg,
+                                                        Label* miss_label) {
+  Register map_reg = scratch1();
+  Register scratch = scratch2();
+  DCHECK(!value_reg.is(map_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ JumpIfSmi(value_reg, miss_label);
+  if (field_type->IsClass()) {
+    __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+                    scratch);
+    __ bne(miss_label);
+  }
+}
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+    ReturnHolder return_what) {
+  Handle<Map> receiver_map = map();
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  if (FLAG_eliminate_prototype_chain_checks) {
+    Handle<Cell> validity_cell =
+        Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+    if (!validity_cell.is_null()) {
+      DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
+                validity_cell->value());
+      __ mov(scratch1, Operand(validity_cell));
+      __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+      __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
+      __ bne(miss);
+    }
+
+    // The prototype chain of primitives (and their JSValue wrappers) depends
+    // on the native context, which can't be guarded by validity cells.
+    // |object_reg| holds the native context specific prototype in this case;
+    // we need to check its map.
+    if (check == CHECK_ALL_MAPS) {
+      __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+      Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+      __ CmpWeakValue(scratch1, cell, scratch2);
+      __ b(ne, miss);
+    }
+  }
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (receiver_map->IsJSGlobalObjectMap()) {
+    current = isolate()->global_object();
+  }
+  // Check access rights to the global object.  This has to happen after
+  // the map check so that we know that the object is actually a global
+  // object.
+  // This allows us to install generated handlers for accesses to the
+  // global proxy (as opposed to using slow ICs). See corresponding code
+  // in LookupForRead().
+  if (receiver_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch2, miss);
+  }
+
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+        // TODO(jkummerow): Cache and re-use weak cell.
+        __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+      if (!FLAG_eliminate_prototype_chain_checks) {
+        __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+        __ LoadP(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+      }
+    } else {
+      Register map_reg = scratch1;
+      if (!FLAG_eliminate_prototype_chain_checks) {
+        __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+      if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      } else if (!FLAG_eliminate_prototype_chain_checks &&
+                 (depth != 1 || check == CHECK_ALL_MAPS)) {
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ CmpWeakValue(map_reg, cell, scratch2);
+        __ bne(miss);
+      }
+      if (!FLAG_eliminate_prototype_chain_checks) {
+        __ LoadP(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+      }
+    }
+
+    reg = holder_reg;  // From now on the object will be in holder_reg.
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  DCHECK(!current_map->IsJSGlobalProxyMap());
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (!FLAG_eliminate_prototype_chain_checks &&
+      (depth != 0 || check == CHECK_ALL_MAPS)) {
+    // Check the holder map.
+    __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ bne(miss);
+  }
+
+  bool return_holder = return_what == RETURN_HOLDER;
+  if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
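+    // Load the holder through a weak cell so that a cleared cell (i.e. the
+    // holder was collected) falls through to the miss handler.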
+    __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+  }
+
+  // Return the register containing the holder.
+  return return_holder ? reg : no_reg;
+}
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ b(&success);
+    __ bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ b(&success);
+    GenerateRestoreName(miss, name);
+    if (IC::ICUseVector(kind())) PopVectorAndSlot();
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ Move(r2, value);
+  __ Ret();
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  __ Push(receiver());
+  // Push data from AccessorInfo.
+  Handle<Object> data(callback->data(), isolate());
+  if (data->IsUndefined() || data->IsSmi()) {
+    __ Move(scratch2(), data);
+  } else {
+    Handle<WeakCell> cell =
+        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
+    // The callback is alive if this instruction is executed,
+    // so the weak cell is not cleared and points to data.
+    __ GetWeakValue(scratch2(), cell);
+  }
+  __ push(scratch2());
+  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+  __ Push(scratch2(), scratch2());
+  __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+  // should_throw_on_error -> false
+  __ mov(scratch3(), Operand(Smi::FromInt(0)));
+  __ Push(scratch2(), reg, scratch3(), name());
+
+  // Abi for CallApiGetter
+  Register getter_address_reg = ApiGetterDescriptor::function_address();
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, isolate());
+  __ mov(getter_address_reg, Operand(ref));
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from the
+  // holder and it is needed should the interceptor return without any result.
+  // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+  // case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+    if (must_preserve_receiver_reg) {
+      __ Push(receiver(), holder_reg, this->name());
+    } else {
+      __ Push(holder_reg, this->name());
+    }
+    InterceptorVectorSlotPush(holder_reg);
+    // Invoke an interceptor.  Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see a caller of this
+    // method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        Runtime::kLoadPropertyWithInterceptorOnly);
+
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
+    Label interceptor_failed;
+    __ CompareRoot(r2, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ beq(&interceptor_failed, Label::kNear);
+    frame_scope.GenerateLeaveFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    InterceptorVectorSlotPop(holder_reg);
+    __ Pop(this->name());
+    __ Pop(holder_reg);
+    if (must_preserve_receiver_reg) {
+      __ Pop(receiver());
+    }
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  // Call the runtime system to load the interceptor.
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+
+  __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
+}
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
+  Register holder_reg = Frontend(name);
+
+  __ Push(receiver(), holder_reg);  // Receiver and holder.
+
+  // If the callback cannot leak, then push the callback directly,
+  // otherwise wrap it in a weak cell.
+  if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+    __ mov(ip, Operand(callback));
+  } else {
+    Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+    __ mov(ip, Operand(cell));
+  }
+  __ Push(ip);
+  __ mov(ip, Operand(name));
+  __ Push(ip, value());
+  __ Push(Smi::FromInt(language_mode));
+
+  // Do tail-call to the runtime system.
+  __ TailCallRuntime(Runtime::kStoreCallbackProperty);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
+  FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
+
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
+  __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    __ beq(&miss);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1, r3, r5);
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
+  __ Ret();
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/s390/ic-compiler-s390.cc b/src/ic/s390/ic-compiler-s390.cc
new file mode 100644
index 0000000..a7691d8
--- /dev/null
+++ b/src/ic/s390/ic-compiler-s390.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(
+    MacroAssembler* masm, LanguageMode language_mode) {
+  __ mov(r0, Operand(Smi::FromInt(language_mode)));
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister(), r0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty);
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/s390/ic-s390.cc b/src/ic/s390/ic-s390.cc
new file mode 100644
index 0000000..d4f2886
--- /dev/null
+++ b/src/ic/s390/ic-s390.cc
@@ -0,0 +1,897 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/ic.h"
+#include "src/codegen.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ beq(global_object);
+  __ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
+  __ beq(global_object);
+}
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. Can be the same as elements or name, clobbering
+//           one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+                                   Register elements, Register name,
+                                   Register result, Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
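+  // Dictionary entries are (key, value, details) triples, so the details word
+  // of the matched entry lies two pointers past its key and the value one
+  // pointer past it.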
+  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ LoadRR(r0, scratch2);
+  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
+  __ AndP(scratch2, scratch1);
+  __ bne(miss);
+  __ LoadRR(scratch2, r0);
+
+  // Get the value at the masked, scaled index and return.
+  __ LoadP(result,
+           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+                                    Register elements, Register name,
+                                    Register value, Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  int kTypeAndReadOnlyMask =
+      PropertyDetails::TypeField::kMask |
+      PropertyDetails::AttributesField::encode(READ_ONLY);
+  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ LoadRR(r0, scratch2);
+  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
+  __ AndP(scratch2, scratch1);
+  __ bne(miss /*, cr0*/);
+  __ LoadRR(scratch2, r0);
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ StoreP(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ LoadRR(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs);
+}
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           Register scratch,
+                                           int interceptor_bit, Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
+  __ mov(r0,
+         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+  __ AndP(r0, scratch);
+  __ bne(slow /*, cr0*/);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object, we enter the
+  // runtime system to make sure that indexing into string objects works as
+  // intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ CmpP(scratch, Operand(JS_OBJECT_TYPE));
+  __ blt(slow);
+}
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register scratch1, Register scratch2,
+                                  Register result, Label* slow) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // elements - holds the elements of the receiver and its prototypes.
+  //
+  // scratch1 - used to hold elements length, bit fields, base addresses.
+  //
+  // scratch2 - used to hold maps, prototypes, and the loaded value.
+  Label check_prototypes, check_next_prototype;
+  Label done, in_bounds, absent;
+
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ AssertFastElements(elements);
+
+  // Check that the key (index) is within bounds.
+  __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ CmpLogicalP(key, scratch1);
+  __ blt(&in_bounds, Label::kNear);
+  // Out-of-bounds. Check the prototype chain to see if we can just return
+  // 'undefined'.
+  __ CmpP(key, Operand::Zero());
+  __ blt(slow);  // Negative keys can't take the fast OOB path.
+  __ bind(&check_prototypes);
+  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ bind(&check_next_prototype);
+  __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+  // scratch2: current prototype
+  __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+  __ beq(&absent, Label::kNear);
+  __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+  __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+  // elements: elements of current prototype
+  // scratch2: map of current prototype
+  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
+  __ blt(slow);
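+  // Bail out to the slow path if the prototype requires access checks or has
+  // an indexed interceptor.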
+  __ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+  __ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+                                (1 << Map::kHasIndexedInterceptor)));
+  __ bne(slow);
+  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+  __ bne(slow);
+  __ jmp(&check_next_prototype);
+
+  __ bind(&absent);
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&in_bounds);
+  // Fast case: Do the load.
+  __ AddP(scratch1, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // The key is a smi.
+  __ SmiToPtrArrayOffset(scratch2, key);
+  __ LoadP(scratch2, MemOperand(scratch2, scratch1));
+  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to check the prototype chain.
+  __ beq(&check_prototypes);
+  __ LoadRR(result, scratch2);
+  __ bind(&done);
+}
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // The key is not a smi.
+  Label unique;
+  // Is it a name?
+  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
+  __ bgt(not_unique);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ beq(&unique, Label::kNear);
+
+  // Is the string an array index, with cached numeric value?
+  __ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
+  __ AndP(r0, hash, r7);
+  __ beq(index_string);
+
+  // Is the string internalized? We know it's a string, so a single
+  // bit test is enough.
+  // map: key map
+  __ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0);
+  __ tmll(hash, Operand(kIsNotInternalizedMask));
+  __ bne(not_unique);
+
+  __ bind(&unique);
+}
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = r2;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+  Label slow;
+
+  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+                                       JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), r2, r5, r6);
+  __ Ret();
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+// A register that isn't one of the parameters to the load ic.
+static const Register LoadIC_TempRegister() { return r5; }
+
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Register slot = LoadDescriptor::SlotRegister();
+  Register vector = LoadWithVectorDescriptor::VectorRegister();
+
+  __ Push(receiver, name, slot, vector);
+}
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+
+  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
+                     LoadWithVectorDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);
+
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kLoadIC_Miss);
+}
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+
+  __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kGetProperty);
+}
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+
+  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
+                     LoadWithVectorDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);
+
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
+}
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
+}
+
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // The return address is in lr.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = LoadDescriptor::NameRegister();
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  DCHECK(key.is(r4));
+  DCHECK(receiver.is(r3));
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(r2, r5, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
+                      r5);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));
+
+  // Check whether the elements is a number dictionary.
+  // r5: elements map
+  // r6: elements
+  __ CompareRoot(r5, Heap::kHashTableMapRootIndex);
+  __ bne(&slow, Label::kNear);
+  __ SmiUntag(r2, key);
+  __ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
+  __ Ret();
+
+  // Slow case, key and receiver still in r2 and r3.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
+                      r5);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
+                                 Map::kHasNamedInterceptor, &slow);
+
+  // If the receiver is a fast-case object, check the stub cache. Otherwise
+  // probe the dictionary.
+  __ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ CompareRoot(r6, Heap::kHashTableMapRootIndex);
+  __ beq(&probe_dictionary);
+
+  // The handlers in the stub cache expect a vector and slot. Since we won't
+  // change the IC from any downstream misses, a dummy vector can be used.
+  Register vector = LoadWithVectorDescriptor::VectorRegister();
+  Register slot = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
+  Handle<TypeFeedbackVector> dummy_vector =
+      TypeFeedbackVector::DummyVector(masm->isolate());
+  int slot_index = dummy_vector->GetIndex(
+      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
+
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+                                               receiver, key, r6, r7, r8, r9);
+  // Cache miss.
+  GenerateMiss(masm);
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // r5: elements
+  __ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
+  // Load the property to r2.
+  GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+                      r6, r5);
+  __ Ret();
+
+  __ bind(&index_name);
+  __ IndexFromHash(r5, key);
+  // Now jump to the place where smi keys are handled.
+  __ b(&index_smi);
+}
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister(),
+          VectorStoreICDescriptor::SlotRegister(),
+          VectorStoreICDescriptor::VectorRegister());
+}
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
+}
+
+static void KeyedStoreGenerateMegamorphicHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+    Register value, Register key, Register receiver, Register receiver_map,
+    Register elements_map, Register elements) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  Register scratch = r6;
+  Register address = r7;
+  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+                     scratch, address));
+
+  if (check_map == kCheckMap) {
+    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ CmpP(elements_map,
+            Operand(masm->isolate()->factory()->fixed_array_map()));
+    __ bne(fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element
+  Label holecheck_passed1;
+  // @TODO(joransiu) : Fold AddP into memref of LoadP
+  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(scratch, key);
+  __ LoadP(scratch, MemOperand(address, scratch));
+  __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
+  __ bne(&holecheck_passed1, Label::kNear);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  // It's irrelevant whether array is smi-only or not when writing a smi.
+  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(scratch, key);
+  __ StoreP(value, MemOperand(address, scratch));
+  __ Ret();
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(scratch, key);
+  __ StoreP(value, MemOperand(address, scratch));
+  __ la(address, MemOperand(address, scratch));
+  // Update write barrier for the elements array address.
+  __ LoadRR(scratch, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+    __ bne(slow);
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so
+  // go to the runtime.
+  // @TODO(joransiu) : Fold AddP Operand into LoadlW
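+  // The hole is represented as a NaN with a dedicated upper word, so it is
+  // enough to compare only the upper 32 bits against kHoleNanUpper32 here.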
+  __ AddP(address, elements,
+          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
+                   kHeapObjectTag)));
+  __ SmiToDoubleArrayOffset(scratch, key);
+  __ LoadlW(scratch, MemOperand(address, scratch));
+  __ CmpP(scratch, Operand(kHoleNanUpper32));
+  __ bne(&fast_double_without_map_check, Label::kNear);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Ret();
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  __ bne(&non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   receiver_map, mode, slow);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ b(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, scratch, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ b(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, scratch, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ b(&finish_object_store);
+}
+
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+                                       LanguageMode language_mode) {
+  // ---------- S t a t e --------------
+  //  -- r2     : value
+  //  -- r3     : key
+  //  -- r4     : receiver
+  //  -- lr     : return address
+  // -----------------------------------
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array, maybe_name_key, miss;
+
+  // Register usage.
+  Register value = StoreDescriptor::ValueRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  DCHECK(receiver.is(r3));
+  DCHECK(key.is(r4));
+  DCHECK(value.is(r2));
+  Register receiver_map = r5;
+  Register elements_map = r8;
+  Register elements = r9;  // Elements array of the receiver.
+  // r6 and r7 are used as general scratch registers.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &maybe_name_key);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+  // Get the map of the object.
+  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ AndP(r0, ip,
+          Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ bne(&slow, Label::kNear);
+  // Check if the object is a JS array or not.
+  __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ CmpP(r6, Operand(JS_ARRAY_TYPE));
+  __ beq(&array);
+  // Check that the object is some kind of JSObject.
+  __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
+  __ blt(&slow, Label::kNear);
+
+  // Object case: Check key against length in the elements array.
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ blt(&fast_object);
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+  // Entry registers are intact.
+  // r2: value.
+  // r3: key.
+  // r4: receiver.
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
+  // Never returns to here.
+
+  __ bind(&maybe_name_key);
+  __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
+  __ JumpIfNotUniqueNameInstanceType(r6, &slow);
+
+  // The handlers in the stub cache expect a vector and slot. Since we won't
+  // change the IC from any downstream misses, a dummy vector can be used.
+  Register vector = VectorStoreICDescriptor::VectorRegister();
+  Register slot = VectorStoreICDescriptor::SlotRegister();
+  DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
+  Handle<TypeFeedbackVector> dummy_vector =
+      TypeFeedbackVector::DummyVector(masm->isolate());
+  int slot_index = dummy_vector->GetIndex(
+      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
+
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+                                               receiver, key, r7, r8, r9, ip);
+  // Cache miss.
+  __ b(&miss);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // Condition code from comparing key and array length is still available.
+  __ bne(&slow);  // Only support writing to array[array.length].
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ bge(&slow);
+  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ bne(&check_if_double_array, Label::kNear);
+  __ b(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  __ CmpP(elements_map,
+          Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ bne(&slow);
+  __ b(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array.
+  __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ bge(&extra);
+
+  KeyedStoreGenerateMegamorphicHelper(
+      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+      value, key, receiver, receiver_map, elements_map, elements);
+  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+                                      &fast_double_grow, &slow, kDontCheckMap,
+                                      kIncrementLength, value, key, receiver,
+                                      receiver_map, elements_map, elements);
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(r3));
+  DCHECK(name.is(r4));
+  DCHECK(StoreDescriptor::ValueRegister().is(r2));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+                                               receiver, name, r5, r6, r7, r8);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kStoreIC_Miss);
+}
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = r7;
+  DCHECK(receiver.is(r3));
+  DCHECK(name.is(r4));
+  DCHECK(value.is(r2));
+  DCHECK(VectorStoreICDescriptor::VectorRegister().is(r5));
+  DCHECK(VectorStoreICDescriptor::SlotRegister().is(r6));
+
+  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
+  GenerateMiss(masm);
+}
+
+#undef __
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address cmp_instruction_address =
+      Assembler::return_address_from_call_start(address);
+
+  // If the instruction following the call is not a CHI, nothing
+  // was inlined.
+  return (Instruction::S390OpcodeValue(cmp_instruction_address) == CHI);
+}
+
+//
+// This code is paired with the JumpPatchSite class in full-codegen-s390.cc
+//
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+                         InlinedSmiCheck check) {
+  Address cmp_instruction_address =
+      Assembler::return_address_from_call_start(address);
+
+  // If the instruction following the call is not a cmp rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(cmp_instruction_address);
+  if (Instruction::S390OpcodeValue(cmp_instruction_address) != CHI) {
+    return;
+  }
+
+  if (Instruction::S390OpcodeValue(address) != BRASL) {
+    return;
+  }
+  // The delta to the start of the map check instruction and the condition
+  // code used at the patched jump.
+  int delta = instr & 0x0000ffff;
+
+  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
+  // nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
+           cmp_instruction_address, delta);
+  }
+
+  // Expected sequence to enable by changing the following
+  //   CR/CGR  Rx, Rx    // 2 / 4 bytes
+  //   LR  R0, R0        // 2 bytes   // 31-bit only!
+  //   BRC/BRCL          // 4 / 6 bytes
+  // into
+  //   TMLL    Rx, XXX   // 4 bytes
+  //   BRC/BRCL          // 4 / 6 bytes
+  // And vice versa to disable.
+
+  // The following constant is the size of the CR/CGR + LR sequence.
+  const int kPatchAreaSizeNoBranch = 4;
+  Address patch_address = cmp_instruction_address - delta;
+  Address branch_address = patch_address + kPatchAreaSizeNoBranch;
+
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  SixByteInstr branch_instr = Assembler::instr_at(branch_address);
+
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  size_t patch_size = 0;
+  if (Instruction::S390OpcodeValue(branch_address) == BRC) {
+    patch_size = kPatchAreaSizeNoBranch + 4;
+  } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
+    patch_size = kPatchAreaSizeNoBranch + 6;
+  } else {
+    DCHECK(false);
+  }
+  CodePatcher patcher(isolate, patch_address, patch_size);
+  Register reg;
+  reg.reg_code = instr_at_patch & 0xf;
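+  // Recover the register being tested from the register field encoded in the
+  // original compare instruction.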
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    patcher.masm()->TestIfSmi(reg);
+  } else {
+    // Emit the NOP to ensure sufficient place for patching
+    // (replaced by LR + NILL)
+    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+    patcher.masm()->CmpP(reg, reg);
+#ifndef V8_TARGET_ARCH_S390X
+    patcher.masm()->nop();
+#endif
+  }
+
+  Condition cc = al;
+  if (Instruction::S390OpcodeValue(branch_address) == BRC) {
+    cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
+    DCHECK((cc == ne) || (cc == eq));
+    cc = (cc == ne) ? eq : ne;
+    patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
+  } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
+    cc = static_cast<Condition>(
+        (branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
+    DCHECK((cc == ne) || (cc == eq));
+    cc = (cc == ne) ? eq : ne;
+    patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
+  } else {
+    DCHECK(false);
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/s390/stub-cache-s390.cc b/src/ic/s390/stub-cache-s390.cc
new file mode 100644
index 0000000..054b946
--- /dev/null
+++ b/src/ic/s390/stub-cache-s390.cc
@@ -0,0 +1,187 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ic/stub-cache.h"
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Kind ic_kind, Code::Flags flags,
+                       StubCache::Table table, Register receiver, Register name,
+                       // Number of the cache entry, not scaled.
+                       Register offset, Register scratch, Register scratch2,
+                       Register offset_scratch) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+  uintptr_t value_off_addr =
+      reinterpret_cast<uintptr_t>(value_offset.address());
+  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+  // Check the relative positions of the address fields.
+  DCHECK(value_off_addr > key_off_addr);
+  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+  DCHECK(map_off_addr > key_off_addr);
+  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ ShiftLeftP(offset_scratch, offset, Operand(1));
+  __ AddP(offset_scratch, offset, offset_scratch);
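+  // offset_scratch = (offset << 1) + offset, i.e. offset * 3.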
+
+  // Calculate the base address of the entry.
+  __ mov(base_addr, Operand(key_offset));
+#if V8_TARGET_ARCH_S390X
+  DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
+  __ ShiftLeftP(offset_scratch, offset_scratch,
+                Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
+#else
+  DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
+#endif
+  __ AddP(base_addr, base_addr, offset_scratch);
+
+  // Check that the key in the entry matches the name.
+  __ CmpP(name, MemOperand(base_addr, 0));
+  __ bne(&miss, Label::kNear);
+
+  // Check the map matches.
+  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ CmpP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ bne(&miss, Label::kNear);
+
+  // Get the code entry from the cache.
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ LoadlW(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+
+  DCHECK(!r0.is(flags_reg));
+  __ AndP(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+  __ CmpLogicalP(flags_reg, Operand(flags));
+  __ bne(&miss, Label::kNear);
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ b(&miss, Label::kNear);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ b(&miss, Label::kNear);
+  }
+#endif
+
+  // Jump to the first instruction in the code stub.
+  // TODO(joransiu): Combine into indirect branch
+  __ la(code, MemOperand(code, Code::kHeaderSize - kHeapObjectTag));
+  __ b(code);
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+#if V8_TARGET_ARCH_S390X
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 24.
+  DCHECK(sizeof(Entry) == 24);
+#else
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 12.
+  DCHECK(sizeof(Entry) == 12);
+#endif
+
+  // Make sure the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+  // Check scratch, extra and extra2 registers are valid.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(!extra.is(no_reg));
+  DCHECK(!extra2.is(no_reg));
+  DCHECK(!extra3.is(no_reg));
+
+#ifdef DEBUG
+  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+  // extra3 don't conflict with the vector and slot registers, which need
+  // to be preserved for a handler call or miss.
+  if (IC::ICUseVector(ic_kind)) {
+    Register vector, slot;
+    if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+      vector = VectorStoreICDescriptor::VectorRegister();
+      slot = VectorStoreICDescriptor::SlotRegister();
+    } else {
+      vector = LoadWithVectorDescriptor::VectorRegister();
+      slot = LoadWithVectorDescriptor::SlotRegister();
+    }
+    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+  }
+#endif
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+                      extra3);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ AddP(scratch, scratch, ip);
+  __ XorP(scratch, scratch, Operand(flags));
+  // The mask omits the last two bits because they are not part of the hash.
+  __ AndP(scratch, scratch,
+          Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
+
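+  // scratch now holds the primary table index:
+  //   ((hash field + map) ^ flags), masked to the primary table size.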
+  // Probe the primary table.
+  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
+             extra, extra2, extra3);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ SubP(scratch, scratch, name);
+  __ AddP(scratch, scratch, Operand(flags));
+  __ AndP(scratch, scratch,
+          Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
+
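+  // scratch now holds the secondary table index:
+  //   ((primary index - name) + flags), masked to the secondary table size.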
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
+             extra, extra2, extra3);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+                      extra3);
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index ac3dd9a..dde6169 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -142,7 +143,7 @@
   __ PushReturnAddressFrom(scratch);
   // Stack now matches JSFunction abi.
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = rdi;
   Register data = rbx;
   Register holder = rcx;
@@ -209,7 +210,7 @@
           RelocInfo::EXTERNAL_REFERENCE);
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -238,6 +239,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ pushq(rsi);
     // Save value register, so we can restore it later.
     __ Push(value());
 
@@ -254,11 +257,11 @@
       }
       __ Push(receiver);
       __ Push(value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Set(rax, 1);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -269,7 +272,7 @@
     __ Pop(rax);
 
     // Restore context register.
-    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ popq(rsi);
   }
   __ ret(0);
 }
@@ -286,6 +289,9 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ pushq(rsi);
+
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
       DCHECK(!receiver.is(scratch));
@@ -297,11 +303,11 @@
         receiver = scratch;
       }
       __ Push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Set(rax, 0);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -309,7 +315,7 @@
     }
 
     // Restore context register.
-    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ popq(rsi);
   }
   __ ret(0);
 }
@@ -754,22 +760,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  __ PopReturnAddressTo(scratch1());
-  __ Push(receiver());
-  __ Push(this->name());
-  __ Push(value());
-  __ PushReturnAddressFrom(scratch1());
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index 1b25f06..281faba 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -6,6 +6,7 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/api-arguments.h"
 #include "src/field-type.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -23,6 +24,9 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(esi);
+
     if (accessor_index >= 0) {
       DCHECK(!holder.is(scratch));
       DCHECK(!receiver.is(scratch));
@@ -34,11 +38,11 @@
         receiver = scratch;
       }
       __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
-      __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Set(eax, 0);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -46,7 +50,7 @@
     }
 
     // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ pop(esi);
   }
   __ ret(0);
 }
@@ -90,7 +94,7 @@
 
   // Bail out if the receiver has a named interceptor or requires access checks.
   __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
-            kInterceptorOrAccessCheckNeededMask);
+            Immediate(kInterceptorOrAccessCheckNeededMask));
   __ j(not_zero, miss_label);
 
   // Check that receiver is a JSObject.
@@ -158,7 +162,7 @@
   // Stack now matches JSFunction abi.
   DCHECK(optimization.is_simple_api_call());
 
-  // Abi for CallApiFunctionStub.
+  // Abi for CallApiCallbackStub.
   Register callee = edi;
   Register data = ebx;
   Register holder = ecx;
@@ -220,7 +224,7 @@
   __ mov(api_function_address, Immediate(function_address));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+  CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
                            !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
@@ -252,6 +256,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Save context register
+    __ push(esi);
     // Save value register, so we can restore it later.
     __ push(value());
 
@@ -267,11 +273,11 @@
       }
       __ push(receiver);
       __ push(value());
-      ParameterCount actual(1);
-      ParameterCount expected(expected_arguments);
       __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
-      __ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
+      __ Set(eax, 1);
+      __ Call(masm->isolate()->builtins()->CallFunction(
+                  ConvertReceiverMode::kNotNullOrUndefined),
+              RelocInfo::CODE_TARGET);
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -280,9 +286,8 @@
 
     // We have to return the passed value, not the return value of the setter.
     __ pop(eax);
-
     // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ pop(esi);
   }
   __ ret(0);
 }
@@ -758,22 +763,6 @@
 }
 
 
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
-    Handle<Name> name) {
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(this->name());
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  // Do tail-call to the runtime system.
-  __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
 Register NamedStoreHandlerCompiler::value() {
   return StoreDescriptor::ValueRegister();
 }
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index 5bbd9c5..b51045b 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -150,8 +150,9 @@
   __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
 
   // Check bit field.
-  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+  __ test_b(
+      FieldOperand(map, Map::kBitFieldOffset),
+      Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
   __ j(not_zero, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type. In
   // the case that the object is a value-wrapper object, we enter the runtime
@@ -202,9 +203,9 @@
   // scratch2: map of current prototype
   __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
   __ j(below, slow);
-  __ test_b(
-      FieldOperand(scratch2, Map::kBitFieldOffset),
-      (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor));
+  __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
+            Immediate((1 << Map::kIsAccessCheckNeeded) |
+                      (1 << Map::kHasIndexedInterceptor)));
   __ j(not_zero, slow);
   __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
   __ j(not_equal, slow);
@@ -251,7 +252,7 @@
   // bit test is enough.
   STATIC_ASSERT(kNotInternalizedTag != 0);
   __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
-            kIsNotInternalizedMask);
+            Immediate(kIsNotInternalizedMask));
   __ j(not_zero, not_unique);
 
   __ bind(&unique);
@@ -521,7 +522,7 @@
   // Check that the receiver does not require access checks and is not observed.
   // The generic stub does not perform map checks or handle observed objects.
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+            Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
   __ j(not_zero, &slow);
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &maybe_name_key);
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index cc46a56..9ee4269 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -197,31 +197,9 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ToLengthDescriptor::InitializePlatformSpecific(
+void TypeConversionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {ReceiverRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {ReceiverRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToNameDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {ReceiverRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {ReceiverRegister()};
+  Register registers[] = {ArgumentRegister()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -352,6 +330,15 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+FunctionType* FastArrayPushDescriptor::BuildCallInterfaceDescriptorFunctionType(
+    Isolate* isolate, int paramater_count) {
+  Zone* zone = isolate->interface_descriptor_zone();
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), AnyTagged(zone), 1, zone)->AsFunction();
+  function->InitParameter(0, UntaggedIntegral32(zone));  // actual #arguments
+  return function;
+}
+
 FunctionType*
 FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
@@ -494,28 +481,45 @@
   return function;
 }
 
-FunctionType* ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int paramater_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
-  function->InitParameter(0, AnyTagged(zone));           // callee
-  function->InitParameter(1, AnyTagged(zone));           // call_data
-  function->InitParameter(2, AnyTagged(zone));           // holder
-  function->InitParameter(3, ExternalPointer(zone));     // api_function_address
-  function->InitParameter(4, UntaggedIntegral32(zone));  // actual #arguments
-  return function;
+CallInterfaceDescriptor ApiCallbackDescriptorBase::ForArgs(Isolate* isolate,
+                                                           int argc) {
+  switch (argc) {
+    case 0:
+      return ApiCallbackWith0ArgsDescriptor(isolate);
+    case 1:
+      return ApiCallbackWith1ArgsDescriptor(isolate);
+    case 2:
+      return ApiCallbackWith2ArgsDescriptor(isolate);
+    case 3:
+      return ApiCallbackWith3ArgsDescriptor(isolate);
+    case 4:
+      return ApiCallbackWith4ArgsDescriptor(isolate);
+    case 5:
+      return ApiCallbackWith5ArgsDescriptor(isolate);
+    case 6:
+      return ApiCallbackWith6ArgsDescriptor(isolate);
+    case 7:
+      return ApiCallbackWith7ArgsDescriptor(isolate);
+    default:
+      UNREACHABLE();
+      return VoidDescriptor(isolate);
+  }
 }
 
-FunctionType* ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int paramater_count) {
+FunctionType*
+ApiCallbackDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
+    Isolate* isolate, int parameter_count, int argc) {
   Zone* zone = isolate->interface_descriptor_zone();
   FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4 + argc, zone)
+          ->AsFunction();
   function->InitParameter(0, AnyTagged(zone));        // callee
   function->InitParameter(1, AnyTagged(zone));        // call_data
   function->InitParameter(2, AnyTagged(zone));        // holder
   function->InitParameter(3, ExternalPointer(zone));  // api_function_address
+  for (int i = 0; i < argc; i++) {
+    function->InitParameter(4 + i, AnyTagged(zone));  // call argument #i
+  }
   return function;
 }
 
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index fb1969d..dcce0af 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -23,18 +23,14 @@
   V(VectorStoreIC)                            \
   V(InstanceOf)                               \
   V(LoadWithVector)                           \
+  V(FastArrayPush)                            \
   V(FastNewClosure)                           \
   V(FastNewContext)                           \
   V(FastNewObject)                            \
   V(FastNewRestParameter)                     \
   V(FastNewSloppyArguments)                   \
   V(FastNewStrictArguments)                   \
-  V(ToNumber)                                 \
-  V(ToLength)                                 \
-  V(ToString)                                 \
-  V(ToName)                                   \
-  V(ToObject)                                 \
-  V(NumberToString)                           \
+  V(TypeConversion)                           \
   V(Typeof)                                   \
   V(FastCloneRegExp)                          \
   V(FastCloneShallowArray)                    \
@@ -52,14 +48,22 @@
   V(TransitionElementsKind)                   \
   V(AllocateHeapNumber)                       \
   V(AllocateMutableHeapNumber)                \
+  V(AllocateFloat32x4)                        \
+  V(AllocateInt32x4)                          \
+  V(AllocateUint32x4)                         \
+  V(AllocateBool32x4)                         \
+  V(AllocateInt16x8)                          \
+  V(AllocateUint16x8)                         \
+  V(AllocateBool16x8)                         \
+  V(AllocateInt8x16)                          \
+  V(AllocateUint8x16)                         \
+  V(AllocateBool8x16)                         \
   V(AllocateInNewSpace)                       \
   V(ArrayConstructorConstantArgCount)         \
   V(ArrayConstructor)                         \
   V(InternalArrayConstructorConstantArgCount) \
   V(InternalArrayConstructor)                 \
   V(Compare)                                  \
-  V(CompareNil)                               \
-  V(ToBoolean)                                \
   V(BinaryOp)                                 \
   V(BinaryOpWithAllocationSite)               \
   V(StringAdd)                                \
@@ -68,8 +72,14 @@
   V(Named)                                    \
   V(CallHandler)                              \
   V(ArgumentAdaptor)                          \
-  V(ApiFunction)                              \
-  V(ApiAccessor)                              \
+  V(ApiCallbackWith0Args)                     \
+  V(ApiCallbackWith1Args)                     \
+  V(ApiCallbackWith2Args)                     \
+  V(ApiCallbackWith3Args)                     \
+  V(ApiCallbackWith4Args)                     \
+  V(ApiCallbackWith5Args)                     \
+  V(ApiCallbackWith6Args)                     \
+  V(ApiCallbackWith7Args)                     \
   V(ApiGetter)                                \
   V(LoadGlobalViaContext)                     \
   V(StoreGlobalViaContext)                    \
@@ -200,6 +210,7 @@
   void Initialize(Isolate* isolate, CallDescriptors::Key key) {
     if (!data()->IsInitialized()) {
       CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
+      DCHECK(d == data());  // d should be a modifiable pointer to data().
       InitializePlatformSpecific(d);
       FunctionType* function_type = BuildCallInterfaceDescriptorFunctionType(
           isolate, d->register_param_count());
@@ -211,18 +222,20 @@
   const CallInterfaceDescriptorData* data_;
 };
 
+#define DECLARE_DESCRIPTOR_WITH_BASE(name, base)           \
+ public:                                                   \
+  explicit name(Isolate* isolate) : base(isolate, key()) { \
+    Initialize(isolate, key());                            \
+  }                                                        \
+  static inline CallDescriptors::Key key();
 
 #define DECLARE_DESCRIPTOR(name, base)                                         \
-  explicit name(Isolate* isolate) : base(isolate, key()) {                     \
-    Initialize(isolate, key());                                                \
-  }                                                                            \
-                                                                               \
+  DECLARE_DESCRIPTOR_WITH_BASE(name, base)                                     \
  protected:                                                                    \
   void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override; \
   name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {}     \
                                                                                \
- public:                                                                       \
-  static inline CallDescriptors::Key key();
+ public:
 
 #define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
   DECLARE_DESCRIPTOR(name, base)                                 \
@@ -232,6 +245,17 @@
                                                                  \
  public:
 
+#define DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(name, base, arg) \
+  DECLARE_DESCRIPTOR_WITH_BASE(name, base)                                  \
+ protected:                                                                 \
+  FunctionType* BuildCallInterfaceDescriptorFunctionType(                   \
+      Isolate* isolate, int register_param_count) override {                \
+    return BuildCallInterfaceDescriptorFunctionTypeWithArg(                 \
+        isolate, register_param_count, arg);                                \
+  }                                                                         \
+                                                                            \
+ public:
+
 class VoidDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
@@ -397,56 +421,13 @@
                      CallInterfaceDescriptor)
 };
 
-
-class ToNumberDescriptor : public CallInterfaceDescriptor {
+class TypeConversionDescriptor final : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(ToNumberDescriptor, CallInterfaceDescriptor)
-};
+  enum ParameterIndices { kArgumentIndex };
 
+  DECLARE_DESCRIPTOR(TypeConversionDescriptor, CallInterfaceDescriptor)
 
-class ToLengthDescriptor : public CallInterfaceDescriptor {
- public:
-  enum ParameterIndices { kReceiverIndex };
-
-  DECLARE_DESCRIPTOR(ToLengthDescriptor, CallInterfaceDescriptor)
-
-  static const Register ReceiverRegister();
-};
-
-
-class ToStringDescriptor : public CallInterfaceDescriptor {
- public:
-  enum ParameterIndices { kReceiverIndex };
-
-  DECLARE_DESCRIPTOR(ToStringDescriptor, CallInterfaceDescriptor)
-
-  static const Register ReceiverRegister();
-};
-
-
-class ToNameDescriptor : public CallInterfaceDescriptor {
- public:
-  enum ParameterIndices { kReceiverIndex };
-
-  DECLARE_DESCRIPTOR(ToNameDescriptor, CallInterfaceDescriptor)
-
-  static const Register ReceiverRegister();
-};
-
-
-class ToObjectDescriptor : public CallInterfaceDescriptor {
- public:
-  enum ParameterIndices { kReceiverIndex };
-
-  DECLARE_DESCRIPTOR(ToObjectDescriptor, CallInterfaceDescriptor)
-
-  static const Register ReceiverRegister();
-};
-
-
-class NumberToStringDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(NumberToStringDescriptor, CallInterfaceDescriptor)
+  static const Register ArgumentRegister();
 };
 
 
@@ -581,6 +562,13 @@
   DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
 };
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type)         \
+  class Allocate##Type##Descriptor : public CallInterfaceDescriptor {       \
+   public:                                                                  \
+    DECLARE_DESCRIPTOR(Allocate##Type##Descriptor, CallInterfaceDescriptor) \
+  };
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
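SIMD128_TYPES is an X-macro defined elsewhere in V8; applied to SIMD128_ALLOC_DESC it expands once per SIMD type with the (TYPE, Type, type, lane_count, lane_type) tuple named in the parameter list above, stamping out one Allocate<Type>Descriptor class each. The standalone sketch below shows the same X-macro technique with an invented, single-parameter type list; it is only an illustration, not V8's actual macro.

#include <iostream>
#include <string>

// Invented, simplified type list; V8's real SIMD128_TYPES lives in its own
// header and passes the full (TYPE, Type, type, lane_count, lane_type) tuple.
#define DEMO_SIMD_TYPES(V) \
  V(Float32x4)             \
  V(Int32x4)               \
  V(Bool8x16)

// Each expansion defines one descriptor-like class, mirroring
// SIMD128_ALLOC_DESC above.
#define DEMO_ALLOC_DESC(Type)                              \
  struct Allocate##Type##Descriptor {                      \
    static std::string name() { return "Allocate" #Type; } \
  };
DEMO_SIMD_TYPES(DEMO_ALLOC_DESC)
#undef DEMO_ALLOC_DESC

int main() {
  // The generated classes are ordinary classes once the preprocessor runs.
  std::cout << AllocateFloat32x4Descriptor::name() << "\n"
            << AllocateBool8x16Descriptor::name() << "\n";
}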
 
 class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
  public:
@@ -631,18 +619,6 @@
 };
 
 
-class CompareNilDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(CompareNilDescriptor, CallInterfaceDescriptor)
-};
-
-
-class ToBooleanDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(ToBooleanDescriptor, CallInterfaceDescriptor)
-};
-
-
 class BinaryOpDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
@@ -696,18 +672,75 @@
                                                CallInterfaceDescriptor)
 };
 
-
-class ApiFunctionDescriptor : public CallInterfaceDescriptor {
+// The ApiCallback*Descriptors have a lot of boilerplate. The superclass
+// ApiCallbackDescriptorBase contains all the logic, and the
+// ApiCallbackWith*ArgsDescriptor subclasses merely instantiate it with a
+// parameter for the number of args (a standalone sketch of the pattern
+// follows the subclass declarations below).
+//
+// The base class is not meant to be instantiated directly and has no
+// public constructors to ensure this is so.
+//
+// The simplest usage for all the ApiCallback*Descriptors is probably
+//   ApiCallbackDescriptorBase::ForArgs(isolate, argc)
+//
+class ApiCallbackDescriptorBase : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiFunctionDescriptor,
-                                               CallInterfaceDescriptor)
+  static CallInterfaceDescriptor ForArgs(Isolate* isolate, int argc);
+
+ protected:
+  ApiCallbackDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
+      : CallInterfaceDescriptor(isolate, key) {}
+  void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
+  FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
+      Isolate* isolate, int parameter_count, int argc);
 };
 
-
-class ApiAccessorDescriptor : public CallInterfaceDescriptor {
+class ApiCallbackWith0ArgsDescriptor : public ApiCallbackDescriptorBase {
  public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiAccessorDescriptor,
-                                               CallInterfaceDescriptor)
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith0ArgsDescriptor, ApiCallbackDescriptorBase, 0)
+};
+
+class ApiCallbackWith1ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith1ArgsDescriptor, ApiCallbackDescriptorBase, 1)
+};
+
+class ApiCallbackWith2ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith2ArgsDescriptor, ApiCallbackDescriptorBase, 2)
+};
+
+class ApiCallbackWith3ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith3ArgsDescriptor, ApiCallbackDescriptorBase, 3)
+};
+
+class ApiCallbackWith4ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith4ArgsDescriptor, ApiCallbackDescriptorBase, 4)
+};
+
+class ApiCallbackWith5ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith5ArgsDescriptor, ApiCallbackDescriptorBase, 5)
+};
+
+class ApiCallbackWith6ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith6ArgsDescriptor, ApiCallbackDescriptorBase, 6)
+};
+
+class ApiCallbackWith7ArgsDescriptor : public ApiCallbackDescriptorBase {
+ public:
+  DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(
+      ApiCallbackWith7ArgsDescriptor, ApiCallbackDescriptorBase, 7)
 };
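The comment above describes the division of labor: the base class carries the logic, the WithNArgs subclasses only bake in the argument count, and ForArgs maps a runtime argc onto them. The standalone sketch below mirrors that shape with mock names; it uses a class template where V8 uses the DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG macro, since each real descriptor class also needs its own CallDescriptors::Key entry (see the V(ApiCallbackWith*Args) list added above).

#include <cassert>
#include <cstdio>
#include <memory>

// Base class holds the shared logic (here: the 4 fixed parameters plus argc
// call arguments, mirroring BuildCallInterfaceDescriptorFunctionTypeWithArg).
class MockApiCallbackBase {
 public:
  virtual ~MockApiCallbackBase() = default;
  int parameter_count() const { return 4 + argc(); }
  virtual int argc() const = 0;
};

// Each "WithNArgs" variant only bakes in the argument count.
template <int kArgc>
class MockApiCallbackWithArgs final : public MockApiCallbackBase {
 public:
  int argc() const override { return kArgc; }
};

// ForArgs-style factory: map a runtime argc onto the fixed-argc variants.
std::unique_ptr<MockApiCallbackBase> MockForArgs(int argc) {
  switch (argc) {
    case 0: return std::make_unique<MockApiCallbackWithArgs<0>>();
    case 1: return std::make_unique<MockApiCallbackWithArgs<1>>();
    case 2: return std::make_unique<MockApiCallbackWithArgs<2>>();
    case 3: return std::make_unique<MockApiCallbackWithArgs<3>>();
    default: assert(false && "unsupported argc"); return nullptr;
  }
}

int main() {
  auto descriptor = MockForArgs(2);
  std::printf("argc=%d parameters=%d\n", descriptor->argc(),
              descriptor->parameter_count());
}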
 
 
@@ -741,6 +774,11 @@
   DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
 };
 
+class FastArrayPushDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastArrayPushDescriptor,
+                                               CallInterfaceDescriptor)
+};
 
 class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
  public:
@@ -751,7 +789,7 @@
   static const Register KeyRegister();
 };
 
-class InterpreterDispatchDescriptor  : public CallInterfaceDescriptor {
+class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
                                                CallInterfaceDescriptor)
@@ -784,8 +822,10 @@
   DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
 };
 
+#undef DECLARE_DESCRIPTOR_WITH_BASE
 #undef DECLARE_DESCRIPTOR
-
+#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
+#undef DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG
 
 // We define the association between CallDescriptors::Key and the specialized
 // descriptor here to reduce boilerplate and mistakes.
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 7103c72..109b01e 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -4,6 +4,7 @@
 
 #include "src/interpreter/bytecode-array-builder.h"
 #include "src/compiler.h"
+#include "src/interpreter/interpreter-intrinsics.h"
 
 namespace v8 {
 namespace internal {
@@ -17,49 +18,75 @@
     // This helper is expected to be instantiated only when the last bytecode is
     // in the same basic block.
     DCHECK(array_builder_.LastBytecodeInSameBlock());
+    bytecode_ = Bytecodes::FromByte(
+        array_builder_.bytecodes()->at(previous_bytecode_start_));
+    operand_scale_ = OperandScale::kSingle;
+    if (Bytecodes::IsPrefixScalingBytecode(bytecode_)) {
+      operand_scale_ = Bytecodes::PrefixBytecodeToOperandScale(bytecode_);
+      bytecode_ = Bytecodes::FromByte(
+          array_builder_.bytecodes()->at(previous_bytecode_start_ + 1));
+    }
   }
 
   // Returns the previous bytecode in the same basic block.
   MUST_USE_RESULT Bytecode GetBytecode() const {
     DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
-    return Bytecodes::FromByte(
-        array_builder_.bytecodes()->at(previous_bytecode_start_));
+    return bytecode_;
   }
 
-  // Returns the operand at operand_index for the previous bytecode in the
-  // same basic block.
-  MUST_USE_RESULT uint32_t GetOperand(int operand_index) const {
-    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
-    Bytecode bytecode = GetBytecode();
-    DCHECK_GE(operand_index, 0);
-    DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode));
-    size_t operand_offset =
-        previous_bytecode_start_ +
-        Bytecodes::GetOperandOffset(bytecode, operand_index);
-    OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
-    switch (size) {
-      case OperandSize::kNone:
-        UNREACHABLE();
-        break;
-      case OperandSize::kByte:
-        return static_cast<uint32_t>(
-            array_builder_.bytecodes()->at(operand_offset));
-      case OperandSize::kShort:
-        uint16_t operand =
-            (array_builder_.bytecodes()->at(operand_offset) << 8) +
-            array_builder_.bytecodes()->at(operand_offset + 1);
-        return static_cast<uint32_t>(operand);
-    }
-    return 0;
+  MUST_USE_RESULT Register GetRegisterOperand(int operand_index) const {
+    return Register::FromOperand(GetSignedOperand(operand_index));
+  }
+
+  MUST_USE_RESULT uint32_t GetIndexOperand(int operand_index) const {
+    return GetUnsignedOperand(operand_index);
   }
 
   Handle<Object> GetConstantForIndexOperand(int operand_index) const {
     return array_builder_.constant_array_builder()->At(
-        GetOperand(operand_index));
+        GetIndexOperand(operand_index));
   }
 
  private:
+  // Returns the signed operand at operand_index for the previous
+  // bytecode in the same basic block.
+  MUST_USE_RESULT int32_t GetSignedOperand(int operand_index) const {
+    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+    OperandType operand_type =
+        Bytecodes::GetOperandType(bytecode_, operand_index);
+    DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+    const uint8_t* operand_start = GetOperandStart(operand_index);
+    return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
+                                          operand_scale_);
+  }
+
+  // Returns the unsigned operand at operand_index for the previous
+  // bytecode in the same basic block.
+  MUST_USE_RESULT uint32_t GetUnsignedOperand(int operand_index) const {
+    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+    OperandType operand_type =
+        Bytecodes::GetOperandType(bytecode_, operand_index);
+    DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+    const uint8_t* operand_start = GetOperandStart(operand_index);
+    return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
+                                            operand_scale_);
+  }
+
+  const uint8_t* GetOperandStart(int operand_index) const {
+    size_t operand_offset =
+        previous_bytecode_start_ + prefix_offset() +
+        Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale_);
+    return &(*array_builder_.bytecodes())[0] + operand_offset;
+  }
+
+  int prefix_offset() const {
+    return Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_) ? 1
+                                                                         : 0;
+  }
+
   const BytecodeArrayBuilder& array_builder_;
+  OperandScale operand_scale_;
+  Bytecode bytecode_;
   size_t previous_bytecode_start_;
 
   DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
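With operand scaling, the previous bytecode may be preceded by a prefix bytecode that widens its operands, which is why the helper above records operand_scale_ and re-reads the bytecode one byte later when a prefix is found. The standalone sketch below shows that decode step; the prefix byte values, enum and helper names are invented for the example and are not V8's actual encodings.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

enum class Scale { kSingle = 1, kDouble = 2, kQuadruple = 4 };
constexpr uint8_t kWidePrefix = 0xFE;       // invented prefix byte value
constexpr uint8_t kExtraWidePrefix = 0xFF;  // invented prefix byte value

struct Decoded {
  uint8_t bytecode;
  Scale scale;
  size_t prefix_size;  // 0 or 1, analogous to prefix_offset() above
};

Decoded DecodeAt(const std::vector<uint8_t>& stream, size_t offset) {
  Decoded d{stream[offset], Scale::kSingle, 0};
  if (d.bytecode == kWidePrefix || d.bytecode == kExtraWidePrefix) {
    // A scaling prefix: remember the scale and read the real bytecode after.
    d.scale = (d.bytecode == kWidePrefix) ? Scale::kDouble : Scale::kQuadruple;
    d.prefix_size = 1;
    d.bytecode = stream[offset + 1];
  }
  return d;
}

int main() {
  // A wide-prefixed bytecode 0x10 with one 16-bit operand (0x1234).
  std::vector<uint8_t> stream = {kWidePrefix, 0x10, 0x34, 0x12};
  Decoded d = DecodeAt(stream, 0);
  // Operands start after the prefix and bytecode, at the scaled width.
  std::printf("bytecode=0x%02x scale=%d operands_at=%zu\n", d.bytecode,
              static_cast<int>(d.scale), d.prefix_size + 1);
}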
@@ -67,7 +94,8 @@
 
 BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
                                            int parameter_count,
-                                           int context_count, int locals_count)
+                                           int context_count, int locals_count,
+                                           FunctionLiteral* literal)
     : isolate_(isolate),
       zone_(zone),
       bytecodes_(zone),
@@ -82,11 +110,15 @@
       parameter_count_(parameter_count),
       local_register_count_(locals_count),
       context_register_count_(context_count),
-      temporary_allocator_(zone, fixed_register_count()),
-      register_translator_(this) {
+      temporary_allocator_(zone, fixed_register_count()) {
   DCHECK_GE(parameter_count_, 0);
   DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
+  return_position_ =
+      literal ? std::max(literal->start_position(), literal->end_position() - 1)
+              : RelocInfo::kNoPosition;
+  LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
+                               source_position_table_builder()));
 }
 
 BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
@@ -119,45 +151,49 @@
   DCHECK(exit_seen_in_block_);
 
   int bytecode_size = static_cast<int>(bytecodes_.size());
-  int register_count =
-      fixed_and_temporary_register_count() + translation_register_count();
+  int register_count = fixed_and_temporary_register_count();
   int frame_size = register_count * kPointerSize;
   Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
   Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
-  Handle<FixedArray> source_position_table =
-      source_position_table_builder()->ToFixedArray();
-  Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
+  Handle<ByteArray> source_position_table =
+      source_position_table_builder()->ToSourcePositionTable();
+  Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
       bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
       constant_pool);
-  output->set_handler_table(*handler_table);
-  output->set_source_position_table(*source_position_table);
+  bytecode_array->set_handler_table(*handler_table);
+  bytecode_array->set_source_position_table(*source_position_table);
+
+  void* line_info = source_position_table_builder()->DetachJITHandlerData();
+  LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
+                               AbstractCode::cast(*bytecode_array), line_info));
+
   bytecode_generated_ = true;
-  return output;
+  return bytecode_array;
 }
 
-
 template <size_t N>
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t (&operands)[N],
+                                  OperandScale operand_scale) {
   // Don't output dead code.
-  if (exit_seen_in_block_) {
-    source_position_table_builder_.RevertPosition(bytecodes()->size());
-    return;
-  }
+  if (exit_seen_in_block_) return;
 
   int operand_count = static_cast<int>(N);
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
 
-  int register_operand_count = Bytecodes::NumberOfRegisterOperands(bytecode);
-  if (register_operand_count > 0) {
-    register_translator()->TranslateInputRegisters(bytecode, operands,
-                                                   operand_count);
+  last_bytecode_start_ = bytecodes()->size();
+  // Emit prefix bytecode for scale if required.
+  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
+    bytecodes()->push_back(Bytecodes::ToByte(
+        Bytecodes::OperandScaleToPrefixBytecode(operand_scale)));
   }
 
-  last_bytecode_start_ = bytecodes()->size();
+  // Emit bytecode.
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+
+  // Emit operands.
   for (int i = 0; i < operand_count; i++) {
-    DCHECK(OperandIsValid(bytecode, i, operands[i]));
-    switch (Bytecodes::GetOperandSize(bytecode, i)) {
+    DCHECK(OperandIsValid(bytecode, operand_scale, i, operands[i]));
+    switch (Bytecodes::GetOperandSize(bytecode, i, operand_scale)) {
       case OperandSize::kNone:
         UNREACHABLE();
         break;
@@ -171,58 +207,61 @@
                             operand_bytes + 2);
         break;
       }
+      case OperandSize::kQuad: {
+        uint8_t operand_bytes[4];
+        WriteUnalignedUInt32(operand_bytes, operands[i]);
+        bytecodes()->insert(bytecodes()->end(), operand_bytes,
+                            operand_bytes + 4);
+        break;
+      }
     }
   }
-
-  if (register_operand_count > 0) {
-    register_translator()->TranslateOutputRegisters();
-  }
 }
 
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1, uint32_t operand2,
-                                  uint32_t operand3) {
-  uint32_t operands[] = {operand0, operand1, operand2, operand3};
-  Output(bytecode, operands);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1, uint32_t operand2) {
-  uint32_t operands[] = {operand0, operand1, operand2};
-  Output(bytecode, operands);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1) {
-  uint32_t operands[] = {operand0, operand1};
-  Output(bytecode, operands);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
-  uint32_t operands[] = {operand0};
-  Output(bytecode, operands);
-}
-
-
 void BytecodeArrayBuilder::Output(Bytecode bytecode) {
   // Don't output dead code.
-  if (exit_seen_in_block_) {
-    source_position_table_builder_.RevertPosition(bytecodes()->size());
-    return;
-  }
+  if (exit_seen_in_block_) return;
 
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   last_bytecode_start_ = bytecodes()->size();
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
 }
 
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0, uint32_t operand1,
+                                        uint32_t operand2, uint32_t operand3) {
+  uint32_t operands[] = {operand0, operand1, operand2, operand3};
+  Output(bytecode, operands, operand_scale);
+}
+
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0, uint32_t operand1,
+                                        uint32_t operand2) {
+  uint32_t operands[] = {operand0, operand1, operand2};
+  Output(bytecode, operands, operand_scale);
+}
+
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0, uint32_t operand1) {
+  uint32_t operands[] = {operand0, operand1};
+  Output(bytecode, operands, operand_scale);
+}
+
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0) {
+  uint32_t operands[] = {operand0};
+  Output(bytecode, operands, operand_scale);
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
                                                             Register reg) {
-  Output(BytecodeForBinaryOperation(op), reg.ToRawOperand());
+  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+  OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
+               RegisterOperand(reg));
   return *this;
 }
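The Output/OutputScaled path above replaces the old *Wide bytecode variants: callers compute an OperandScale from the widest operand (via OperandSizesToScale), a Wide/ExtraWide prefix bytecode is emitted when the scale is not single, and each operand is then written at the scaled width. The standalone sketch below models that emission scheme with invented byte values and helper names; it is not V8's encoder.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

enum class Scale : int { kSingle = 1, kDouble = 2, kQuadruple = 4 };
constexpr uint8_t kWidePrefix = 0xFE;       // invented prefix byte value
constexpr uint8_t kExtraWidePrefix = 0xFF;  // invented prefix byte value

// Rough analogue of SizeForUnsignedOperand/OperandSizesToScale: the widest
// operand determines the scale for the whole instruction.
Scale ScaleFor(uint32_t operand) {
  if (operand <= 0xFF) return Scale::kSingle;
  if (operand <= 0xFFFF) return Scale::kDouble;
  return Scale::kQuadruple;
}

void Emit(std::vector<uint8_t>* out, uint8_t bytecode,
          const std::vector<uint32_t>& operands) {
  Scale scale = Scale::kSingle;
  for (uint32_t op : operands) scale = std::max(scale, ScaleFor(op));
  // Prefix only when some operand does not fit in a single byte.
  if (scale == Scale::kDouble) out->push_back(kWidePrefix);
  if (scale == Scale::kQuadruple) out->push_back(kExtraWidePrefix);
  out->push_back(bytecode);
  for (uint32_t op : operands) {
    // Each operand is written at the scaled width (1, 2 or 4 bytes).
    for (int i = 0; i < static_cast<int>(scale); i++) {
      out->push_back(static_cast<uint8_t>(op >> (8 * i)));
    }
  }
}

int main() {
  std::vector<uint8_t> bytes;
  Emit(&bytes, 0x10, {0x01});          // fits in bytes: no prefix emitted
  Emit(&bytes, 0x10, {0x1234, 0x02});  // needs 16-bit operands: wide prefix
  std::printf("%zu bytes emitted\n", bytes.size());
}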
 
@@ -245,7 +284,9 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
                                                              Register reg) {
-  Output(BytecodeForCompareOperation(op), reg.ToRawOperand());
+  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+  OutputScaled(BytecodeForCompareOperation(op), operand_scale,
+               RegisterOperand(reg));
   return *this;
 }
 
@@ -255,10 +296,11 @@
   int32_t raw_smi = smi->value();
   if (raw_smi == 0) {
     Output(Bytecode::kLdaZero);
-  } else if (raw_smi >= -128 && raw_smi <= 127) {
-    Output(Bytecode::kLdaSmi8, static_cast<uint8_t>(raw_smi));
   } else {
-    LoadLiteral(Handle<Object>(smi, isolate_));
+    OperandSize operand_size = SizeForSignedOperand(raw_smi);
+    OperandScale operand_scale = OperandSizesToScale(operand_size);
+    OutputScaled(Bytecode::kLdaSmi, operand_scale,
+                 SignedOperand(raw_smi, operand_size));
   }
   return *this;
 }
@@ -266,13 +308,9 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
   size_t entry = GetConstantPoolEntry(object);
-  if (FitsInIdx8Operand(entry)) {
-    Output(Bytecode::kLdaConstant, static_cast<uint8_t>(entry));
-  } else if (FitsInIdx16Operand(entry)) {
-    Output(Bytecode::kLdaConstantWide, static_cast<uint16_t>(entry));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(entry));
+  OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
   return *this;
 }
 
@@ -306,20 +344,12 @@
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
-  if (value) {
-    LoadTrue();
-  } else {
-    LoadFalse();
-  }
-  return *this;
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kLdar, reg.ToRawOperand());
+    OperandScale operand_scale =
+        OperandSizesToScale(SizeForRegisterOperand(reg));
+    OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
   }
   return *this;
 }
@@ -328,7 +358,9 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kStar, reg.ToRawOperand());
+    OperandScale operand_scale =
+        OperandSizesToScale(SizeForRegisterOperand(reg));
+    OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
   }
   return *this;
 }
@@ -337,164 +369,98 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  if (FitsInReg8Operand(from) && FitsInReg8Operand(to)) {
-    Output(Bytecode::kMov, from.ToRawOperand(), to.ToRawOperand());
-  } else if (FitsInReg16Operand(from) && FitsInReg16Operand(to)) {
-    Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(from),
+                                                   SizeForRegisterOperand(to));
+  OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
+               RegisterOperand(to));
   return *this;
 }
 
-void BytecodeArrayBuilder::MoveRegisterUntranslated(Register from,
-                                                    Register to) {
-  // Move bytecodes modify the stack. Checking validity is an
-  // essential mitigation against corrupting the stack.
-  if (FitsInReg8OperandUntranslated(from)) {
-    CHECK(RegisterIsValid(from, OperandType::kReg8) &&
-          RegisterIsValid(to, OperandType::kReg16));
-  } else if (FitsInReg8OperandUntranslated(to)) {
-    CHECK(RegisterIsValid(from, OperandType::kReg16) &&
-          RegisterIsValid(to, OperandType::kReg8));
-  } else {
-    UNIMPLEMENTED();
-  }
-  Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
     const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
   // TODO(rmcilroy): Potentially store typeof information in an
   // operand rather than having extra bytecodes.
   Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index),
+                          SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
     const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index),
+                          SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
                                                             int slot_index) {
-  DCHECK(slot_index >= 0);
-  if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlot, context.ToRawOperand(),
-           static_cast<uint8_t>(slot_index));
-  } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlotWide, context.ToRawOperand(),
-           static_cast<uint16_t>(slot_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+  OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
+               RegisterOperand(context), UnsignedOperand(slot_index));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
                                                              int slot_index) {
-  DCHECK(slot_index >= 0);
-  if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlot, context.ToRawOperand(),
-           static_cast<uint8_t>(slot_index));
-  } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlotWide, context.ToRawOperand(),
-           static_cast<uint16_t>(slot_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+  OutputScaled(Bytecode::kStaContextSlot, operand_scale,
+               RegisterOperand(context), UnsignedOperand(slot_index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
     const Handle<String> name, TypeofMode typeof_mode) {
   Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
                           ? Bytecode::kLdaLookupSlotInsideTypeof
                           : Bytecode::kLdaLookupSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index)) {
-    Output(bytecode, static_cast<uint8_t>(name_index));
-  } else if (FitsInIdx16Operand(name_index)) {
-    Output(BytecodeForWideOperands(bytecode),
-           static_cast<uint16_t>(name_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
     const Handle<String> name, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index)) {
-    Output(bytecode, static_cast<uint8_t>(name_index));
-  } else if (FitsInIdx16Operand(name_index)) {
-    Output(BytecodeForWideOperands(bytecode),
-           static_cast<uint16_t>(name_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(Bytecode::kLoadIC, object.ToRawOperand(),
-           static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(Bytecode::kLoadICWide, object.ToRawOperand(),
-           static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
+               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
     Register object, int feedback_slot) {
-  if (FitsInIdx8Operand(feedback_slot)) {
-    Output(Bytecode::kKeyedLoadIC, object.ToRawOperand(),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(Bytecode::kKeyedLoadICWide, object.ToRawOperand(),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -503,17 +469,11 @@
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreIC(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToRawOperand(), static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
-           static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
+               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -522,15 +482,11 @@
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
-  if (FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToRawOperand(), key.ToRawOperand(),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
-           key.ToRawOperand(), static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForRegisterOperand(key),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
+               RegisterOperand(key), UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -538,16 +494,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
     Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
   size_t entry = GetConstantPoolEntry(shared_info);
-  DCHECK(FitsInImm8Operand(tenured));
-  if (FitsInIdx8Operand(entry)) {
-    Output(Bytecode::kCreateClosure, static_cast<uint8_t>(entry),
-           static_cast<uint8_t>(tenured));
-  } else if (FitsInIdx16Operand(entry)) {
-    Output(Bytecode::kCreateClosureWide, static_cast<uint16_t>(entry),
-           static_cast<uint8_t>(tenured));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(entry));
+  OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
+               UnsignedOperand(static_cast<size_t>(tenured)));
   return *this;
 }
 
@@ -565,73 +515,55 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
     Handle<String> pattern, int literal_index, int flags) {
-  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
   size_t pattern_entry = GetConstantPoolEntry(pattern);
-  if (FitsInIdx8Operand(literal_index) && FitsInIdx8Operand(pattern_entry)) {
-    Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(pattern_entry),
-           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
-  } else if (FitsInIdx16Operand(literal_index) &&
-             FitsInIdx16Operand(pattern_entry)) {
-    Output(Bytecode::kCreateRegExpLiteralWide,
-           static_cast<uint16_t>(pattern_entry),
-           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForUnsignedOperand(pattern_entry),
+      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
+               UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
+               UnsignedOperand(flags));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
     Handle<FixedArray> constant_elements, int literal_index, int flags) {
-  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
   size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
-  if (FitsInIdx8Operand(literal_index) &&
-      FitsInIdx8Operand(constant_elements_entry)) {
-    Output(Bytecode::kCreateArrayLiteral,
-           static_cast<uint8_t>(constant_elements_entry),
-           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
-  } else if (FitsInIdx16Operand(literal_index) &&
-             FitsInIdx16Operand(constant_elements_entry)) {
-    Output(Bytecode::kCreateArrayLiteralWide,
-           static_cast<uint16_t>(constant_elements_entry),
-           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForUnsignedOperand(constant_elements_entry),
+      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
+               UnsignedOperand(constant_elements_entry),
+               UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
     Handle<FixedArray> constant_properties, int literal_index, int flags) {
-  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
   size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
-  if (FitsInIdx8Operand(literal_index) &&
-      FitsInIdx8Operand(constant_properties_entry)) {
-    Output(Bytecode::kCreateObjectLiteral,
-           static_cast<uint8_t>(constant_properties_entry),
-           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
-  } else if (FitsInIdx16Operand(literal_index) &&
-             FitsInIdx16Operand(constant_properties_entry)) {
-    Output(Bytecode::kCreateObjectLiteralWide,
-           static_cast<uint16_t>(constant_properties_entry),
-           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForUnsignedOperand(constant_properties_entry),
+      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
+               UnsignedOperand(constant_properties_entry),
+               UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
-  Output(Bytecode::kPushContext, context.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(context));
+  OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
-  Output(Bytecode::kPopContext, context.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(context));
+  OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
   return *this;
 }
 
@@ -649,7 +581,6 @@
     case Bytecode::kTestEqual:
     case Bytecode::kTestNotEqual:
     case Bytecode::kTestEqualStrict:
-    case Bytecode::kTestNotEqualStrict:
     case Bytecode::kTestLessThan:
     case Bytecode::kTestLessThanOrEqual:
     case Bytecode::kTestGreaterThan:
@@ -677,7 +608,6 @@
       case Bytecode::kToName:
       case Bytecode::kTypeOf:
         return *this;
-      case Bytecode::kLdaConstantWide:
       case Bytecode::kLdaConstant: {
         Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
         if (object->IsName()) return *this;
@@ -716,8 +646,12 @@
                                                  BytecodeLabel* label) {
   DCHECK(!label->is_bound());
   DCHECK(target.is_bound());
-  PatchJump(bytecodes()->begin() + target.offset(),
-            bytecodes()->begin() + label->offset());
+  if (label->is_forward_target()) {
+    // An earlier jump instruction refers to this label. Update its location.
+    PatchJump(bytecodes()->begin() + target.offset(),
+              bytecodes()->begin() + label->offset());
+    // Now treat the label as if it will only be referred to by
+    // backward jumps.
+  }
   label->bind_to(target.offset());
   LeaveBasicBlock();
   return *this;
@@ -746,38 +680,10 @@
       return Bytecode::kJumpIfUndefinedConstant;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
-
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
-    Bytecode jump_bytecode) {
-  switch (jump_bytecode) {
-    case Bytecode::kJump:
-      return Bytecode::kJumpConstantWide;
-    case Bytecode::kJumpIfTrue:
-      return Bytecode::kJumpIfTrueConstantWide;
-    case Bytecode::kJumpIfFalse:
-      return Bytecode::kJumpIfFalseConstantWide;
-    case Bytecode::kJumpIfToBooleanTrue:
-      return Bytecode::kJumpIfToBooleanTrueConstantWide;
-    case Bytecode::kJumpIfToBooleanFalse:
-      return Bytecode::kJumpIfToBooleanFalseConstantWide;
-    case Bytecode::kJumpIfNotHole:
-      return Bytecode::kJumpIfNotHoleConstantWide;
-    case Bytecode::kJumpIfNull:
-      return Bytecode::kJumpIfNullConstantWide;
-    case Bytecode::kJumpIfUndefined:
-      return Bytecode::kJumpIfUndefinedConstantWide;
-    default:
-      UNREACHABLE();
-      return static_cast<Bytecode>(-1);
-  }
-}
-
-
 // static
 Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
   switch (jump_bytecode) {
@@ -793,7 +699,7 @@
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -803,54 +709,88 @@
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
   DCHECK_EQ(*operand_location, 0);
-  if (FitsInImm8Operand(delta)) {
-    // The jump fits within the range of an Imm8 operand, so cancel
+  if (SizeForSignedOperand(delta) == OperandSize::kByte) {
+    // The jump fits within the range of an Imm operand, so cancel
     // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
     *operand_location = static_cast<uint8_t>(delta);
   } else {
-    // The jump does not fit within the range of an Imm8 operand, so
+    // The jump does not fit within the range of an Imm operand, so
     // commit reservation putting the offset into the constant pool,
     // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
-    DCHECK(FitsInIdx8Operand(entry));
+    DCHECK(SizeForUnsignedOperand(entry) == OperandSize::kByte);
     jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
     *jump_location = Bytecodes::ToByte(jump_bytecode);
     *operand_location = static_cast<uint8_t>(entry);
   }
 }
 
-
 void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
     const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
-  DCHECK(Bytecodes::IsJumpConstantWide(Bytecodes::FromByte(*jump_location)));
+  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
-  size_t entry = constant_array_builder()->CommitReservedEntry(
-      OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
-  DCHECK(FitsInIdx16Operand(entry));
   uint8_t operand_bytes[2];
-  WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+  if (SizeForSignedOperand(delta) <= OperandSize::kShort) {
+    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
+  } else {
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    *jump_location = Bytecodes::ToByte(jump_bytecode);
+    size_t entry = constant_array_builder()->CommitReservedEntry(
+        OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+  }
   DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
   *operand_location++ = operand_bytes[0];
   *operand_location = operand_bytes[1];
 }
 
+void BytecodeArrayBuilder::PatchIndirectJumpWith32BitOperand(
+    const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+  DCHECK(Bytecodes::IsJumpImmediate(Bytecodes::FromByte(*jump_location)));
+  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
+  ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+  uint8_t operand_bytes[4];
+  WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
+  DCHECK(*operand_location == 0 && *(operand_location + 1) == 0 &&
+         *(operand_location + 2) == 0 && *(operand_location + 3) == 0);
+  *operand_location++ = operand_bytes[0];
+  *operand_location++ = operand_bytes[1];
+  *operand_location++ = operand_bytes[2];
+  *operand_location = operand_bytes[3];
+}
 
 void BytecodeArrayBuilder::PatchJump(
     const ZoneVector<uint8_t>::iterator& jump_target,
     const ZoneVector<uint8_t>::iterator& jump_location) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
   int delta = static_cast<int>(jump_target - jump_location);
+  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+  int prefix_offset = 0;
+  OperandScale operand_scale = OperandScale::kSingle;
+  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
+    // If a prefix scaling bytecode is emitted, the target offset is one
+    // less than it would be without the prefix bytecode.
+    delta -= 1;
+    prefix_offset = 1;
+    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
+    jump_bytecode = Bytecodes::FromByte(*(jump_location + prefix_offset));
+  }
+
   DCHECK(Bytecodes::IsJump(jump_bytecode));
-  switch (Bytecodes::GetOperandSize(jump_bytecode, 0)) {
-    case OperandSize::kByte:
+  switch (operand_scale) {
+    case OperandScale::kSingle:
       PatchIndirectJumpWith8BitOperand(jump_location, delta);
       break;
-    case OperandSize::kShort:
-      PatchIndirectJumpWith16BitOperand(jump_location, delta);
+    case OperandScale::kDouble:
+      PatchIndirectJumpWith16BitOperand(jump_location + prefix_offset, delta);
       break;
-    case OperandSize::kNone:
+    case OperandScale::kQuadruple:
+      PatchIndirectJumpWith32BitOperand(jump_location + prefix_offset, delta);
+      break;
+    default:
       UNREACHABLE();
   }
   unbound_jumps_--;
@@ -860,10 +800,7 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
                                                        BytecodeLabel* label) {
   // Don't emit dead code.
-  if (exit_seen_in_block_) {
-    source_position_table_builder_.RevertPosition(bytecodes()->size());
-    return *this;
-  }
+  if (exit_seen_in_block_) return *this;
 
   // Check if the value in accumulator is boolean, if not choose an
   // appropriate JumpIfToBoolean bytecode.
@@ -877,22 +814,14 @@
     CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
     size_t abs_delta = bytecodes()->size() - label->offset();
     int delta = -static_cast<int>(abs_delta);
-
-    if (FitsInImm8Operand(delta)) {
-      Output(jump_bytecode, static_cast<uint8_t>(delta));
-    } else {
-      size_t entry =
-          GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
-      if (FitsInIdx8Operand(entry)) {
-        Output(GetJumpWithConstantOperand(jump_bytecode),
-               static_cast<uint8_t>(entry));
-      } else if (FitsInIdx16Operand(entry)) {
-        Output(GetJumpWithConstantWideOperand(jump_bytecode),
-               static_cast<uint16_t>(entry));
-      } else {
-        UNREACHABLE();
-      }
+    OperandSize operand_size = SizeForSignedOperand(delta);
+    if (operand_size > OperandSize::kByte) {
+      // Adjust the delta for the scaling prefix byte of a wide jump.
+      DCHECK_LE(delta, 0);
+      delta -= 1;
     }
+    OutputScaled(jump_bytecode, OperandSizesToScale(operand_size),
+                 SignedOperand(delta, operand_size));
   } else {
     // The label has not yet been bound so this is a forward reference
     // that will be patched when the label is bound. We create a
@@ -904,16 +833,7 @@
     unbound_jumps_++;
     OperandSize reserved_operand_size =
         constant_array_builder()->CreateReservedEntry();
-    switch (reserved_operand_size) {
-      case OperandSize::kByte:
-        Output(jump_bytecode, 0);
-        break;
-      case OperandSize::kShort:
-        Output(GetJumpWithConstantWideOperand(jump_bytecode), 0);
-        break;
-      case OperandSize::kNone:
-        UNREACHABLE();
-    }
+    OutputScaled(jump_bytecode, OperandSizesToScale(reserved_operand_size), 0);
   }
   LeaveBasicBlock();
   return *this;
@@ -970,6 +890,7 @@
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
+  SetReturnPosition();
   Output(Bytecode::kReturn);
   exit_seen_in_block_ = true;
   return *this;
@@ -982,44 +903,40 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
     Register cache_info_triple) {
-  if (FitsInReg8Operand(cache_info_triple)) {
-    Output(Bytecode::kForInPrepare, cache_info_triple.ToRawOperand());
-  } else if (FitsInReg16Operand(cache_info_triple)) {
-    Output(Bytecode::kForInPrepareWide, cache_info_triple.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(cache_info_triple));
+  OutputScaled(Bytecode::kForInPrepare, operand_scale,
+               RegisterOperand(cache_info_triple));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
                                                       Register cache_length) {
-  Output(Bytecode::kForInDone, index.ToRawOperand(),
-         cache_length.ToRawOperand());
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(index), SizeForRegisterOperand(cache_length));
+  OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
+               RegisterOperand(cache_length));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
-    Register receiver, Register index, Register cache_type_array_pair) {
-  if (FitsInReg8Operand(receiver) && FitsInReg8Operand(index) &&
-      FitsInReg8Operand(cache_type_array_pair)) {
-    Output(Bytecode::kForInNext, receiver.ToRawOperand(), index.ToRawOperand(),
-           cache_type_array_pair.ToRawOperand());
-  } else if (FitsInReg16Operand(receiver) && FitsInReg16Operand(index) &&
-             FitsInReg16Operand(cache_type_array_pair)) {
-    Output(Bytecode::kForInNextWide, receiver.ToRawOperand(),
-           index.ToRawOperand(), cache_type_array_pair.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+    Register receiver, Register index, Register cache_type_array_pair,
+    int feedback_slot) {
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(receiver), SizeForRegisterOperand(index),
+      SizeForRegisterOperand(cache_type_array_pair),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
+               RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
-  Output(Bytecode::kForInStep, index.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(index));
+  OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
   return *this;
 }
 
@@ -1051,12 +968,12 @@
   exit_seen_in_block_ = false;
 }
 
-void BytecodeArrayBuilder::EnsureReturn(FunctionLiteral* literal) {
+void BytecodeArrayBuilder::EnsureReturn() {
   if (!exit_seen_in_block_) {
     LoadUndefined();
-    SetReturnPosition(literal);
     Return();
   }
+  DCHECK(exit_seen_in_block_);
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
@@ -1065,23 +982,14 @@
                                                  int feedback_slot,
                                                  TailCallMode tail_call_mode) {
   Bytecode bytecode = BytecodeForCall(tail_call_mode);
-  if (FitsInReg8Operand(callable) && FitsInReg8Operand(receiver_args) &&
-      FitsInIdx8Operand(receiver_args_count) &&
-      FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
-           static_cast<uint8_t>(receiver_args_count),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInReg16Operand(callable) &&
-             FitsInReg16Operand(receiver_args) &&
-             FitsInIdx16Operand(receiver_args_count) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    bytecode = BytecodeForWideOperands(bytecode);
-    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
-           static_cast<uint16_t>(receiver_args_count),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(callable), SizeForRegisterOperand(receiver_args),
+      SizeForUnsignedOperand(receiver_args_count),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
+               RegisterOperand(receiver_args),
+               UnsignedOperand(receiver_args_count),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -1092,17 +1000,11 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  if (FitsInReg8Operand(constructor) && FitsInReg8Operand(first_arg) &&
-      FitsInIdx8Operand(arg_count)) {
-    Output(Bytecode::kNew, constructor.ToRawOperand(), first_arg.ToRawOperand(),
-           static_cast<uint8_t>(arg_count));
-  } else if (FitsInReg16Operand(constructor) && FitsInReg16Operand(first_arg) &&
-             FitsInIdx16Operand(arg_count)) {
-    Output(Bytecode::kNewWide, constructor.ToRawOperand(),
-           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(constructor), SizeForRegisterOperand(first_arg),
+      SizeForUnsignedOperand(arg_count));
+  OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
+               RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
@@ -1110,20 +1012,18 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
-  DCHECK(FitsInIdx16Operand(function_id));
+  DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count)) {
-    Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
-           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count));
-  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count)) {
-    Output(Bytecode::kCallRuntimeWide, static_cast<uint16_t>(function_id),
-           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
-  } else {
-    UNIMPLEMENTED();
-  }
+  Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
+                          ? Bytecode::kInvokeIntrinsic
+                          : Bytecode::kCallRuntime;
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count));
+  OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
+               RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
@@ -1132,180 +1032,145 @@
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
     Register first_return) {
   DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
-  DCHECK(FitsInIdx16Operand(function_id));
+  DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count) &&
-      FitsInReg8Operand(first_return)) {
-    Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
-           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count),
-           first_return.ToRawOperand());
-  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count) &&
-             FitsInReg16Operand(first_return)) {
-    Output(Bytecode::kCallRuntimeForPairWide,
-           static_cast<uint16_t>(function_id), first_arg.ToRawOperand(),
-           static_cast<uint16_t>(arg_count), first_return.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count),
+      SizeForRegisterOperand(first_return));
+  OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
+               static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
+               UnsignedOperand(arg_count), RegisterOperand(first_return));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
     int context_index, Register receiver_args, size_t receiver_args_count) {
-  DCHECK(FitsInIdx16Operand(context_index));
-  if (FitsInReg8Operand(receiver_args) &&
-      FitsInIdx8Operand(receiver_args_count)) {
-    Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
-           receiver_args.ToRawOperand(),
-           static_cast<uint8_t>(receiver_args_count));
-  } else if (FitsInReg16Operand(receiver_args) &&
-             FitsInIdx16Operand(receiver_args_count)) {
-    Output(Bytecode::kCallJSRuntimeWide, static_cast<uint16_t>(context_index),
-           receiver_args.ToRawOperand(),
-           static_cast<uint16_t>(receiver_args_count));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(context_index),
+                          SizeForRegisterOperand(receiver_args),
+                          SizeForUnsignedOperand(receiver_args_count));
+  OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
+               UnsignedOperand(context_index), RegisterOperand(receiver_args),
+               UnsignedOperand(receiver_args_count));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
-  Output(BytecodeForDelete(language_mode), object.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(object));
+  OutputScaled(BytecodeForDelete(language_mode), operand_scale,
+               RegisterOperand(object));
   return *this;
 }
 
-
 size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
   return constant_array_builder()->Insert(object);
 }
 
-void BytecodeArrayBuilder::SetReturnPosition(FunctionLiteral* fun) {
-  int pos = std::max(fun->start_position(), fun->end_position() - 1);
-  source_position_table_builder_.AddStatementPosition(bytecodes_.size(), pos);
+void BytecodeArrayBuilder::SetReturnPosition() {
+  if (return_position_ == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+                                                      return_position_);
 }
 
 void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
   if (stmt->position() == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
   source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
                                                       stmt->position());
 }
 
 void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
   source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
                                                        expr->position());
 }
 
+void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
+  if (expr->position() == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+                                                      expr->position());
+}
+
 bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
   return temporary_register_allocator()->RegisterIsLive(reg);
 }
 
-bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
+bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode,
+                                          OperandScale operand_scale,
+                                          int operand_index,
                                           uint32_t operand_value) const {
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode, operand_index, operand_scale);
   OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
   switch (operand_type) {
     case OperandType::kNone:
       return false;
-    case OperandType::kRegCount16: {
-      // Expect kRegCount16 is part of a range previous operand is a
-      // valid operand to start a range.
+    case OperandType::kRegCount: {
       if (operand_index > 0) {
         OperandType previous_operand_type =
             Bytecodes::GetOperandType(bytecode, operand_index - 1);
-        return ((previous_operand_type == OperandType::kMaybeReg16 ||
-                 previous_operand_type == OperandType::kReg16) &&
-                static_cast<uint16_t>(operand_value) == operand_value);
-      } else {
-        return false;
+        if (previous_operand_type != OperandType::kMaybeReg &&
+            previous_operand_type != OperandType::kReg) {
+          return false;
+        }
       }
+    }  // Fall-through
+    case OperandType::kFlag8:
+    case OperandType::kIdx:
+    case OperandType::kRuntimeId:
+    case OperandType::kImm: {
+      size_t unsigned_value = static_cast<size_t>(operand_value);
+      return SizeForUnsignedOperand(unsigned_value) <= operand_size;
     }
-    case OperandType::kRegCount8: {
-      // Expect kRegCount8 is part of a range previous operand is a
-      // valid operand to start a range.
-      if (operand_index > 0) {
-        OperandType previous_operand_type =
-            Bytecodes::GetOperandType(bytecode, operand_index - 1);
-        return ((previous_operand_type == OperandType::kMaybeReg8 ||
-                 previous_operand_type == OperandType::kReg8 ||
-                 previous_operand_type == OperandType::kMaybeReg16) &&
-                static_cast<uint8_t>(operand_value) == operand_value);
-      } else {
-        return false;
-      }
-    }
-    case OperandType::kIdx16:
-      return static_cast<uint16_t>(operand_value) == operand_value;
-    case OperandType::kImm8:
-    case OperandType::kIdx8:
-      return static_cast<uint8_t>(operand_value) == operand_value;
-    case OperandType::kMaybeReg8:
+    case OperandType::kMaybeReg:
       if (operand_value == 0) {
         return true;
       }
-    // Fall-through to kReg8 case.
-    case OperandType::kReg8:
-    case OperandType::kRegOut8:
-      return RegisterIsValid(Register::FromRawOperand(operand_value),
-                             operand_type);
-    case OperandType::kRegOutPair8:
-    case OperandType::kRegOutPair16:
-    case OperandType::kRegPair8:
-    case OperandType::kRegPair16: {
-      Register reg0 = Register::FromRawOperand(operand_value);
-      Register reg1 = Register(reg0.index() + 1);
-      return RegisterIsValid(reg0, operand_type) &&
-             RegisterIsValid(reg1, operand_type);
+    // Fall-through to kReg case.
+    case OperandType::kReg:
+    case OperandType::kRegOut: {
+      Register reg = RegisterFromOperand(operand_value);
+      return RegisterIsValid(reg, operand_size);
     }
-    case OperandType::kRegOutTriple8:
-    case OperandType::kRegOutTriple16: {
-      Register reg0 = Register::FromRawOperand(operand_value);
+    case OperandType::kRegOutPair:
+    case OperandType::kRegPair: {
+      Register reg0 = RegisterFromOperand(operand_value);
+      Register reg1 = Register(reg0.index() + 1);
+      // The size of reg1 is immaterial.
+      return RegisterIsValid(reg0, operand_size) &&
+             RegisterIsValid(reg1, OperandSize::kQuad);
+    }
+    case OperandType::kRegOutTriple: {
+      Register reg0 = RegisterFromOperand(operand_value);
       Register reg1 = Register(reg0.index() + 1);
       Register reg2 = Register(reg0.index() + 2);
-      return RegisterIsValid(reg0, operand_type) &&
-             RegisterIsValid(reg1, operand_type) &&
-             RegisterIsValid(reg2, operand_type);
-    }
-    case OperandType::kMaybeReg16:
-      if (operand_value == 0) {
-        return true;
-      }
-    // Fall-through to kReg16 case.
-    case OperandType::kReg16:
-    case OperandType::kRegOut16: {
-      Register reg = Register::FromRawOperand(operand_value);
-      return RegisterIsValid(reg, operand_type);
+      // The sizes of reg1 and reg2 are immaterial.
+      return RegisterIsValid(reg0, operand_size) &&
+             RegisterIsValid(reg1, OperandSize::kQuad) &&
+             RegisterIsValid(reg2, OperandSize::kQuad);
     }
   }
   UNREACHABLE();
   return false;
 }
 
-
 bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
-                                           OperandType reg_type) const {
+                                           OperandSize reg_size) const {
   if (!reg.is_valid()) {
     return false;
   }
 
-  switch (Bytecodes::SizeOfOperand(reg_type)) {
-    case OperandSize::kByte:
-      if (!FitsInReg8OperandUntranslated(reg)) {
-        return false;
-      }
-      break;
-    case OperandSize::kShort:
-      if (!FitsInReg16OperandUntranslated(reg)) {
-        return false;
-      }
-      break;
-    case OperandSize::kNone:
-      UNREACHABLE();
-      return false;
+  if (SizeForRegisterOperand(reg) > reg_size) {
+    return false;
   }
 
   if (reg.is_current_context() || reg.is_function_closure() ||
@@ -1314,15 +1179,10 @@
   } else if (reg.is_parameter()) {
     int parameter_index = reg.ToParameterIndex(parameter_count());
     return parameter_index >= 0 && parameter_index < parameter_count();
-  } else if (RegisterTranslator::InTranslationWindow(reg)) {
-    return translation_register_count() > 0;
+  } else if (reg.index() < fixed_register_count()) {
+    return true;
   } else {
-    reg = RegisterTranslator::UntranslateRegister(reg);
-    if (reg.index() < fixed_register_count()) {
-      return true;
-    } else {
-      return TemporaryRegisterIsLive(reg);
-    }
+    return TemporaryRegisterIsLive(reg);
   }
 }
 
@@ -1338,9 +1198,7 @@
     PreviousBytecodeHelper previous_bytecode(*this);
     Bytecode bytecode = previous_bytecode.GetBytecode();
     if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
-      Register previous_reg =
-          Register::FromOperand(previous_bytecode.GetOperand(0));
-      return previous_reg == reg;
+      return previous_bytecode.GetRegisterOperand(0) == reg;
     }
   }
   return false;
@@ -1374,7 +1232,7 @@
       return Bytecode::kShiftRightLogical;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
@@ -1388,7 +1246,7 @@
       return Bytecode::kDec;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
@@ -1402,8 +1260,6 @@
       return Bytecode::kTestNotEqual;
     case Token::Value::EQ_STRICT:
       return Bytecode::kTestEqualStrict;
-    case Token::Value::NE_STRICT:
-      return Bytecode::kTestNotEqualStrict;
     case Token::Value::LT:
       return Bytecode::kTestLessThan;
     case Token::Value::GT:
@@ -1418,49 +1274,7 @@
       return Bytecode::kTestIn;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
-  }
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
-  switch (bytecode) {
-    case Bytecode::kCall:
-      return Bytecode::kCallWide;
-    case Bytecode::kTailCall:
-      return Bytecode::kTailCallWide;
-    case Bytecode::kLoadIC:
-      return Bytecode::kLoadICWide;
-    case Bytecode::kKeyedLoadIC:
-      return Bytecode::kKeyedLoadICWide;
-    case Bytecode::kStoreICSloppy:
-      return Bytecode::kStoreICSloppyWide;
-    case Bytecode::kStoreICStrict:
-      return Bytecode::kStoreICStrictWide;
-    case Bytecode::kKeyedStoreICSloppy:
-      return Bytecode::kKeyedStoreICSloppyWide;
-    case Bytecode::kKeyedStoreICStrict:
-      return Bytecode::kKeyedStoreICStrictWide;
-    case Bytecode::kLdaGlobal:
-      return Bytecode::kLdaGlobalWide;
-    case Bytecode::kLdaGlobalInsideTypeof:
-      return Bytecode::kLdaGlobalInsideTypeofWide;
-    case Bytecode::kStaGlobalSloppy:
-      return Bytecode::kStaGlobalSloppyWide;
-    case Bytecode::kStaGlobalStrict:
-      return Bytecode::kStaGlobalStrictWide;
-    case Bytecode::kLdaLookupSlot:
-      return Bytecode::kLdaLookupSlotWide;
-    case Bytecode::kLdaLookupSlotInsideTypeof:
-      return Bytecode::kLdaLookupSlotInsideTypeofWide;
-    case Bytecode::kStaLookupSlotStrict:
-      return Bytecode::kStaLookupSlotStrictWide;
-    case Bytecode::kStaLookupSlotSloppy:
-      return Bytecode::kStaLookupSlotSloppyWide;
-    default:
-      UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
@@ -1472,12 +1286,10 @@
       return Bytecode::kStoreICSloppy;
     case STRICT:
       return Bytecode::kStoreICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1489,12 +1301,10 @@
       return Bytecode::kKeyedStoreICSloppy;
     case STRICT:
       return Bytecode::kKeyedStoreICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1513,12 +1323,10 @@
       return Bytecode::kStaGlobalSloppy;
     case STRICT:
       return Bytecode::kStaGlobalStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1530,12 +1338,10 @@
       return Bytecode::kStaLookupSlotSloppy;
     case STRICT:
       return Bytecode::kStaLookupSlotStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
@@ -1550,7 +1356,7 @@
       return Bytecode::kCreateRestParameter;
   }
   UNREACHABLE();
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1561,12 +1367,10 @@
       return Bytecode::kDeletePropertySloppy;
     case STRICT:
       return Bytecode::kDeletePropertyStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
@@ -1579,58 +1383,109 @@
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
-bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
-  return kMinUInt8 <= value && value <= kMaxUInt8;
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
-  return value <= static_cast<size_t>(kMaxUInt8);
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
-  return kMinInt8 <= value && value <= kMaxInt8;
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx16Operand(int value) {
-  return kMinUInt16 <= value && value <= kMaxUInt16;
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
-  return value <= static_cast<size_t>(kMaxUInt16);
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
-  return RegisterTranslator::FitsInReg8Operand(value);
+OperandSize BytecodeArrayBuilder::SizeForRegisterOperand(Register value) {
+  if (value.is_byte_operand()) {
+    return OperandSize::kByte;
+  } else if (value.is_short_operand()) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
 }
 
 // static
-bool BytecodeArrayBuilder::FitsInReg8OperandUntranslated(Register value) {
-  return value.is_byte_operand();
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
-  return RegisterTranslator::FitsInReg16Operand(value);
+OperandSize BytecodeArrayBuilder::SizeForSignedOperand(int value) {
+  if (kMinInt8 <= value && value <= kMaxInt8) {
+    return OperandSize::kByte;
+  } else if (kMinInt16 <= value && value <= kMaxInt16) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
 }
 
 // static
-bool BytecodeArrayBuilder::FitsInReg16OperandUntranslated(Register value) {
-  return value.is_short_operand();
+OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(int value) {
+  DCHECK_GE(value, 0);
+  if (value <= kMaxUInt8) {
+    return OperandSize::kByte;
+  } else if (value <= kMaxUInt16) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
+}
+
+OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(size_t value) {
+  if (value <= static_cast<size_t>(kMaxUInt8)) {
+    return OperandSize::kByte;
+  } else if (value <= static_cast<size_t>(kMaxUInt16)) {
+    return OperandSize::kShort;
+  } else if (value <= kMaxUInt32) {
+    return OperandSize::kQuad;
+  } else {
+    UNREACHABLE();
+    return OperandSize::kQuad;
+  }
+}
+
+OperandScale BytecodeArrayBuilder::OperandSizesToScale(OperandSize size0,
+                                                       OperandSize size1,
+                                                       OperandSize size2,
+                                                       OperandSize size3) {
+  OperandSize upper = std::max(size0, size1);
+  OperandSize lower = std::max(size2, size3);
+  OperandSize result = std::max(upper, lower);
+  // Operand sizes have been scaled before calling this function.
+  // Currently all scalable operands are byte-sized at
+  // OperandScale::kSingle.
+  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+                    static_cast<int>(OperandScale::kSingle) &&
+                static_cast<int>(OperandSize::kShort) ==
+                    static_cast<int>(OperandScale::kDouble) &&
+                static_cast<int>(OperandSize::kQuad) ==
+                    static_cast<int>(OperandScale::kQuadruple));
+  OperandScale operand_scale = static_cast<OperandScale>(result);
+  DCHECK(operand_scale == OperandScale::kSingle ||
+         operand_scale == OperandScale::kDouble ||
+         operand_scale == OperandScale::kQuadruple);
+  return operand_scale;
+}
+
+uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
+  return static_cast<uint32_t>(reg.ToOperand());
+}
+
+Register BytecodeArrayBuilder::RegisterFromOperand(uint32_t operand) {
+  return Register::FromOperand(static_cast<int32_t>(operand));
+}
+
+uint32_t BytecodeArrayBuilder::SignedOperand(int value, OperandSize size) {
+  switch (size) {
+    case OperandSize::kByte:
+      return static_cast<uint8_t>(value & 0xff);
+    case OperandSize::kShort:
+      return static_cast<uint16_t>(value & 0xffff);
+    case OperandSize::kQuad:
+      return static_cast<uint32_t>(value);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return 0;
+}
+
+uint32_t BytecodeArrayBuilder::UnsignedOperand(int value) {
+  DCHECK_GE(value, 0);
+  return static_cast<uint32_t>(value);
+}
+
+uint32_t BytecodeArrayBuilder::UnsignedOperand(size_t value) {
+  DCHECK_LE(value, kMaxUInt32);
+  return static_cast<uint32_t>(value);
 }
 
 }  // namespace interpreter
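
To make the scaled-operand emission above concrete, here is a minimal standalone sketch of the idea, with invented enum values, opcodes, and helper names (SizeForUnsigned, ScaleFor, Emit are not V8 functions): the widest operand size picks the scale, a prefix byte is emitted when the scale exceeds a single byte, and every operand is then written at the scaled width.

// bytecode_scale_demo.cc -- standalone sketch, not V8 source.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <vector>

// Hypothetical stand-ins for the interpreter's OperandSize/OperandScale.
enum class OperandSize : uint8_t { kByte = 1, kShort = 2, kQuad = 4 };
enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

// Smallest size that can hold an unsigned operand value.
OperandSize SizeForUnsigned(uint32_t value) {
  if (value <= 0xff) return OperandSize::kByte;
  if (value <= 0xffff) return OperandSize::kShort;
  return OperandSize::kQuad;
}

// The scale for a bytecode is the widest of its operand sizes; all operands
// are then emitted at that width.
OperandScale ScaleFor(std::initializer_list<OperandSize> sizes) {
  OperandSize widest = OperandSize::kByte;
  for (OperandSize s : sizes) widest = std::max(widest, s);
  return static_cast<OperandScale>(widest);
}

// Emit one bytecode: an optional prefix, the opcode, then each operand
// little-endian at the scaled width.
void Emit(std::vector<uint8_t>* out, uint8_t opcode, OperandScale scale,
          std::initializer_list<uint32_t> operands) {
  const uint8_t kWidePrefix = 0xfe, kExtraWidePrefix = 0xff;  // made-up values
  if (scale == OperandScale::kDouble) out->push_back(kWidePrefix);
  if (scale == OperandScale::kQuadruple) out->push_back(kExtraWidePrefix);
  out->push_back(opcode);
  for (uint32_t operand : operands) {
    for (int i = 0; i < static_cast<int>(scale); i++) {
      out->push_back(static_cast<uint8_t>(operand >> (8 * i)));
    }
  }
}

int main() {
  std::vector<uint8_t> bytes;
  // One operand needs 16 bits, so the whole bytecode is emitted wide.
  uint32_t index = 0x1234, flags = 1;
  OperandScale scale =
      ScaleFor({SizeForUnsigned(index), SizeForUnsigned(flags)});
  Emit(&bytes, /*opcode=*/0x42, scale, {index, flags});
  for (uint8_t b : bytes) std::printf("%02x ", b);
  std::printf("\n");  // prints: fe 42 34 12 01 00
  return 0;
}
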
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index fe69337..4446a63 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -10,7 +10,6 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/interpreter/handler-table-builder.h"
-#include "src/interpreter/register-translator.h"
 #include "src/interpreter/source-position-table.h"
 #include "src/zone-containers.h"
 
@@ -24,10 +23,11 @@
 class BytecodeLabel;
 class Register;
 
-class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
+class BytecodeArrayBuilder final : public ZoneObject {
  public:
   BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
-                       int context_count, int locals_count);
+                       int context_count, int locals_count,
+                       FunctionLiteral* literal = nullptr);
   ~BytecodeArrayBuilder();
 
   Handle<BytecodeArray> ToBytecodeArray();
@@ -65,13 +65,6 @@
     return temporary_register_allocator()->allocation_count();
   }
 
-  // Returns the number of registers used for translating wide
-  // register operands into byte sized register operands.
-  int translation_register_count() const {
-    return RegisterTranslator::RegisterCountAdjustment(
-        fixed_and_temporary_register_count(), parameter_count());
-  }
-
   Register Parameter(int parameter_index) const;
 
   // Return true if the register |reg| represents a parameter or a
@@ -89,7 +82,6 @@
   BytecodeArrayBuilder& LoadTheHole();
   BytecodeArrayBuilder& LoadTrue();
   BytecodeArrayBuilder& LoadFalse();
-  BytecodeArrayBuilder& LoadBooleanConstant(bool value);
 
   // Global loads to the accumulator and stores from the accumulator.
   BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
@@ -245,7 +237,8 @@
   BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
   BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
   BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
-                                  Register cache_type_array_pair);
+                                  Register cache_type_array_pair,
+                                  int feedback_slot);
   BytecodeArrayBuilder& ForInStep(Register index);
 
   // Exception handling.
@@ -257,8 +250,11 @@
   // entry, so that it can be referenced by above exception handling support.
   int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
 
+  void InitializeReturnPosition(FunctionLiteral* literal);
+
   void SetStatementPosition(Statement* stmt);
   void SetExpressionPosition(Expression* expr);
+  void SetExpressionAsStatementPosition(Expression* expr);
 
   // Accessors
   Zone* zone() const { return zone_; }
@@ -269,7 +265,23 @@
     return &temporary_allocator_;
   }
 
-  void EnsureReturn(FunctionLiteral* literal);
+  void EnsureReturn();
+
+  static OperandScale OperandSizesToScale(
+      OperandSize size0, OperandSize size1 = OperandSize::kByte,
+      OperandSize size2 = OperandSize::kByte,
+      OperandSize size3 = OperandSize::kByte);
+
+  static OperandSize SizeForRegisterOperand(Register reg);
+  static OperandSize SizeForSignedOperand(int value);
+  static OperandSize SizeForUnsignedOperand(int value);
+  static OperandSize SizeForUnsignedOperand(size_t value);
+
+  static uint32_t RegisterOperand(Register reg);
+  static Register RegisterFromOperand(uint32_t operand);
+  static uint32_t SignedOperand(int value, OperandSize size);
+  static uint32_t UnsignedOperand(int value);
+  static uint32_t UnsignedOperand(size_t value);
 
  private:
   class PreviousBytecodeHelper;
@@ -278,7 +290,6 @@
   static Bytecode BytecodeForBinaryOperation(Token::Value op);
   static Bytecode BytecodeForCountOperation(Token::Value op);
   static Bytecode BytecodeForCompareOperation(Token::Value op);
-  static Bytecode BytecodeForWideOperands(Bytecode bytecode);
   static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
   static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
   static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
@@ -288,32 +299,22 @@
   static Bytecode BytecodeForDelete(LanguageMode language_mode);
   static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
 
-  static bool FitsInIdx8Operand(int value);
-  static bool FitsInIdx8Operand(size_t value);
-  static bool FitsInImm8Operand(int value);
-  static bool FitsInIdx16Operand(int value);
-  static bool FitsInIdx16Operand(size_t value);
-  static bool FitsInReg8Operand(Register value);
-  static bool FitsInReg8OperandUntranslated(Register value);
-  static bool FitsInReg16Operand(Register value);
-  static bool FitsInReg16OperandUntranslated(Register value);
-
-  // RegisterMover interface.
-  void MoveRegisterUntranslated(Register from, Register to) override;
-
   static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
-  static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
   static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
 
   template <size_t N>
-  INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-              uint32_t operand2, uint32_t operand3);
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-              uint32_t operand2);
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
-  void Output(Bytecode bytecode, uint32_t operand0);
+  INLINE(void Output(Bytecode bytecode, uint32_t (&operands)[N],
+                     OperandScale operand_scale = OperandScale::kSingle));
   void Output(Bytecode bytecode);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0, uint32_t operand1, uint32_t operand2,
+                    uint32_t operand3);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0, uint32_t operand1, uint32_t operand2);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0, uint32_t operand1);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0);
 
   BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
                                    BytecodeLabel* label);
@@ -323,19 +324,21 @@
       const ZoneVector<uint8_t>::iterator& jump_location, int delta);
   void PatchIndirectJumpWith16BitOperand(
       const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+  void PatchIndirectJumpWith32BitOperand(
+      const ZoneVector<uint8_t>::iterator& jump_location, int delta);
 
   void LeaveBasicBlock();
 
-  bool OperandIsValid(Bytecode bytecode, int operand_index,
-                      uint32_t operand_value) const;
-  bool RegisterIsValid(Register reg, OperandType reg_type) const;
+  bool OperandIsValid(Bytecode bytecode, OperandScale operand_scale,
+                      int operand_index, uint32_t operand_value) const;
+  bool RegisterIsValid(Register reg, OperandSize reg_size) const;
 
   bool LastBytecodeInSameBlock() const;
   bool NeedToBooleanCast();
   bool IsRegisterInAccumulator(Register reg);
 
-  // Set position for implicit return.
-  void SetReturnPosition(FunctionLiteral* fun);
+  // Set position for return.
+  void SetReturnPosition();
 
   // Gets a constant pool entry for the |object|.
   size_t GetConstantPoolEntry(Handle<Object> object);
@@ -355,7 +358,6 @@
   SourcePositionTableBuilder* source_position_table_builder() {
     return &source_position_table_builder_;
   }
-  RegisterTranslator* register_translator() { return &register_translator_; }
 
   Isolate* isolate_;
   Zone* zone_;
@@ -371,8 +373,8 @@
   int parameter_count_;
   int local_register_count_;
   int context_register_count_;
+  int return_position_;
   TemporaryRegisterAllocator temporary_allocator_;
-  RegisterTranslator register_translator_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
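
The PatchJump/PatchIndirectJumpWith*BitOperand declarations above back-patch forward jumps whose operand space was reserved when the jump was emitted. A minimal sketch of that pattern, with invented opcodes and a plain vector standing in for the constant pool (the real code reserves and then discards or commits a constant-pool entry via DiscardReservedEntry/CommitReservedEntry, and also handles the scaling prefix):

// jump_patch_demo.cc -- standalone sketch, not V8 source.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Made-up opcodes: an 8-bit-immediate jump and its constant-pool variant.
const uint8_t kJump = 0x10, kJumpConstant = 0x11, kNop = 0x00;

// Patch a forward jump at |jump_offset| so it reaches |target_offset|. If the
// delta fits in the byte reserved after the opcode, write it directly;
// otherwise store the delta in |constants| and switch to kJumpConstant.
void PatchJump(std::vector<uint8_t>* code, std::vector<int>* constants,
               size_t jump_offset, size_t target_offset) {
  int delta = static_cast<int>(target_offset - jump_offset);
  if (delta <= 0x7f) {
    (*code)[jump_offset + 1] = static_cast<uint8_t>(delta);
  } else {
    constants->push_back(delta);
    (*code)[jump_offset] = kJumpConstant;
    (*code)[jump_offset + 1] = static_cast<uint8_t>(constants->size() - 1);
  }
}

int main() {
  std::vector<uint8_t> code;
  std::vector<int> constants;

  // Emit "Jump <placeholder>" and remember where it starts.
  size_t jump_offset = code.size();
  code.push_back(kJump);
  code.push_back(0);  // operand placeholder, patched when the label binds

  // Emit the bytecodes the jump should skip, then bind the label here.
  for (int i = 0; i < 3; i++) code.push_back(kNop);
  PatchJump(&code, &constants, jump_offset, code.size());

  for (uint8_t b : code) std::printf("%02x ", b);
  std::printf("\n");  // prints: 10 05 00 00 00 (delta 5 fit in one byte)
  return 0;
}
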
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index 0fea985..a17efcb 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -12,103 +12,119 @@
 
 BytecodeArrayIterator::BytecodeArrayIterator(
     Handle<BytecodeArray> bytecode_array)
-    : bytecode_array_(bytecode_array), bytecode_offset_(0) {}
-
-
-void BytecodeArrayIterator::Advance() {
-  bytecode_offset_ += Bytecodes::Size(current_bytecode());
+    : bytecode_array_(bytecode_array),
+      bytecode_offset_(0),
+      operand_scale_(OperandScale::kSingle),
+      prefix_offset_(0) {
+  UpdateOperandScale();
 }
 
+void BytecodeArrayIterator::Advance() {
+  bytecode_offset_ += current_bytecode_size();
+  UpdateOperandScale();
+}
+
+void BytecodeArrayIterator::UpdateOperandScale() {
+  if (!done()) {
+    uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+    Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+    if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+      operand_scale_ =
+          Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+      prefix_offset_ = 1;
+    } else {
+      operand_scale_ = OperandScale::kSingle;
+      prefix_offset_ = 0;
+    }
+  }
+}
 
 bool BytecodeArrayIterator::done() const {
   return bytecode_offset_ >= bytecode_array()->length();
 }
 
-
 Bytecode BytecodeArrayIterator::current_bytecode() const {
   DCHECK(!done());
-  uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
-  return interpreter::Bytecodes::FromByte(current_byte);
+  uint8_t current_byte =
+      bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
+  Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+  DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+  return current_bytecode;
 }
 
-
 int BytecodeArrayIterator::current_bytecode_size() const {
-  return Bytecodes::Size(current_bytecode());
+  return current_prefix_offset() +
+         Bytecodes::Size(current_bytecode(), current_operand_scale());
 }
 
-
-uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
-                                              OperandType operand_type) const {
+uint32_t BytecodeArrayIterator::GetUnsignedOperand(
+    int operand_index, OperandType operand_type) const {
   DCHECK_GE(operand_index, 0);
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
   DCHECK_EQ(operand_type,
             Bytecodes::GetOperandType(current_bytecode(), operand_index));
-  uint8_t* operand_start =
+  DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+  const uint8_t* operand_start =
       bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
-      Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
-  switch (Bytecodes::SizeOfOperand(operand_type)) {
-    case OperandSize::kByte:
-      return static_cast<uint32_t>(*operand_start);
-    case OperandSize::kShort:
-      return ReadUnalignedUInt16(operand_start);
-    case OperandSize::kNone:
-      UNREACHABLE();
-  }
-  return 0;
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
+                                          current_operand_scale());
 }
 
-
-int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
-  uint32_t operand = GetRawOperand(operand_index, OperandType::kImm8);
-  return static_cast<int8_t>(operand);
+int32_t BytecodeArrayIterator::GetSignedOperand(
+    int operand_index, OperandType operand_type) const {
+  DCHECK_GE(operand_index, 0);
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+  DCHECK_EQ(operand_type,
+            Bytecodes::GetOperandType(current_bytecode(), operand_index));
+  DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+  const uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
+                                        current_operand_scale());
 }
 
-int BytecodeArrayIterator::GetRegisterCountOperand(int operand_index) const {
-  OperandSize size =
-      Bytecodes::GetOperandSize(current_bytecode(), operand_index);
-  OperandType type = (size == OperandSize::kByte) ? OperandType::kRegCount8
-                                                  : OperandType::kRegCount16;
-  uint32_t operand = GetRawOperand(operand_index, type);
-  return static_cast<int>(operand);
+uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kFlag8);
+  return GetUnsignedOperand(operand_index, OperandType::kFlag8);
 }
 
+int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kImm);
+  return GetSignedOperand(operand_index, OperandType::kImm);
+}
 
-int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
+uint32_t BytecodeArrayIterator::GetRegisterCountOperand(
+    int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kRegCount);
+  return GetUnsignedOperand(operand_index, OperandType::kRegCount);
+}
+
+uint32_t BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(operand_type == OperandType::kIdx8 ||
-         operand_type == OperandType::kIdx16);
-  uint32_t operand = GetRawOperand(operand_index, operand_type);
-  return static_cast<int>(operand);
+  DCHECK_EQ(operand_type, OperandType::kIdx);
+  return GetUnsignedOperand(operand_index, operand_type);
 }
 
-
 Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
-  uint32_t operand = GetRawOperand(operand_index, operand_type);
-  Register reg;
-  switch (Bytecodes::GetOperandSize(current_bytecode(), operand_index)) {
-    case OperandSize::kByte:
-      reg = Register::FromOperand(static_cast<uint8_t>(operand));
-      break;
-    case OperandSize::kShort:
-      reg = Register::FromWideOperand(static_cast<uint16_t>(operand));
-      break;
-    case OperandSize::kNone:
-      UNREACHABLE();
-      reg = Register::invalid_value();
-      break;
-  }
-  DCHECK_GE(reg.index(),
-            Register::FromParameterIndex(0, bytecode_array()->parameter_count())
-                .index());
-  DCHECK(reg.index() < bytecode_array()->register_count() ||
-         (reg.index() == 0 &&
-          Bytecodes::IsMaybeRegisterOperandType(
-              Bytecodes::GetOperandType(current_bytecode(), operand_index))));
-  return reg;
+  const uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return Bytecodes::DecodeRegisterOperand(operand_start, operand_type,
+                                          current_operand_scale());
 }
 
 int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
@@ -116,20 +132,17 @@
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
   DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
   switch (operand_type) {
-    case OperandType::kRegPair8:
-    case OperandType::kRegPair16:
-    case OperandType::kRegOutPair8:
-    case OperandType::kRegOutPair16:
+    case OperandType::kRegPair:
+    case OperandType::kRegOutPair:
       return 2;
-    case OperandType::kRegOutTriple8:
-    case OperandType::kRegOutTriple16:
+    case OperandType::kRegOutTriple:
       return 3;
     default: {
       if (operand_index + 1 !=
           Bytecodes::NumberOfOperands(current_bytecode())) {
         OperandType next_operand_type =
             Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
-        if (Bytecodes::IsRegisterCountOperandType(next_operand_type)) {
+        if (OperandType::kRegCount == next_operand_type) {
           return GetRegisterCountOperand(operand_index + 1);
         }
       }
@@ -138,6 +151,13 @@
   }
 }
 
+uint32_t BytecodeArrayIterator::GetRuntimeIdOperand(int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kRuntimeId);
+  return GetUnsignedOperand(operand_index, operand_type);
+}
+
 Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
     int operand_index) const {
   return FixedArray::get(bytecode_array()->constant_pool(),
@@ -150,11 +170,10 @@
   Bytecode bytecode = current_bytecode();
   if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
     int relative_offset = GetImmediateOperand(0);
-    return current_offset() + relative_offset;
-  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode) ||
-             interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+    return current_offset() + relative_offset + current_prefix_offset();
+  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
     Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
-    return current_offset() + smi->value();
+    return current_offset() + smi->value() + current_prefix_offset();
   } else {
     UNREACHABLE();
     return kMinInt;
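
On the decoding side, the iterator now records a prefix offset and an operand scale so operands are read at the scaled width and the opcode is found after the prefix byte. A standalone sketch under the same invented encoding as the emission sketch earlier (the prefix values 0xfe/0xff are made up):

// scaled_decode_demo.cc -- standalone sketch, not V8 source.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Made-up prefix opcode values; V8 uses generated tables instead.
const uint8_t kWidePrefix = 0xfe, kExtraWidePrefix = 0xff;

// Returns the operand width (in bytes) implied by an optional prefix byte and
// reports how many prefix bytes were consumed.
int OperandWidth(const std::vector<uint8_t>& code, size_t offset,
                 int* prefix_offset) {
  if (code[offset] == kWidePrefix) { *prefix_offset = 1; return 2; }
  if (code[offset] == kExtraWidePrefix) { *prefix_offset = 1; return 4; }
  *prefix_offset = 0;
  return 1;
}

// Reads one little-endian unsigned operand of the given width.
uint32_t ReadOperand(const std::vector<uint8_t>& code, size_t offset,
                     int width) {
  uint32_t value = 0;
  for (int i = 0; i < width; i++) {
    value |= static_cast<uint32_t>(code[offset + i]) << (8 * i);
  }
  return value;
}

int main() {
  // "Wide" bytecode 0x42 with two 16-bit operands (0x1234 and 0x0001).
  std::vector<uint8_t> code = {kWidePrefix, 0x42, 0x34, 0x12, 0x01, 0x00};
  int prefix_offset = 0;
  int width = OperandWidth(code, 0, &prefix_offset);
  size_t opcode_offset = 0 + prefix_offset;
  uint32_t operand0 = ReadOperand(code, opcode_offset + 1, width);
  uint32_t operand1 = ReadOperand(code, opcode_offset + 1 + width, width);
  std::printf("opcode=%02x operand0=%x operand1=%x\n",
              code[opcode_offset], operand0, operand1);
  // prints: opcode=42 operand0=1234 operand1=1
  return 0;
}
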
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index 5379bbf..b372894 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -21,31 +21,38 @@
   bool done() const;
   Bytecode current_bytecode() const;
   int current_bytecode_size() const;
-  void set_current_offset(int offset) { bytecode_offset_ = offset; }
   int current_offset() const { return bytecode_offset_; }
+  OperandScale current_operand_scale() const { return operand_scale_; }
+  int current_prefix_offset() const { return prefix_offset_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
   }
 
-  int8_t GetImmediateOperand(int operand_index) const;
-  int GetIndexOperand(int operand_index) const;
-  int GetRegisterCountOperand(int operand_index) const;
+  uint32_t GetFlagOperand(int operand_index) const;
+  int32_t GetImmediateOperand(int operand_index) const;
+  uint32_t GetIndexOperand(int operand_index) const;
+  uint32_t GetRegisterCountOperand(int operand_index) const;
   Register GetRegisterOperand(int operand_index) const;
   int GetRegisterOperandRange(int operand_index) const;
+  uint32_t GetRuntimeIdOperand(int operand_index) const;
   Handle<Object> GetConstantForIndexOperand(int operand_index) const;
 
-  // Get the raw byte for the given operand. Note: you should prefer using the
-  // typed versions above which cast the return to an appropriate type.
-  uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
-
   // Returns the absolute offset of the branch target at the current
   // bytecode. It is an error to call this method if the bytecode is
   // not for a jump or conditional jump.
   int GetJumpTargetOffset() const;
 
  private:
+  uint32_t GetUnsignedOperand(int operand_index,
+                              OperandType operand_type) const;
+  int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
+
+  void UpdateOperandScale();
+
   Handle<BytecodeArray> bytecode_array_;
   int bytecode_offset_;
+  OperandScale operand_scale_;
+  int prefix_offset_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
 };
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 6f4dc27..b0fa245 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -72,6 +72,7 @@
 
   Scope* scope() const { return scope_; }
   Register reg() const { return register_; }
+  bool ShouldPopContext() { return should_pop_context_; }
 
  private:
   const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
@@ -212,9 +213,9 @@
  protected:
   bool Execute(Command command, Statement* statement) override {
     switch (command) {
-      case CMD_BREAK:
+      case CMD_BREAK:  // We should never see break/continue in top-level code.
       case CMD_CONTINUE:
-        break;
+        UNREACHABLE();
       case CMD_RETURN:
         generator()->builder()->Return();
         return true;
@@ -362,15 +363,20 @@
 void BytecodeGenerator::ControlScope::PerformCommand(Command command,
                                                      Statement* statement) {
   ControlScope* current = this;
-  ContextScope* context = this->context();
+  ContextScope* context = generator()->execution_context();
+  // Pop context to the expected depth but do not pop the outermost context.
+  if (context != current->context() && context->ShouldPopContext()) {
+    generator()->builder()->PopContext(current->context()->reg());
+  }
   do {
-    if (current->Execute(command, statement)) { return; }
+    if (current->Execute(command, statement)) {
+      return;
+    }
     current = current->outer();
     if (current->context() != context) {
       // Pop context to the expected depth.
       // TODO(rmcilroy): Only emit a single context pop.
       generator()->builder()->PopContext(current->context()->reg());
-      context = current->context();
     }
   } while (current != nullptr);
   UNREACHABLE();
@@ -450,7 +456,7 @@
 
   virtual ~ExpressionResultScope() {
     generator_->set_execution_result(outer_);
-    DCHECK(result_identified());
+    DCHECK(result_identified() || generator_->HasStackOverflow());
   }
 
   bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -462,6 +468,7 @@
  protected:
   ExpressionResultScope* outer() const { return outer_; }
   BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+  BytecodeGenerator* generator() const { return generator_; }
   const RegisterAllocationScope* allocator() const { return &allocator_; }
 
   void set_result_identified() {
@@ -536,7 +543,12 @@
     set_result_identified();
   }
 
-  Register ResultRegister() const { return result_register_; }
+  Register ResultRegister() {
+    if (generator()->HasStackOverflow() && !result_identified()) {
+      SetResultInAccumulator();
+    }
+    return result_register_;
+  }
 
  private:
   Register result_register_;
@@ -565,7 +577,8 @@
   // Initialize bytecode array builder.
   set_builder(new (zone()) BytecodeArrayBuilder(
       isolate(), zone(), info->num_parameters_including_this(),
-      scope()->MaxNestedContextChainLength(), scope()->num_stack_slots()));
+      scope()->MaxNestedContextChainLength(), scope()->num_stack_slots(),
+      info->literal()));
 
   // Initialize the incoming context.
   ContextScope incoming_context(this, scope(), false);
@@ -584,7 +597,7 @@
     MakeBytecodeBody();
   }
 
-  builder()->EnsureReturn(info->literal());
+  builder()->EnsureReturn();
   set_scope(nullptr);
   set_info(nullptr);
   return builder()->ToBytecodeArray();
@@ -611,12 +624,6 @@
     UNIMPLEMENTED();
   }
 
-  // Visit illegal re-declaration and bail out if it exists.
-  if (scope()->HasIllegalRedeclaration()) {
-    VisitForEffect(scope()->GetIllegalRedeclaration());
-    return;
-  }
-
   // Visit declarations within the function scope.
   VisitDeclarations(scope()->declarations());
 
@@ -826,6 +833,7 @@
 
 
 void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   BytecodeLabel else_label, end_label;
   if (stmt->condition()->ToBooleanIsTrue()) {
     // Generate then block unconditionally as always true.
@@ -861,23 +869,26 @@
 
 
 void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   execution_control()->Continue(stmt->target());
 }
 
 
 void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   execution_control()->Break(stmt->target());
 }
 
 
 void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  VisitForAccumulatorValue(stmt->expression());
   builder()->SetStatementPosition(stmt);
+  VisitForAccumulatorValue(stmt->expression());
   execution_control()->ReturnAccumulator();
 }
 
 
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
   builder()->CastAccumulatorToJSObject();
   VisitNewLocalWithContext();
@@ -893,6 +904,8 @@
   ControlScopeForBreakable scope(this, stmt, &switch_builder);
   int default_index = -1;
 
+  builder()->SetStatementPosition(stmt);
+
   // Keep the switch value in a register until a case matches.
   Register tag = VisitForRegisterValue(stmt->tag());
 
@@ -959,6 +972,7 @@
   } else {
     VisitIterationBody(stmt, &loop_builder);
     loop_builder.Condition();
+    builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.JumpToHeaderIfTrue();
   }
@@ -975,6 +989,7 @@
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (!stmt->cond()->ToBooleanIsTrue()) {
+    builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
@@ -998,12 +1013,14 @@
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
+    builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
   VisitIterationBody(stmt, &loop_builder);
   if (stmt->next() != nullptr) {
     loop_builder.Next();
+    builder()->SetStatementPosition(stmt->next());
     Visit(stmt->next());
   }
   loop_builder.JumpToHeader();
@@ -1087,28 +1104,28 @@
 
 void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   if (stmt->subject()->IsNullLiteral() ||
-      stmt->subject()->IsUndefinedLiteral(isolate())) {
+      stmt->subject()->IsUndefinedLiteral()) {
     // ForIn generates lots of code, skip if it wouldn't produce any effects.
     return;
   }
 
   LoopBuilder loop_builder(builder());
-  BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
+  BytecodeLabel subject_null_label, subject_undefined_label;
 
   // Prepare the state for executing ForIn.
+  builder()->SetExpressionAsStatementPosition(stmt->subject());
   VisitForAccumulatorValue(stmt->subject());
   builder()->JumpIfUndefined(&subject_undefined_label);
   builder()->JumpIfNull(&subject_null_label);
   Register receiver = register_allocator()->NewRegister();
   builder()->CastAccumulatorToJSObject();
-  builder()->JumpIfNull(&not_object_label);
   builder()->StoreAccumulatorInRegister(receiver);
 
   register_allocator()->PrepareForConsecutiveAllocations(3);
   Register cache_type = register_allocator()->NextConsecutiveRegister();
   Register cache_array = register_allocator()->NextConsecutiveRegister();
   Register cache_length = register_allocator()->NextConsecutiveRegister();
-  // Used as kRegTriple8 and kRegPair8 in ForInPrepare and ForInNext.
+  // Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
   USE(cache_array);
   builder()->ForInPrepare(cache_type);
 
@@ -1119,11 +1136,13 @@
 
   // The loop
   loop_builder.LoopHeader();
+  builder()->SetExpressionAsStatementPosition(stmt->each());
   loop_builder.Condition();
   builder()->ForInDone(index, cache_length);
   loop_builder.BreakIfTrue();
   DCHECK(Register::AreContiguous(cache_type, cache_array));
-  builder()->ForInNext(receiver, index, cache_type);
+  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  builder()->ForInNext(receiver, index, cache_type, feedback_index(slot));
   loop_builder.ContinueIfUndefined();
   VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
   VisitIterationBody(stmt, &loop_builder);
@@ -1132,7 +1151,6 @@
   builder()->StoreAccumulatorInRegister(index);
   loop_builder.JumpToHeader();
   loop_builder.EndLoop();
-  builder()->Bind(&not_object_label);
   builder()->Bind(&subject_null_label);
   builder()->Bind(&subject_undefined_label);
 }
@@ -1146,6 +1164,7 @@
 
   loop_builder.LoopHeader();
   loop_builder.Next();
+  builder()->SetExpressionAsStatementPosition(stmt->next_result());
   VisitForEffect(stmt->next_result());
   VisitForAccumulatorValue(stmt->result_done());
   loop_builder.BreakIfTrue();
@@ -1180,8 +1199,10 @@
   VisitNewLocalCatchContext(stmt->variable());
   builder()->StoreAccumulatorInRegister(context);
 
-  // Clear message object as we enter the catch block.
-  builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+  // If requested, clear message object as we enter the catch block.
+  if (stmt->clear_pending_message()) {
+    builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+  }
 
   // Load the catch context into the accumulator.
   builder()->LoadAccumulatorWithRegister(context);
@@ -1267,7 +1288,9 @@
   // Find or build a shared function info.
   Handle<SharedFunctionInfo> shared_info =
       Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
-  CHECK(!shared_info.is_null());  // TODO(rmcilroy): Set stack overflow?
+  if (shared_info.is_null()) {
+    return SetStackOverflow();
+  }
   builder()->CreateClosure(shared_info,
                            expr->pretenure() ? TENURED : NOT_TENURED);
   execution_result()->SetResultInAccumulator();
@@ -1679,11 +1702,6 @@
     }
   }
 
-  // Transform literals that contain functions to fast properties.
-  if (expr->has_function()) {
-    builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
-  }
-
   execution_result()->SetResultInRegister(literal);
 }
 
@@ -1729,6 +1747,7 @@
 
 
 void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
+  builder()->SetExpressionPosition(proxy);
   VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
 }
 
@@ -2173,6 +2192,7 @@
   }
 
   // Store the value.
+  builder()->SetExpressionPosition(expr);
   FeedbackVectorSlot slot = expr->AssignmentSlot();
   switch (assign_type) {
     case VARIABLE: {
@@ -2210,6 +2230,7 @@
 
 void BytecodeGenerator::VisitThrow(Throw* expr) {
   VisitForAccumulatorValue(expr->exception());
+  builder()->SetExpressionPosition(expr);
   builder()->Throw();
   // Throw statements are modeled as expressions, not statements. These are
   // converted from assignment statements in Rewriter::ReWrite pass. An
@@ -2222,6 +2243,7 @@
 void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
   LhsKind property_kind = Property::GetAssignType(expr);
   FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+  builder()->SetExpressionPosition(expr);
   switch (property_kind) {
     case VARIABLE:
       UNREACHABLE();
@@ -2718,9 +2740,7 @@
   }
 
   // Convert old value into a number.
-  if (!is_strong(language_mode())) {
-    builder()->CastAccumulatorToNumber();
-  }
+  builder()->CastAccumulatorToNumber();
 
   // Save result for postfix expressions.
   if (is_postfix) {
@@ -2732,6 +2752,7 @@
   builder()->CountOperation(expr->binary_op());
 
   // Store the value.
+  builder()->SetExpressionPosition(expr);
   FeedbackVectorSlot feedback_slot = expr->CountSlot();
   switch (assign_type) {
     case VARIABLE: {
@@ -2791,6 +2812,7 @@
 void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
+  builder()->SetExpressionPosition(expr);
   builder()->CompareOperation(expr->op(), lhs);
   execution_result()->SetResultInAccumulator();
 }
@@ -3129,12 +3151,12 @@
 
 
 LanguageMode BytecodeGenerator::language_mode() const {
-  return info()->language_mode();
+  return execution_context()->scope()->language_mode();
 }
 
 
 int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
-  return info()->feedback_vector()->GetIndex(slot);
+  return info()->shared_info()->feedback_vector()->GetIndex(slot);
 }
 
 }  // namespace interpreter
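One structural change in the generator above: PerformCommand now pops the context chain back toward the scope that handles a break/continue/return while unwinding, rather than tracking a separate context cursor. A minimal, hedged model of that unwinding shape (illustrative types, not the generator's ControlScope/ContextScope classes):

#include <cassert>

// Each scope records the context depth that was live when it was entered;
// unwinding pops contexts down to the depth of the handling scope.
struct Scope {
  int context_depth;     // depth at scope entry
  bool handles_command;  // e.g. a loop scope handles CMD_BREAK
  const Scope* outer;
};

// Returns the context depth after unwinding; the depth reductions stand in
// for the PopContext(...) calls in the real code. Illustrative only.
int Unwind(const Scope* current, int live_depth) {
  while (current != nullptr) {
    if (live_depth > current->context_depth) {
      live_depth = current->context_depth;  // stands in for PopContext(...)
    }
    if (current->handles_command) return live_depth;
    current = current->outer;
  }
  return live_depth;  // not reached for well-formed scope chains
}

int main() {
  Scope function{0, false, nullptr};
  Scope loop{0, true, &function};
  Scope block{1, false, &loop};  // e.g. a with/catch pushed one context
  // Breaking out of the inner block pops back to the loop's entry depth.
  assert(Unwind(&block, 1) == 0);
  return 0;
}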
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
index 0a617c0..9bdde9a 100644
--- a/src/interpreter/bytecode-register-allocator.cc
+++ b/src/interpreter/bytecode-register-allocator.cc
@@ -95,17 +95,6 @@
       start = run_end;
       run_length = 0;
     }
-    Register reg_start(*start);
-    Register reg_expected(expected);
-    if (RegisterTranslator::DistanceToTranslationWindow(reg_start) > 0 &&
-        RegisterTranslator::DistanceToTranslationWindow(reg_expected) <= 0) {
-      // Run straddles the lower edge of the translation window. Registers
-      // after the start of this boundary are displaced by the register
-      // translator to provide a hole for translation. Runs either side
-      // of the boundary are fine.
-      start = run_end;
-      run_length = 0;
-    }
     if (++run_length == count) {
       return *start;
     }
@@ -121,16 +110,6 @@
   // Pad temporaries if extended run would cross translation boundary.
   Register reg_first(*start);
   Register reg_last(*start + static_cast<int>(count) - 1);
-  DCHECK_GT(RegisterTranslator::DistanceToTranslationWindow(reg_first),
-            RegisterTranslator::DistanceToTranslationWindow(reg_last));
-  while (RegisterTranslator::DistanceToTranslationWindow(reg_first) > 0 &&
-         RegisterTranslator::DistanceToTranslationWindow(reg_last) <= 0) {
-    auto pos_insert_pair =
-        free_temporaries_.insert(AllocateTemporaryRegister());
-    reg_first = Register(*pos_insert_pair.first);
-    reg_last = Register(reg_first.index() + static_cast<int>(count) - 1);
-    run_length = 0;
-  }
 
   // Ensure enough registers for run.
   while (run_length++ < count) {
@@ -139,10 +118,6 @@
 
   int run_start =
       last_temporary_register().index() - static_cast<int>(count) + 1;
-  DCHECK(RegisterTranslator::DistanceToTranslationWindow(Register(run_start)) <=
-             0 ||
-         RegisterTranslator::DistanceToTranslationWindow(
-             Register(run_start + static_cast<int>(count) - 1)) > 0);
   return run_start;
 }
 
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index b813605..c724827 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -11,22 +11,35 @@
 namespace internal {
 namespace interpreter {
 
-// TODO(rmcilroy): consider simplifying this to avoid the template magic.
+template <OperandTypeInfo>
+struct OperandTypeInfoTraits {
+  static const bool kIsScalable = false;
+  static const bool kIsUnsigned = false;
+  static const OperandSize kUnscaledSize = OperandSize::kNone;
+};
 
-// Template helpers to deduce the number of operands each bytecode has.
-#define OPERAND_TERM OperandType::kNone, OperandType::kNone, OperandType::kNone
+#define DECLARE_OPERAND_TYPE_INFO(Name, Scalable, Unsigned, BaseSize) \
+  template <>                                                         \
+  struct OperandTypeInfoTraits<OperandTypeInfo::k##Name> {            \
+    static const bool kIsScalable = Scalable;                         \
+    static const bool kIsUnsigned = Unsigned;                         \
+    static const OperandSize kUnscaledSize = BaseSize;                \
+  };
+OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
 
 template <OperandType>
-struct OperandTraits {};
+struct OperandTraits {
+  typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfo;
+};
 
-#define DECLARE_OPERAND_SIZE(Name, Size)             \
-  template <>                                        \
-  struct OperandTraits<OperandType::k##Name> {       \
-    static const OperandSize kSizeType = Size;       \
-    static const int kSize = static_cast<int>(Size); \
+#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType)   \
+  template <>                                         \
+  struct OperandTraits<OperandType::k##Name> {        \
+    typedef OperandTypeInfoTraits<InfoType> TypeInfo; \
   };
-OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
-#undef DECLARE_OPERAND_SIZE
+OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
+#undef DECLARE_OPERAND_TYPE_TRAITS
 
 template <OperandType>
 struct RegisterOperandTraits {
@@ -41,13 +54,13 @@
 REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
 #undef DECLARE_REGISTER_OPERAND
 
-template <OperandType... Args>
+template <AccumulatorUse, OperandType...>
 struct BytecodeTraits {};
 
-template <OperandType operand_0, OperandType operand_1, OperandType operand_2,
-          OperandType operand_3>
-struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
-                      OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+          OperandType operand_1, OperandType operand_2, OperandType operand_3>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
+                      operand_3> {
   static OperandType GetOperandType(int i) {
     DCHECK(0 <= i && i < kOperandCount);
     const OperandType kOperands[] = {operand_0, operand_1, operand_2,
@@ -55,32 +68,20 @@
     return kOperands[i];
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandSize kOperandSizes[] =
-        {OperandTraits<operand_0>::kSizeType,
-         OperandTraits<operand_1>::kSizeType,
-         OperandTraits<operand_2>::kSizeType,
-         OperandTraits<operand_3>::kSizeType};
-    return kOperandSizes[i];
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const int kOffset0 = 1;
-    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
-    const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
-    const int kOffset3 = kOffset2 + OperandTraits<operand_2>::kSize;
-    const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2, kOffset3};
-    return kOperandOffsets[i];
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
            operand_3 == ot;
   }
 
+  static inline bool IsScalable() {
+    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+            OperandTraits<operand_1>::TypeInfo::kIsScalable |
+            OperandTraits<operand_2>::TypeInfo::kIsScalable |
+            OperandTraits<operand_3>::TypeInfo::kIsScalable);
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 4;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -92,42 +93,29 @@
       (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
       (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
       (RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
-  static const int kSize =
-      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
-      OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
 };
 
-template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
-struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+          OperandType operand_1, OperandType operand_2>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
   static inline OperandType GetOperandType(int i) {
     DCHECK(0 <= i && i <= 2);
     const OperandType kOperands[] = {operand_0, operand_1, operand_2};
     return kOperands[i];
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandSize kOperandSizes[] =
-        {OperandTraits<operand_0>::kSizeType,
-         OperandTraits<operand_1>::kSizeType,
-         OperandTraits<operand_2>::kSizeType};
-    return kOperandSizes[i];
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const int kOffset0 = 1;
-    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
-    const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
-    const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2};
-    return kOperandOffsets[i];
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot || operand_1 == ot || operand_2 == ot;
   }
 
+  static inline bool IsScalable() {
+    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+            OperandTraits<operand_1>::TypeInfo::kIsScalable |
+            OperandTraits<operand_2>::TypeInfo::kIsScalable);
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 3;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -137,40 +125,28 @@
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
       (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
       (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
-  static const int kSize =
-      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
-      OperandTraits<operand_2>::kSize;
 };
 
-template <OperandType operand_0, OperandType operand_1>
-struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+          OperandType operand_1>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
   static inline OperandType GetOperandType(int i) {
     DCHECK(0 <= i && i < kOperandCount);
     const OperandType kOperands[] = {operand_0, operand_1};
     return kOperands[i];
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandSize kOperandSizes[] =
-        {OperandTraits<operand_0>::kSizeType,
-         OperandTraits<operand_1>::kSizeType};
-    return kOperandSizes[i];
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const int kOffset0 = 1;
-    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
-    const int kOperandOffsets[] = {kOffset0, kOffset1};
-    return kOperandOffsets[i];
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot || operand_1 == ot;
   }
 
+  static inline bool IsScalable() {
+    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+            OperandTraits<operand_1>::TypeInfo::kIsScalable);
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 2;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -178,68 +154,91 @@
   static const int kRegisterOperandBitmap =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
       (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
-  static const int kSize =
-      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
 };
 
-template <OperandType operand_0>
-struct BytecodeTraits<operand_0, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0>
+struct BytecodeTraits<accumulator_use, operand_0> {
   static inline OperandType GetOperandType(int i) {
     DCHECK(i == 0);
     return operand_0;
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(i == 0);
-    return OperandTraits<operand_0>::kSizeType;
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(i == 0);
-    return 1;
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot;
   }
 
+  static inline bool IsScalable() {
+    return OperandTraits<operand_0>::TypeInfo::kIsScalable;
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 1;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand;
   static const int kRegisterOperandBitmap =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand;
-  static const int kSize = 1 + OperandTraits<operand_0>::kSize;
 };
 
-template <>
-struct BytecodeTraits<OperandType::kNone, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use>
+struct BytecodeTraits<accumulator_use> {
   static inline OperandType GetOperandType(int i) {
     UNREACHABLE();
     return OperandType::kNone;
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    UNREACHABLE();
-    return OperandSize::kNone;
-  }
-
-  static inline int GetOperandOffset(int i) {
-    UNREACHABLE();
-    return 1;
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return false;
   }
 
+  static inline bool IsScalable() { return false; }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 0;
   static const int kRegisterOperandCount = 0;
   static const int kRegisterOperandBitmap = 0;
-  static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
 };
 
+template <bool>
+struct OperandScaler {
+  static int Multiply(int size, int operand_scale) { return 0; }
+};
+
+template <>
+struct OperandScaler<false> {
+  static int Multiply(int size, int operand_scale) { return size; }
+};
+
+template <>
+struct OperandScaler<true> {
+  static int Multiply(int size, int operand_scale) {
+    return size * operand_scale;
+  }
+};
+
+static OperandSize ScaledOperandSize(OperandType operand_type,
+                                     OperandScale operand_scale) {
+  switch (operand_type) {
+#define CASE(Name, TypeInfo)                                                   \
+  case OperandType::k##Name: {                                                 \
+    OperandSize base_size = OperandTypeInfoTraits<TypeInfo>::kUnscaledSize;    \
+    int size =                                                                 \
+        OperandScaler<OperandTypeInfoTraits<TypeInfo>::kIsScalable>::Multiply( \
+            static_cast<int>(base_size), static_cast<int>(operand_scale));     \
+    OperandSize operand_size = static_cast<OperandSize>(size);                 \
+    DCHECK(operand_size == OperandSize::kByte ||                               \
+           operand_size == OperandSize::kShort ||                              \
+           operand_size == OperandSize::kQuad);                                \
+    return operand_size;                                                       \
+  }
+    OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return OperandSize::kNone;
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
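The traits above replace per-width operand types with a single operand type plus an OperandScale: scalable operands grow with the scale, fixed ones (Flag8, RuntimeId) keep their size, and an instruction's size is the opcode byte plus the sum of its scaled operand sizes. A small standalone model of that sizing rule (hand-rolled structs and a made-up operand layout, not the generated V8 traits):

#include <cassert>

enum class Scale { kSingle = 1, kDouble = 2, kQuadruple = 4 };

// One operand descriptor: unscaled size in bytes plus whether it scales with
// a Wide/ExtraWide prefix. Fixed operands ignore the scale.
struct Operand {
  int unscaled_size;
  bool scalable;
};

int ScaledSize(Operand op, Scale scale) {
  return op.scalable ? op.unscaled_size * static_cast<int>(scale)
                     : op.unscaled_size;
}

// Opcode byte plus scaled operands; the prefix byte itself, when present, is
// accounted for separately (see the iterator's prefix_offset_).
int InstructionSize(const Operand* ops, int count, Scale scale) {
  int size = 1;
  for (int i = 0; i < count; i++) size += ScaledSize(ops[i], scale);
  return size;
}

int main() {
  // Hypothetical bytecode with one scalable index operand and one fixed flag.
  Operand ops[] = {{1, true}, {1, false}};
  assert(InstructionSize(ops, 2, Scale::kSingle) == 3);
  assert(InstructionSize(ops, 2, Scale::kDouble) == 4);     // index -> 2 bytes
  assert(InstructionSize(ops, 2, Scale::kQuadruple) == 6);  // index -> 4 bytes
  return 0;
}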
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index c3b17c7..fd27f39 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -4,8 +4,11 @@
 
 #include "src/interpreter/bytecodes.h"
 
+#include <iomanip>
+
 #include "src/frames.h"
 #include "src/interpreter/bytecode-traits.h"
+#include "src/interpreter/interpreter.h"
 
 namespace v8 {
 namespace internal {
@@ -25,6 +28,35 @@
   return "";
 }
 
+// static
+std::string Bytecodes::ToString(Bytecode bytecode, OperandScale operand_scale) {
+  static const char kSeparator = '.';
+
+  std::string value(ToString(bytecode));
+  if (operand_scale > OperandScale::kSingle) {
+    Bytecode prefix_bytecode = OperandScaleToPrefixBytecode(operand_scale);
+    std::string suffix = ToString(prefix_bytecode);
+    return value.append(1, kSeparator).append(suffix);
+  } else {
+    return value;
+  }
+}
+
+// static
+const char* Bytecodes::AccumulatorUseToString(AccumulatorUse accumulator_use) {
+  switch (accumulator_use) {
+    case AccumulatorUse::kNone:
+      return "None";
+    case AccumulatorUse::kRead:
+      return "Read";
+    case AccumulatorUse::kWrite:
+      return "Write";
+    case AccumulatorUse::kReadWrite:
+      return "ReadWrite";
+  }
+  UNREACHABLE();
+  return "";
+}
 
 // static
 const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
@@ -39,6 +71,20 @@
   return "";
 }
 
+// static
+const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
+  switch (operand_scale) {
+    case OperandScale::kSingle:
+      return "Single";
+    case OperandScale::kDouble:
+      return "Double";
+    case OperandScale::kQuadruple:
+      return "Quadruple";
+    case OperandScale::kInvalid:
+      UNREACHABLE();
+  }
+  return "";
+}
 
 // static
 const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
@@ -49,6 +95,8 @@
       return "Byte";
     case OperandSize::kShort:
       return "Short";
+    case OperandSize::kQuad:
+      return "Quad";
   }
   UNREACHABLE();
   return "";
@@ -72,31 +120,34 @@
 
 // static
 Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
-  switch (Size(bytecode)) {
-#define CASE(Name, ...)                                  \
-  case BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize: \
-    return Bytecode::k##Name;
-    DEBUG_BREAK_BYTECODE_LIST(CASE)
-#undef CASE
-    default:
-      break;
+  DCHECK(!IsDebugBreak(bytecode));
+  if (bytecode == Bytecode::kWide) {
+    return Bytecode::kDebugBreakWide;
   }
+  if (bytecode == Bytecode::kExtraWide) {
+    return Bytecode::kDebugBreakExtraWide;
+  }
+  int bytecode_size = Size(bytecode, OperandScale::kSingle);
+#define RETURN_IF_DEBUG_BREAK_SIZE_MATCHES(Name, ...)                    \
+  if (bytecode_size == Size(Bytecode::k##Name, OperandScale::kSingle)) { \
+    return Bytecode::k##Name;                                            \
+  }
+  DEBUG_BREAK_PLAIN_BYTECODE_LIST(RETURN_IF_DEBUG_BREAK_SIZE_MATCHES)
+#undef RETURN_IF_DEBUG_BREAK_SIZE_MATCHES
   UNREACHABLE();
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
-int Bytecodes::Size(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize;
-    BYTECODE_LIST(CASE)
-#undef CASE
+int Bytecodes::Size(Bytecode bytecode, OperandScale operand_scale) {
+  int size = 1;
+  for (int i = 0; i < NumberOfOperands(bytecode); i++) {
+    OperandSize operand_size = GetOperandSize(bytecode, i, operand_scale);
+    int delta = static_cast<int>(operand_size);
+    DCHECK(base::bits::IsPowerOfTwo32(static_cast<uint32_t>(delta)));
+    size += delta;
   }
-  UNREACHABLE();
-  return 0;
+  return size;
 }
 
 
@@ -106,7 +157,7 @@
   switch (bytecode) {
 #define CASE(Name, ...)   \
   case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kOperandCount;
+    return BytecodeTraits<__VA_ARGS__>::kOperandCount;
     BYTECODE_LIST(CASE)
 #undef CASE
   }
@@ -119,9 +170,9 @@
 int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
-#define CASE(Name, ...)                                            \
-  case Bytecode::k##Name:                                          \
-    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+#define CASE(Name, ...)                              \
+  case Bytecode::k##Name:                            \
+    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
     return Name##Trait::kRegisterOperandCount;
     BYTECODE_LIST(CASE)
 #undef CASE
@@ -131,12 +182,71 @@
 }
 
 // static
+Bytecode Bytecodes::OperandScaleToPrefixBytecode(OperandScale operand_scale) {
+  switch (operand_scale) {
+    case OperandScale::kQuadruple:
+      return Bytecode::kExtraWide;
+    case OperandScale::kDouble:
+      return Bytecode::kWide;
+    default:
+      UNREACHABLE();
+      return Bytecode::kIllegal;
+  }
+}
+
+// static
+bool Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale operand_scale) {
+  return operand_scale != OperandScale::kSingle;
+}
+
+// static
+OperandScale Bytecodes::PrefixBytecodeToOperandScale(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kExtraWide:
+    case Bytecode::kDebugBreakExtraWide:
+      return OperandScale::kQuadruple;
+    case Bytecode::kWide:
+    case Bytecode::kDebugBreakWide:
+      return OperandScale::kDouble;
+    default:
+      UNREACHABLE();
+      return OperandScale::kSingle;
+  }
+}
+
+// static
+AccumulatorUse Bytecodes::GetAccumulatorUse(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__>::kAccumulatorUse;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return AccumulatorUse::kNone;
+}
+
+// static
+bool Bytecodes::ReadsAccumulator(Bytecode bytecode) {
+  return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
+         AccumulatorUse::kRead;
+}
+
+// static
+bool Bytecodes::WritesAccumulator(Bytecode bytecode) {
+  return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
+         AccumulatorUse::kWrite;
+}
+
+// static
 OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
 #define CASE(Name, ...)   \
   case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandType(i);
+    return BytecodeTraits<__VA_ARGS__>::GetOperandType(i);
     BYTECODE_LIST(CASE)
 #undef CASE
   }
@@ -144,29 +254,20 @@
   return OperandType::kNone;
 }
 
-
 // static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandSize(i);
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return OperandSize::kNone;
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
+                                      OperandScale operand_scale) {
+  OperandType op_type = GetOperandType(bytecode, i);
+  return ScaledOperandSize(op_type, operand_scale);
 }
 
-
 // static
 int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
-#define CASE(Name, ...)                                            \
-  case Bytecode::k##Name:                                          \
-    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+#define CASE(Name, ...)                              \
+  case Bytecode::k##Name:                            \
+    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
     return Name##Trait::kRegisterOperandBitmap;
     BYTECODE_LIST(CASE)
 #undef CASE
@@ -176,34 +277,25 @@
 }
 
 // static
-int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandOffset(i);
-    BYTECODE_LIST(CASE)
-#undef CASE
+int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
+                                OperandScale operand_scale) {
+  // TODO(oth): restore this to a statically determined constant.
+  int offset = 1;
+  for (int operand_index = 0; operand_index < i; ++operand_index) {
+    OperandSize operand_size =
+        GetOperandSize(bytecode, operand_index, operand_scale);
+    offset += static_cast<int>(operand_size);
   }
-  UNREACHABLE();
-  return 0;
+  return offset;
 }
 
-
 // static
-OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
-  switch (operand_type) {
-#define CASE(Name, Size)     \
-  case OperandType::k##Name: \
-    return Size;
-    OPERAND_TYPE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return OperandSize::kNone;
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
+                                     OperandScale operand_scale) {
+  return static_cast<OperandSize>(
+      ScaledOperandSize(operand_type, operand_scale));
 }
 
-
 // static
 bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
   return bytecode == Bytecode::kJumpIfTrue ||
@@ -227,24 +319,10 @@
          bytecode == Bytecode::kJumpIfUndefinedConstant;
 }
 
-
-// static
-bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpIfTrueConstantWide ||
-         bytecode == Bytecode::kJumpIfFalseConstantWide ||
-         bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
-         bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
-         bytecode == Bytecode::kJumpIfNotHoleConstantWide ||
-         bytecode == Bytecode::kJumpIfNullConstantWide ||
-         bytecode == Bytecode::kJumpIfUndefinedConstantWide;
-}
-
-
 // static
 bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
   return IsConditionalJumpImmediate(bytecode) ||
-         IsConditionalJumpConstant(bytecode) ||
-         IsConditionalJumpConstantWide(bytecode);
+         IsConditionalJumpConstant(bytecode);
 }
 
 
@@ -260,26 +338,23 @@
          IsConditionalJumpConstant(bytecode);
 }
 
-
-// static
-bool Bytecodes::IsJumpConstantWide(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpConstantWide ||
-         IsConditionalJumpConstantWide(bytecode);
-}
-
-
 // static
 bool Bytecodes::IsJump(Bytecode bytecode) {
-  return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode) ||
-         IsJumpConstantWide(bytecode);
+  return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
 }
 
 
 // static
 bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
   return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
-         bytecode == Bytecode::kNew || bytecode == Bytecode::kCallWide ||
-         bytecode == Bytecode::kTailCallWide || bytecode == Bytecode::kNewWide;
+         bytecode == Bytecode::kNew;
+}
+
+// static
+bool Bytecodes::IsCallRuntime(Bytecode bytecode) {
+  return bytecode == Bytecode::kCallRuntime ||
+         bytecode == Bytecode::kCallRuntimeForPair ||
+         bytecode == Bytecode::kInvokeIntrinsic;
 }
 
 // static
@@ -296,31 +371,40 @@
 }
 
 // static
+bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
+  switch (bytecode) {
+#define CASE(Name, ...)                              \
+  case Bytecode::k##Name:                            \
+    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
+    return Name##Trait::IsScalable();
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return false;
+}
+
+// static
+bool Bytecodes::IsPrefixScalingBytecode(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kExtraWide:
+    case Bytecode::kDebugBreakExtraWide:
+    case Bytecode::kWide:
+    case Bytecode::kDebugBreakWide:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// static
 bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
   return bytecode == Bytecode::kReturn || IsJump(bytecode);
 }
 
 // static
-bool Bytecodes::IsIndexOperandType(OperandType operand_type) {
-  return operand_type == OperandType::kIdx8 ||
-         operand_type == OperandType::kIdx16;
-}
-
-// static
-bool Bytecodes::IsImmediateOperandType(OperandType operand_type) {
-  return operand_type == OperandType::kImm8;
-}
-
-// static
-bool Bytecodes::IsRegisterCountOperandType(OperandType operand_type) {
-  return (operand_type == OperandType::kRegCount8 ||
-          operand_type == OperandType::kRegCount16);
-}
-
-// static
 bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
-  return (operand_type == OperandType::kMaybeReg8 ||
-          operand_type == OperandType::kMaybeReg16);
+  return operand_type == OperandType::kMaybeReg;
 }
 
 // static
@@ -376,41 +460,102 @@
   return false;
 }
 
-namespace {
-static Register DecodeRegister(const uint8_t* operand_start,
-                               OperandType operand_type) {
-  switch (Bytecodes::SizeOfOperand(operand_type)) {
-    case OperandSize::kByte:
-      return Register::FromOperand(*operand_start);
-    case OperandSize::kShort:
-      return Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
-    case OperandSize::kNone: {
-      UNREACHABLE();
-    }
+// static
+bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return OperandTraits<OperandType::k##Name>::TypeInfo::kIsUnsigned;
+    OPERAND_TYPE_LIST(CASE)
+#undef CASE
   }
-  return Register();
+  UNREACHABLE();
+  return false;
 }
-}  // namespace
 
+// static
+OperandScale Bytecodes::NextOperandScale(OperandScale operand_scale) {
+  DCHECK(operand_scale >= OperandScale::kSingle &&
+         operand_scale <= OperandScale::kMaxValid);
+  return static_cast<OperandScale>(2 * static_cast<int>(operand_scale));
+}
+
+// static
+Register Bytecodes::DecodeRegisterOperand(const uint8_t* operand_start,
+                                          OperandType operand_type,
+                                          OperandScale operand_scale) {
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+  int32_t operand =
+      DecodeSignedOperand(operand_start, operand_type, operand_scale);
+  return Register::FromOperand(operand);
+}
+
+// static
+int32_t Bytecodes::DecodeSignedOperand(const uint8_t* operand_start,
+                                       OperandType operand_type,
+                                       OperandScale operand_scale) {
+  DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+  switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+    case OperandSize::kByte:
+      return static_cast<int8_t>(*operand_start);
+    case OperandSize::kShort:
+      return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
+    case OperandSize::kQuad:
+      return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return 0;
+}
+
+// static
+uint32_t Bytecodes::DecodeUnsignedOperand(const uint8_t* operand_start,
+                                          OperandType operand_type,
+                                          OperandScale operand_scale) {
+  DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+  switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+    case OperandSize::kByte:
+      return *operand_start;
+    case OperandSize::kShort:
+      return ReadUnalignedUInt16(operand_start);
+    case OperandSize::kQuad:
+      return ReadUnalignedUInt32(operand_start);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return 0;
+}
 
 // static
 std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
                                 int parameter_count) {
-  Vector<char> buf = Vector<char>::New(50);
-
   Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
-  int bytecode_size = Bytecodes::Size(bytecode);
-
-  for (int i = 0; i < bytecode_size; i++) {
-    SNPrintF(buf, "%02x ", bytecode_start[i]);
-    os << buf.start();
+  int prefix_offset = 0;
+  OperandScale operand_scale = OperandScale::kSingle;
+  if (IsPrefixScalingBytecode(bytecode)) {
+    prefix_offset = 1;
+    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(bytecode);
+    bytecode = Bytecodes::FromByte(bytecode_start[1]);
   }
+
+  // Prepare to print bytecode and operands as hex digits.
+  std::ios saved_format(nullptr);
+  saved_format.copyfmt(os);  // save os's format so it can be restored below
+  os.fill('0');
+  os.flags(std::ios::hex);
+
+  int bytecode_size = Bytecodes::Size(bytecode, operand_scale);
+  for (int i = 0; i < prefix_offset + bytecode_size; i++) {
+    os << std::setw(2) << static_cast<uint32_t>(bytecode_start[i]) << ' ';
+  }
+  os.copyfmt(saved_format);
+
   const int kBytecodeColumnSize = 6;
-  for (int i = bytecode_size; i < kBytecodeColumnSize; i++) {
+  for (int i = prefix_offset + bytecode_size; i < kBytecodeColumnSize; i++) {
     os << "   ";
   }
 
-  os << bytecode << " ";
+  os << Bytecodes::ToString(bytecode, operand_scale) << " ";
 
   // Operands for the debug break are from the original instruction.
   if (IsDebugBreak(bytecode)) return os;
@@ -420,42 +565,42 @@
   for (int i = 0; i < number_of_operands; i++) {
     OperandType op_type = GetOperandType(bytecode, i);
     const uint8_t* operand_start =
-        &bytecode_start[GetOperandOffset(bytecode, i)];
+        &bytecode_start[prefix_offset +
+                        GetOperandOffset(bytecode, i, operand_scale)];
     switch (op_type) {
-      case interpreter::OperandType::kRegCount8:
-        os << "#" << static_cast<unsigned int>(*operand_start);
+      case interpreter::OperandType::kRegCount:
+        os << "#"
+           << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
         break;
-      case interpreter::OperandType::kRegCount16:
-        os << '#' << ReadUnalignedUInt16(operand_start);
+      case interpreter::OperandType::kIdx:
+      case interpreter::OperandType::kRuntimeId:
+        os << "["
+           << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
+           << "]";
         break;
-      case interpreter::OperandType::kIdx8:
-        os << "[" << static_cast<unsigned int>(*operand_start) << "]";
+      case interpreter::OperandType::kImm:
+        os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
+           << "]";
         break;
-      case interpreter::OperandType::kIdx16:
-        os << "[" << ReadUnalignedUInt16(operand_start) << "]";
+      case interpreter::OperandType::kFlag8:
+        os << "#"
+           << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
         break;
-      case interpreter::OperandType::kImm8:
-        os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
-        break;
-      case interpreter::OperandType::kMaybeReg8:
-      case interpreter::OperandType::kMaybeReg16:
-      case interpreter::OperandType::kReg8:
-      case interpreter::OperandType::kReg16:
-      case interpreter::OperandType::kRegOut8:
-      case interpreter::OperandType::kRegOut16: {
-        Register reg = DecodeRegister(operand_start, op_type);
+      case interpreter::OperandType::kMaybeReg:
+      case interpreter::OperandType::kReg:
+      case interpreter::OperandType::kRegOut: {
+        Register reg =
+            DecodeRegisterOperand(operand_start, op_type, operand_scale);
         os << reg.ToString(parameter_count);
         break;
       }
-      case interpreter::OperandType::kRegOutTriple8:
-      case interpreter::OperandType::kRegOutTriple16:
+      case interpreter::OperandType::kRegOutTriple:
         range += 1;
-      case interpreter::OperandType::kRegOutPair8:
-      case interpreter::OperandType::kRegOutPair16:
-      case interpreter::OperandType::kRegPair8:
-      case interpreter::OperandType::kRegPair16: {
+      case interpreter::OperandType::kRegOutPair:
+      case interpreter::OperandType::kRegPair: {
         range += 1;
-        Register first_reg = DecodeRegister(operand_start, op_type);
+        Register first_reg =
+            DecodeRegisterOperand(operand_start, op_type, operand_scale);
         Register last_reg = Register(first_reg.index() + range);
         os << first_reg.ToString(parameter_count) << "-"
            << last_reg.ToString(parameter_count);
@@ -472,20 +617,33 @@
   return os;
 }
 
+// static
+bool Bytecodes::BytecodeHasHandler(Bytecode bytecode,
+                                   OperandScale operand_scale) {
+  return operand_scale == OperandScale::kSingle ||
+         Bytecodes::IsBytecodeWithScalableOperands(bytecode);
+}
+
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
   return os << Bytecodes::ToString(bytecode);
 }
 
-
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
-  return os << Bytecodes::OperandTypeToString(operand_type);
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
+  return os << Bytecodes::AccumulatorUseToString(use);
 }
 
-
 std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
   return os << Bytecodes::OperandSizeToString(operand_size);
 }
 
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale) {
+  return os << Bytecodes::OperandScaleToString(operand_scale);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
+  return os << Bytecodes::OperandTypeToString(operand_type);
+}
+
 static const int kLastParamRegisterIndex =
     -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
 static const int kFunctionClosureRegisterIndex =
@@ -495,29 +653,17 @@
 static const int kNewTargetRegisterIndex =
     -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
 
-// The register space is a signed 16-bit space. Register operands
-// occupy range above 0. Parameter indices are biased with the
-// negative value kLastParamRegisterIndex for ease of access in the
-// interpreter.
-static const int kMaxParameterIndex = kMaxInt16 + kLastParamRegisterIndex;
-static const int kMaxRegisterIndex = -kMinInt16;
-static const int kMaxReg8Index = -kMinInt8;
-static const int kMinReg8Index = -kMaxInt8;
-static const int kMaxReg16Index = -kMinInt16;
-static const int kMinReg16Index = -kMaxInt16;
-
 bool Register::is_byte_operand() const {
-  return index_ >= kMinReg8Index && index_ <= kMaxReg8Index;
+  return index_ >= -kMaxInt8 && index_ <= -kMinInt8;
 }
 
 bool Register::is_short_operand() const {
-  return index_ >= kMinReg16Index && index_ <= kMaxReg16Index;
+  return index_ >= -kMaxInt16 && index_ <= -kMinInt16;
 }
 
 Register Register::FromParameterIndex(int index, int parameter_count) {
   DCHECK_GE(index, 0);
   DCHECK_LT(index, parameter_count);
-  DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
   int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
   DCHECK_LT(register_index, 0);
   return Register(register_index);
@@ -557,44 +703,6 @@
   return index() == kNewTargetRegisterIndex;
 }
 
-int Register::MaxParameterIndex() { return kMaxParameterIndex; }
-
-int Register::MaxRegisterIndex() { return kMaxRegisterIndex; }
-
-int Register::MaxRegisterIndexForByteOperand() { return kMaxReg8Index; }
-
-uint8_t Register::ToOperand() const {
-  DCHECK(is_byte_operand());
-  return static_cast<uint8_t>(-index_);
-}
-
-
-Register Register::FromOperand(uint8_t operand) {
-  return Register(-static_cast<int8_t>(operand));
-}
-
-
-uint16_t Register::ToWideOperand() const {
-  DCHECK(is_short_operand());
-  return static_cast<uint16_t>(-index_);
-}
-
-
-Register Register::FromWideOperand(uint16_t operand) {
-  return Register(-static_cast<int16_t>(operand));
-}
-
-
-uint32_t Register::ToRawOperand() const {
-  return static_cast<uint32_t>(-index_);
-}
-
-
-Register Register::FromRawOperand(uint32_t operand) {
-  return Register(-static_cast<int32_t>(operand));
-}
-
-
 bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
                              Register reg4, Register reg5) {
   if (reg1.index() + 1 != reg2.index()) {
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index d4863b1..2361271 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -15,37 +15,24 @@
 namespace internal {
 namespace interpreter {
 
-#define INVALID_OPERAND_TYPE_LIST(V) \
-  V(None, OperandSize::kNone)
+#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
 
-#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
-  /* Byte operands. */                      \
-  V(MaybeReg8, OperandSize::kByte)          \
-  V(Reg8, OperandSize::kByte)               \
-  V(RegPair8, OperandSize::kByte)           \
-  /* Short operands. */                     \
-  V(MaybeReg16, OperandSize::kShort)        \
-  V(Reg16, OperandSize::kShort)             \
-  V(RegPair16, OperandSize::kShort)
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V)         \
+  V(MaybeReg, OperandTypeInfo::kScalableSignedByte) \
+  V(Reg, OperandTypeInfo::kScalableSignedByte)      \
+  V(RegPair, OperandTypeInfo::kScalableSignedByte)
 
-#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
-  /* Byte operands. */                       \
-  V(RegOut8, OperandSize::kByte)             \
-  V(RegOutPair8, OperandSize::kByte)         \
-  V(RegOutTriple8, OperandSize::kByte)       \
-  /* Short operands. */                      \
-  V(RegOut16, OperandSize::kShort)           \
-  V(RegOutPair16, OperandSize::kShort)       \
-  V(RegOutTriple16, OperandSize::kShort)
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)          \
+  V(RegOut, OperandTypeInfo::kScalableSignedByte)     \
+  V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
+  V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
 
-#define SCALAR_OPERAND_TYPE_LIST(V) \
-  /* Byte operands. */              \
-  V(Idx8, OperandSize::kByte)       \
-  V(Imm8, OperandSize::kByte)       \
-  V(RegCount8, OperandSize::kByte)  \
-  /* Short operands. */             \
-  V(Idx16, OperandSize::kShort)     \
-  V(RegCount16, OperandSize::kShort)
+#define SCALAR_OPERAND_TYPE_LIST(V)                   \
+  V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
+  V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
+  V(Imm, OperandTypeInfo::kScalableSignedByte)        \
+  V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
+  V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
 
 #define REGISTER_OPERAND_TYPE_LIST(V) \
   REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
@@ -60,235 +47,258 @@
   NON_REGISTER_OPERAND_TYPE_LIST(V) \
   REGISTER_OPERAND_TYPE_LIST(V)
 
-// Define one debug break bytecode for each operands size.
-#define DEBUG_BREAK_BYTECODE_LIST(V)                                           \
-  V(DebugBreak0, OperandType::kNone)                                           \
-  V(DebugBreak1, OperandType::kReg8)                                           \
-  V(DebugBreak2, OperandType::kReg16)                                          \
-  V(DebugBreak3, OperandType::kReg16, OperandType::kReg8)                      \
-  V(DebugBreak4, OperandType::kReg16, OperandType::kReg16)                     \
-  V(DebugBreak5, OperandType::kReg16, OperandType::kReg16, OperandType::kReg8) \
-  V(DebugBreak6, OperandType::kReg16, OperandType::kReg16,                     \
-    OperandType::kReg16)                                                       \
-  V(DebugBreak7, OperandType::kReg16, OperandType::kReg16,                     \
-    OperandType::kReg16, OperandType::kReg8)                                   \
-  V(DebugBreak8, OperandType::kReg16, OperandType::kReg16,                     \
-    OperandType::kReg16, OperandType::kReg16)
+// Define one debug break bytecode for each possible size of unscaled
+// bytecode. Format is V(<bytecode>, <accumulator_use>, <operands>).
+#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V)                                    \
+  V(DebugBreak0, AccumulatorUse::kRead)                                       \
+  V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg)                    \
+  V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg) \
+  V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+    OperandType::kReg)                                                        \
+  V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+    OperandType::kReg, OperandType::kReg)                                     \
+  V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId,              \
+    OperandType::kReg, OperandType::kReg)                                     \
+  V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId,              \
+    OperandType::kReg, OperandType::kReg, OperandType::kReg)
+
+// Define one debug break for each widening prefix.
+#define DEBUG_BREAK_PREFIX_BYTECODE_LIST(V) \
+  V(DebugBreakWide, AccumulatorUse::kRead)  \
+  V(DebugBreakExtraWide, AccumulatorUse::kRead)
+
+#define DEBUG_BREAK_BYTECODE_LIST(V) \
+  DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+  DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
 
 // The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V)                                                       \
-                                                                               \
-  /* Loading the accumulator */                                                \
-  V(LdaZero, OperandType::kNone)                                               \
-  V(LdaSmi8, OperandType::kImm8)                                               \
-  V(LdaUndefined, OperandType::kNone)                                          \
-  V(LdaNull, OperandType::kNone)                                               \
-  V(LdaTheHole, OperandType::kNone)                                            \
-  V(LdaTrue, OperandType::kNone)                                               \
-  V(LdaFalse, OperandType::kNone)                                              \
-  V(LdaConstant, OperandType::kIdx8)                                           \
-  V(LdaConstantWide, OperandType::kIdx16)                                      \
-                                                                               \
-  /* Globals */                                                                \
-  V(LdaGlobal, OperandType::kIdx8, OperandType::kIdx8)                         \
-  V(LdaGlobalInsideTypeof, OperandType::kIdx8, OperandType::kIdx8)             \
-  V(LdaGlobalWide, OperandType::kIdx16, OperandType::kIdx16)                   \
-  V(LdaGlobalInsideTypeofWide, OperandType::kIdx16, OperandType::kIdx16)       \
-  V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
-  V(StaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16)             \
-                                                                               \
-  /* Context operations */                                                     \
-  V(PushContext, OperandType::kReg8)                                           \
-  V(PopContext, OperandType::kReg8)                                            \
-  V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8)                    \
-  V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8)                    \
-  V(LdaContextSlotWide, OperandType::kReg8, OperandType::kIdx16)               \
-  V(StaContextSlotWide, OperandType::kReg8, OperandType::kIdx16)               \
-                                                                               \
-  /* Load-Store lookup slots */                                                \
-  V(LdaLookupSlot, OperandType::kIdx8)                                         \
-  V(LdaLookupSlotInsideTypeof, OperandType::kIdx8)                             \
-  V(LdaLookupSlotWide, OperandType::kIdx16)                                    \
-  V(LdaLookupSlotInsideTypeofWide, OperandType::kIdx16)                        \
-  V(StaLookupSlotSloppy, OperandType::kIdx8)                                   \
-  V(StaLookupSlotStrict, OperandType::kIdx8)                                   \
-  V(StaLookupSlotSloppyWide, OperandType::kIdx16)                              \
-  V(StaLookupSlotStrictWide, OperandType::kIdx16)                              \
-                                                                               \
-  /* Register-accumulator transfers */                                         \
-  V(Ldar, OperandType::kReg8)                                                  \
-  V(Star, OperandType::kRegOut8)                                               \
-                                                                               \
-  /* Register-register transfers */                                            \
-  V(Mov, OperandType::kReg8, OperandType::kRegOut8)                            \
-  V(MovWide, OperandType::kReg16, OperandType::kRegOut16)                      \
-                                                                               \
-  /* LoadIC operations */                                                      \
-  V(LoadIC, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)        \
-  V(KeyedLoadIC, OperandType::kReg8, OperandType::kIdx8)                       \
-  V(LoadICWide, OperandType::kReg8, OperandType::kIdx16, OperandType::kIdx16)  \
-  V(KeyedLoadICWide, OperandType::kReg8, OperandType::kIdx16)                  \
-                                                                               \
-  /* StoreIC operations */                                                     \
-  V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
-  V(StoreICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
-  V(KeyedStoreICSloppy, OperandType::kReg8, OperandType::kReg8,                \
-    OperandType::kIdx8)                                                        \
-  V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8,                \
-    OperandType::kIdx8)                                                        \
-  V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                \
-    OperandType::kIdx16)                                                       \
-  V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16,                \
-    OperandType::kIdx16)                                                       \
-  V(KeyedStoreICSloppyWide, OperandType::kReg8, OperandType::kReg8,            \
-    OperandType::kIdx16)                                                       \
-  V(KeyedStoreICStrictWide, OperandType::kReg8, OperandType::kReg8,            \
-    OperandType::kIdx16)                                                       \
-                                                                               \
-  /* Binary Operators */                                                       \
-  V(Add, OperandType::kReg8)                                                   \
-  V(Sub, OperandType::kReg8)                                                   \
-  V(Mul, OperandType::kReg8)                                                   \
-  V(Div, OperandType::kReg8)                                                   \
-  V(Mod, OperandType::kReg8)                                                   \
-  V(BitwiseOr, OperandType::kReg8)                                             \
-  V(BitwiseXor, OperandType::kReg8)                                            \
-  V(BitwiseAnd, OperandType::kReg8)                                            \
-  V(ShiftLeft, OperandType::kReg8)                                             \
-  V(ShiftRight, OperandType::kReg8)                                            \
-  V(ShiftRightLogical, OperandType::kReg8)                                     \
-                                                                               \
-  /* Unary Operators */                                                        \
-  V(Inc, OperandType::kNone)                                                   \
-  V(Dec, OperandType::kNone)                                                   \
-  V(LogicalNot, OperandType::kNone)                                            \
-  V(TypeOf, OperandType::kNone)                                                \
-  V(DeletePropertyStrict, OperandType::kReg8)                                  \
-  V(DeletePropertySloppy, OperandType::kReg8)                                  \
-                                                                               \
-  /* Call operations */                                                        \
-  V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8,     \
-    OperandType::kIdx8)                                                        \
-  V(CallWide, OperandType::kReg16, OperandType::kReg16,                        \
-    OperandType::kRegCount16, OperandType::kIdx16)                             \
-  V(TailCall, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
-    OperandType::kIdx8)                                                        \
-  V(TailCallWide, OperandType::kReg16, OperandType::kReg16,                    \
-    OperandType::kRegCount16, OperandType::kIdx16)                             \
-  V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8,                 \
-    OperandType::kRegCount8)                                                   \
-  V(CallRuntimeWide, OperandType::kIdx16, OperandType::kMaybeReg16,            \
-    OperandType::kRegCount8)                                                   \
-  V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8,          \
-    OperandType::kRegCount8, OperandType::kRegOutPair8)                        \
-  V(CallRuntimeForPairWide, OperandType::kIdx16, OperandType::kMaybeReg16,     \
-    OperandType::kRegCount8, OperandType::kRegOutPair16)                       \
-  V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8,                    \
-    OperandType::kRegCount8)                                                   \
-  V(CallJSRuntimeWide, OperandType::kIdx16, OperandType::kReg16,               \
-    OperandType::kRegCount16)                                                  \
-                                                                               \
-  /* New operator */                                                           \
-  V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kRegCount8) \
-  V(NewWide, OperandType::kReg16, OperandType::kMaybeReg16,                    \
-    OperandType::kRegCount16)                                                  \
-                                                                               \
-  /* Test Operators */                                                         \
-  V(TestEqual, OperandType::kReg8)                                             \
-  V(TestNotEqual, OperandType::kReg8)                                          \
-  V(TestEqualStrict, OperandType::kReg8)                                       \
-  V(TestNotEqualStrict, OperandType::kReg8)                                    \
-  V(TestLessThan, OperandType::kReg8)                                          \
-  V(TestGreaterThan, OperandType::kReg8)                                       \
-  V(TestLessThanOrEqual, OperandType::kReg8)                                   \
-  V(TestGreaterThanOrEqual, OperandType::kReg8)                                \
-  V(TestInstanceOf, OperandType::kReg8)                                        \
-  V(TestIn, OperandType::kReg8)                                                \
-                                                                               \
-  /* Cast operators */                                                         \
-  V(ToName, OperandType::kNone)                                                \
-  V(ToNumber, OperandType::kNone)                                              \
-  V(ToObject, OperandType::kNone)                                              \
-                                                                               \
-  /* Literals */                                                               \
-  V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kIdx8,               \
-    OperandType::kImm8)                                                        \
-  V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kIdx8,                \
-    OperandType::kImm8)                                                        \
-  V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kIdx8,               \
-    OperandType::kImm8)                                                        \
-  V(CreateRegExpLiteralWide, OperandType::kIdx16, OperandType::kIdx16,         \
-    OperandType::kImm8)                                                        \
-  V(CreateArrayLiteralWide, OperandType::kIdx16, OperandType::kIdx16,          \
-    OperandType::kImm8)                                                        \
-  V(CreateObjectLiteralWide, OperandType::kIdx16, OperandType::kIdx16,         \
-    OperandType::kImm8)                                                        \
-                                                                               \
-  /* Closure allocation */                                                     \
-  V(CreateClosure, OperandType::kIdx8, OperandType::kImm8)                     \
-  V(CreateClosureWide, OperandType::kIdx16, OperandType::kImm8)                \
-                                                                               \
-  /* Arguments allocation */                                                   \
-  V(CreateMappedArguments, OperandType::kNone)                                 \
-  V(CreateUnmappedArguments, OperandType::kNone)                               \
-  V(CreateRestParameter, OperandType::kNone)                                   \
-                                                                               \
-  /* Control Flow */                                                           \
-  V(Jump, OperandType::kImm8)                                                  \
-  V(JumpConstant, OperandType::kIdx8)                                          \
-  V(JumpConstantWide, OperandType::kIdx16)                                     \
-  V(JumpIfTrue, OperandType::kImm8)                                            \
-  V(JumpIfTrueConstant, OperandType::kIdx8)                                    \
-  V(JumpIfTrueConstantWide, OperandType::kIdx16)                               \
-  V(JumpIfFalse, OperandType::kImm8)                                           \
-  V(JumpIfFalseConstant, OperandType::kIdx8)                                   \
-  V(JumpIfFalseConstantWide, OperandType::kIdx16)                              \
-  V(JumpIfToBooleanTrue, OperandType::kImm8)                                   \
-  V(JumpIfToBooleanTrueConstant, OperandType::kIdx8)                           \
-  V(JumpIfToBooleanTrueConstantWide, OperandType::kIdx16)                      \
-  V(JumpIfToBooleanFalse, OperandType::kImm8)                                  \
-  V(JumpIfToBooleanFalseConstant, OperandType::kIdx8)                          \
-  V(JumpIfToBooleanFalseConstantWide, OperandType::kIdx16)                     \
-  V(JumpIfNull, OperandType::kImm8)                                            \
-  V(JumpIfNullConstant, OperandType::kIdx8)                                    \
-  V(JumpIfNullConstantWide, OperandType::kIdx16)                               \
-  V(JumpIfUndefined, OperandType::kImm8)                                       \
-  V(JumpIfUndefinedConstant, OperandType::kIdx8)                               \
-  V(JumpIfUndefinedConstantWide, OperandType::kIdx16)                          \
-  V(JumpIfNotHole, OperandType::kImm8)                                         \
-  V(JumpIfNotHoleConstant, OperandType::kIdx8)                                 \
-  V(JumpIfNotHoleConstantWide, OperandType::kIdx16)                            \
-                                                                               \
-  /* Complex flow control For..in */                                           \
-  V(ForInPrepare, OperandType::kRegOutTriple8)                                 \
-  V(ForInPrepareWide, OperandType::kRegOutTriple16)                            \
-  V(ForInDone, OperandType::kReg8, OperandType::kReg8)                         \
-  V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kRegPair8) \
-  V(ForInNextWide, OperandType::kReg16, OperandType::kReg16,                   \
-    OperandType::kRegPair16)                                                   \
-  V(ForInStep, OperandType::kReg8)                                             \
-                                                                               \
-  /* Perform a stack guard check */                                            \
-  V(StackCheck, OperandType::kNone)                                            \
-                                                                               \
-  /* Non-local flow control */                                                 \
-  V(Throw, OperandType::kNone)                                                 \
-  V(ReThrow, OperandType::kNone)                                               \
-  V(Return, OperandType::kNone)                                                \
-                                                                               \
-  /* Debugger */                                                               \
-  V(Debugger, OperandType::kNone)                                              \
-  DEBUG_BREAK_BYTECODE_LIST(V)
+#define BYTECODE_LIST(V)                                                      \
+  /* Extended width operands */                                               \
+  V(Wide, AccumulatorUse::kNone)                                              \
+  V(ExtraWide, AccumulatorUse::kNone)                                         \
+                                                                              \
+  /* Loading the accumulator */                                               \
+  V(LdaZero, AccumulatorUse::kWrite)                                          \
+  V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm)                        \
+  V(LdaUndefined, AccumulatorUse::kWrite)                                     \
+  V(LdaNull, AccumulatorUse::kWrite)                                          \
+  V(LdaTheHole, AccumulatorUse::kWrite)                                       \
+  V(LdaTrue, AccumulatorUse::kWrite)                                          \
+  V(LdaFalse, AccumulatorUse::kWrite)                                         \
+  V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                   \
+                                                                              \
+  /* Globals */                                                               \
+  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx)  \
+  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx,         \
+    OperandType::kIdx)                                                        \
+  V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                \
+    OperandType::kIdx)                                                        \
+  V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx,                \
+    OperandType::kIdx)                                                        \
+                                                                              \
+  /* Context operations */                                                    \
+  V(PushContext, AccumulatorUse::kRead, OperandType::kReg)                    \
+  V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                     \
+  V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                \
+    OperandType::kIdx)                                                        \
+  V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                 \
+    OperandType::kIdx)                                                        \
+                                                                              \
+  /* Load-Store lookup slots */                                               \
+  V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                 \
+  V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)     \
+  V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
+  V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
+                                                                              \
+  /* Register-accumulator transfers */                                        \
+  V(Ldar, AccumulatorUse::kWrite, OperandType::kReg)                          \
+  V(Star, AccumulatorUse::kRead, OperandType::kRegOut)                        \
+                                                                              \
+  /* Register-register transfers */                                           \
+  V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut)      \
+                                                                              \
+  /* LoadIC operations */                                                     \
+  V(LoadIC, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx,     \
+    OperandType::kIdx)                                                        \
+  V(KeyedLoadIC, AccumulatorUse::kReadWrite, OperandType::kReg,               \
+    OperandType::kIdx)                                                        \
+                                                                              \
+  /* StoreIC operations */                                                    \
+  V(StoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,                  \
+    OperandType::kIdx, OperandType::kIdx)                                     \
+  V(StoreICStrict, AccumulatorUse::kRead, OperandType::kReg,                  \
+    OperandType::kIdx, OperandType::kIdx)                                     \
+  V(KeyedStoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,             \
+    OperandType::kReg, OperandType::kIdx)                                     \
+  V(KeyedStoreICStrict, AccumulatorUse::kRead, OperandType::kReg,             \
+    OperandType::kReg, OperandType::kIdx)                                     \
+                                                                              \
+  /* Binary Operators */                                                      \
+  V(Add, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Div, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg)                \
+  V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg)                \
+  V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg)                \
+  V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg)         \
+                                                                              \
+  /* Unary Operators */                                                       \
+  V(Inc, AccumulatorUse::kReadWrite)                                          \
+  V(Dec, AccumulatorUse::kReadWrite)                                          \
+  V(LogicalNot, AccumulatorUse::kReadWrite)                                   \
+  V(TypeOf, AccumulatorUse::kReadWrite)                                       \
+  V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)      \
+  V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)      \
+                                                                              \
+  /* Call operations */                                                       \
+  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,       \
+    OperandType::kRegCount, OperandType::kIdx)                                \
+  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,   \
+    OperandType::kRegCount, OperandType::kIdx)                                \
+  V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,             \
+    OperandType::kMaybeReg, OperandType::kRegCount)                           \
+  V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId,       \
+    OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
+  V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx,                 \
+    OperandType::kReg, OperandType::kRegCount)                                \
+                                                                              \
+  /* Intrinsics */                                                            \
+  V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kRuntimeId,         \
+    OperandType::kMaybeReg, OperandType::kRegCount)                           \
+                                                                              \
+  /* New operator */                                                          \
+  V(New, AccumulatorUse::kReadWrite, OperandType::kReg,                       \
+    OperandType::kMaybeReg, OperandType::kRegCount)                           \
+                                                                              \
+  /* Test Operators */                                                        \
+  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg)              \
+  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg)           \
+  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg)              \
+  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg)           \
+  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)       \
+  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)    \
+  V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)            \
+  V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                    \
+                                                                              \
+  /* Cast operators */                                                        \
+  V(ToName, AccumulatorUse::kReadWrite)                                       \
+  V(ToNumber, AccumulatorUse::kReadWrite)                                     \
+  V(ToObject, AccumulatorUse::kReadWrite)                                     \
+                                                                              \
+  /* Literals */                                                              \
+  V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
+    OperandType::kIdx, OperandType::kFlag8)                                   \
+  V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
+    OperandType::kIdx, OperandType::kFlag8)                                   \
+  V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
+    OperandType::kIdx, OperandType::kFlag8)                                   \
+                                                                              \
+  /* Closure allocation */                                                    \
+  V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx,                 \
+    OperandType::kFlag8)                                                      \
+                                                                              \
+  /* Arguments allocation */                                                  \
+  V(CreateMappedArguments, AccumulatorUse::kWrite)                            \
+  V(CreateUnmappedArguments, AccumulatorUse::kWrite)                          \
+  V(CreateRestParameter, AccumulatorUse::kWrite)                              \
+                                                                              \
+  /* Control Flow */                                                          \
+  V(Jump, AccumulatorUse::kNone, OperandType::kImm)                           \
+  V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx)                   \
+  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm)                     \
+  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
+  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm)                    \
+  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)            \
+  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm)            \
+  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
+  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm)           \
+  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)   \
+  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm)                     \
+  V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
+  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm)                \
+  V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)        \
+  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                  \
+  V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)          \
+                                                                              \
+  /* Complex flow control For..in */                                          \
+  V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple)          \
+  V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg)  \
+  V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,  \
+    OperandType::kRegPair, OperandType::kIdx)                                 \
+  V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg)                     \
+                                                                              \
+  /* Perform a stack guard check */                                           \
+  V(StackCheck, AccumulatorUse::kNone)                                        \
+                                                                              \
+  /* Non-local flow control */                                                \
+  V(Throw, AccumulatorUse::kRead)                                             \
+  V(ReThrow, AccumulatorUse::kRead)                                           \
+  V(Return, AccumulatorUse::kNone)                                            \
+                                                                              \
+  /* Debugger */                                                              \
+  V(Debugger, AccumulatorUse::kNone)                                          \
+  DEBUG_BREAK_BYTECODE_LIST(V)                                                \
+                                                                              \
+  /* Illegal bytecode (terminates execution) */                               \
+  V(Illegal, AccumulatorUse::kNone)
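The bytecode tables above are X-macro lists: each consumer defines V to pull out the columns it needs. As an illustration of how bytecodes.h consumes the list (a sketch of the standard expansion pattern, not a line-for-line excerpt of this patch), the Bytecode enum and its kLast member come straight out of BYTECODE_LIST:

enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
  BYTECODE_LIST(DECLARE_BYTECODE)
#undef DECLARE_BYTECODE
#define COUNT_BYTECODE(x, ...) +1
  // COUNT_BYTECODE expands to "+1" per entry, so kLast evaluates to the
  // enum value of the last real bytecode in the list.
  kLast = -1 BYTECODE_LIST(COUNT_BYTECODE)
#undef COUNT_BYTECODE
};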
 
-// Enumeration of the size classes of operand types used by bytecodes.
+enum class AccumulatorUse : uint8_t {
+  kNone = 0,
+  kRead = 1 << 0,
+  kWrite = 1 << 1,
+  kReadWrite = kRead | kWrite
+};
+
+V8_INLINE AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
+  int result = static_cast<int>(lhs) & static_cast<int>(rhs);
+  return static_cast<AccumulatorUse>(result);
+}
+
+V8_INLINE AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
+  int result = static_cast<int>(lhs) | static_cast<int>(rhs);
+  return static_cast<AccumulatorUse>(result);
+}
+
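Since kRead and kWrite are independent bits, the accumulator predicates declared on the Bytecodes class below reduce to masking with these operators. A minimal sketch, assuming GetAccumulatorUse(bytecode) returns the AccumulatorUse column recorded in BYTECODE_LIST:

// Sketch: deriving the accumulator predicates from the flag bits.
bool ReadsAccumulator(Bytecode bytecode) {
  return (Bytecodes::GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
         AccumulatorUse::kRead;
}

bool WritesAccumulator(Bytecode bytecode) {
  return (Bytecodes::GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
         AccumulatorUse::kWrite;
}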
+// Enumeration of scaling factors applicable to scalable operands. Code
+// relies on being able to cast values to integer scaling values.
+enum class OperandScale : uint8_t {
+  kSingle = 1,
+  kDouble = 2,
+  kQuadruple = 4,
+  kMaxValid = kQuadruple,
+  kInvalid = 8,
+};
+
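The Wide and ExtraWide prefixes at the top of BYTECODE_LIST map onto these factors: an unprefixed bytecode runs at kSingle, Wide doubles and ExtraWide quadruples every scalable operand. A sketch of that mapping (the helper name is a placeholder for the OperandScaleToPrefixBytecode / PrefixBytecodeToOperandScale pair declared further down):

// Sketch: correspondence between widening prefixes and operand scales.
OperandScale PrefixToOperandScale(Bytecode bytecode) {
  switch (bytecode) {
    case Bytecode::kWide:
      return OperandScale::kDouble;     // scalable operands use 2 bytes
    case Bytecode::kExtraWide:
      return OperandScale::kQuadruple;  // scalable operands use 4 bytes
    default:
      return OperandScale::kSingle;     // no prefix: unscaled operands
  }
}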
+// Enumeration of the size classes of operand types used by
+// bytecodes. Code relies on being able to cast values to integer
+// types to get the size in bytes.
 enum class OperandSize : uint8_t {
   kNone = 0,
   kByte = 1,
   kShort = 2,
+  kQuad = 4,
+  kLast = kQuad
 };
 
+// Primitive operand info that summarizes the properties of operands.
+// Columns are Name, IsScalable, IsUnsigned, UnscaledSize.
+#define OPERAND_TYPE_INFO_LIST(V)                         \
+  V(None, false, false, OperandSize::kNone)               \
+  V(ScalableSignedByte, true, false, OperandSize::kByte)  \
+  V(ScalableUnsignedByte, true, true, OperandSize::kByte) \
+  V(FixedUnsignedByte, false, true, OperandSize::kByte)   \
+  V(FixedUnsignedShort, false, true, OperandSize::kShort)
+
+enum class OperandTypeInfo : uint8_t {
+#define DECLARE_OPERAND_TYPE_INFO(Name, ...) k##Name,
+  OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
+};
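The scaled size of an operand follows directly from these columns: fixed operands keep their unscaled size, scalable ones are multiplied by the current OperandScale. A sketch of that computation (the helper name and parameters are assumptions; the real lookup sits behind Bytecodes::GetOperandSize):

// Sketch: size of one operand at a given scale. |is_scalable| and
// |unscaled_size| are the IsScalable and UnscaledSize columns of
// OPERAND_TYPE_INFO_LIST for the operand's type.
OperandSize ScaledOperandSize(bool is_scalable, OperandSize unscaled_size,
                              OperandScale operand_scale) {
  int size = static_cast<int>(unscaled_size);
  if (is_scalable) {
    size *= static_cast<int>(operand_scale);  // 1, 2 or 4
  }
  return static_cast<OperandSize>(size);      // kByte, kShort or kQuad
}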
 
 // Enumeration of operand types used by bytecodes.
 enum class OperandType : uint8_t {
@@ -330,9 +340,6 @@
 
   static Register FromParameterIndex(int index, int parameter_count);
   int ToParameterIndex(int parameter_count) const;
-  static int MaxParameterIndex();
-  static int MaxRegisterIndex();
-  static int MaxRegisterIndexForByteOperand();
 
   // Returns an invalid register.
   static Register invalid_value() { return Register(); }
@@ -349,14 +356,8 @@
   static Register new_target();
   bool is_new_target() const;
 
-  static Register FromOperand(uint8_t operand);
-  uint8_t ToOperand() const;
-
-  static Register FromWideOperand(uint16_t operand);
-  uint16_t ToWideOperand() const;
-
-  static Register FromRawOperand(uint32_t raw_operand);
-  uint32_t ToRawOperand() const;
+  int32_t ToOperand() const { return -index_; }
+  static Register FromOperand(int32_t operand) { return Register(-operand); }
 
   static bool AreContiguous(Register reg1, Register reg2,
                             Register reg3 = Register(),
@@ -399,9 +400,18 @@
   // Returns string representation of |bytecode|.
   static const char* ToString(Bytecode bytecode);
 
+  // Returns string representation of |bytecode| combined with |operand_scale|.
+  static std::string ToString(Bytecode bytecode, OperandScale operand_scale);
+
+  // Returns string representation of |accumulator_use|.
+  static const char* AccumulatorUseToString(AccumulatorUse accumulator_use);
+
   // Returns string representation of |operand_type|.
   static const char* OperandTypeToString(OperandType operand_type);
 
+  // Returns string representation of |operand_scale|.
+  static const char* OperandScaleToString(OperandScale operand_scale);
+
   // Returns string representation of |operand_size|.
   static const char* OperandSizeToString(OperandSize operand_size);
 
@@ -417,57 +427,72 @@
   // Returns the number of register operands expected by |bytecode|.
   static int NumberOfRegisterOperands(Bytecode bytecode);
 
+  // Returns the prefix bytecode representing an operand scale to be
+  // applied to a bytecode.
+  static Bytecode OperandScaleToPrefixBytecode(OperandScale operand_scale);
+
+  // Returns true if the operand scale requires a prefix bytecode.
+  static bool OperandScaleRequiresPrefixBytecode(OperandScale operand_scale);
+
+  // Returns the scaling applied to scalable operands if |bytecode|
+  // is a scaling prefix.
+  static OperandScale PrefixBytecodeToOperandScale(Bytecode bytecode);
+
+  // Returns how accumulator is used by |bytecode|.
+  static AccumulatorUse GetAccumulatorUse(Bytecode bytecode);
+
+  // Returns true if |bytecode| reads the accumulator.
+  static bool ReadsAccumulator(Bytecode bytecode);
+
+  // Returns true if |bytecode| writes the accumulator.
+  static bool WritesAccumulator(Bytecode bytecode);
+
   // Returns the i-th operand of |bytecode|.
   static OperandType GetOperandType(Bytecode bytecode, int i);
 
   // Returns the size of the i-th operand of |bytecode|.
-  static OperandSize GetOperandSize(Bytecode bytecode, int i);
+  static OperandSize GetOperandSize(Bytecode bytecode, int i,
+                                    OperandScale operand_scale);
 
   // Returns the offset of the i-th operand of |bytecode| relative to the start
   // of the bytecode.
-  static int GetOperandOffset(Bytecode bytecode, int i);
+  static int GetOperandOffset(Bytecode bytecode, int i,
+                              OperandScale operand_scale);
 
   // Returns a zero-based bitmap of the register operand positions of
   // |bytecode|.
   static int GetRegisterOperandBitmap(Bytecode bytecode);
 
-  // Returns a debug break bytecode with a matching operand size.
+  // Returns a debug break bytecode to replace |bytecode|.
   static Bytecode GetDebugBreak(Bytecode bytecode);
 
-  // Returns the size of the bytecode including its operands.
-  static int Size(Bytecode bytecode);
+  // Returns the size of the bytecode including its operands for the
+  // given |operand_scale|.
+  static int Size(Bytecode bytecode, OperandScale operand_scale);
 
   // Returns the size of |operand|.
-  static OperandSize SizeOfOperand(OperandType operand);
+  static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
 
   // Returns true if the bytecode is a conditional jump taking
-  // an immediate byte operand (OperandType::kImm8).
+  // an immediate byte operand (OperandType::kImm).
   static bool IsConditionalJumpImmediate(Bytecode bytecode);
 
   // Returns true if the bytecode is a conditional jump taking
-  // a constant pool entry (OperandType::kIdx8).
+  // a constant pool entry (OperandType::kIdx).
   static bool IsConditionalJumpConstant(Bytecode bytecode);
 
   // Returns true if the bytecode is a conditional jump taking
-  // a constant pool entry (OperandType::kIdx16).
-  static bool IsConditionalJumpConstantWide(Bytecode bytecode);
-
-  // Returns true if the bytecode is a conditional jump taking
   // any kind of operand.
   static bool IsConditionalJump(Bytecode bytecode);
 
   // Returns true if the bytecode is a jump or a conditional jump taking
-  // an immediate byte operand (OperandType::kImm8).
+  // an immediate byte operand (OperandType::kImm).
   static bool IsJumpImmediate(Bytecode bytecode);
 
   // Returns true if the bytecode is a jump or conditional jump taking a
-  // constant pool entry (OperandType::kIdx8).
+  // constant pool entry (OperandType::kIdx).
   static bool IsJumpConstant(Bytecode bytecode);
 
-  // Returns true if the bytecode is a jump or conditional jump taking a
-  // constant pool entry (OperandType::kIdx16).
-  static bool IsJumpConstantWide(Bytecode bytecode);
-
   // Returns true if the bytecode is a jump or conditional jump taking
   // any kind of operand.
   static bool IsJump(Bytecode bytecode);
@@ -478,18 +503,17 @@
   // Returns true if the bytecode is a call or a constructor call.
   static bool IsCallOrNew(Bytecode bytecode);
 
+  // Returns true if the bytecode is a call to the runtime.
+  static bool IsCallRuntime(Bytecode bytecode);
+
   // Returns true if the bytecode is a debug break.
   static bool IsDebugBreak(Bytecode bytecode);
 
-  // Returns true if |operand_type| is a register index operand (kIdx8/kIdx16).
-  static bool IsIndexOperandType(OperandType operand_type);
+  // Returns true if the bytecode has wider operand forms.
+  static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
 
-  // Returns true if |operand_type| represents an immediate.
-  static bool IsImmediateOperandType(OperandType operand_type);
-
-  // Returns true if |operand_type| is a register count operand
-  // (kRegCount8/kRegCount16).
-  static bool IsRegisterCountOperandType(OperandType operand_type);
+  // Returns true if the bytecode is a scaling prefix bytecode.
+  static bool IsPrefixScalingBytecode(Bytecode bytecode);
 
   // Returns true if |operand_type| is any type of register operand.
   static bool IsRegisterOperandType(OperandType operand_type);
@@ -501,20 +525,52 @@
   static bool IsRegisterOutputOperandType(OperandType operand_type);
 
   // Returns true if |operand_type| is a maybe register operand
-  // (kMaybeReg8/kMaybeReg16).
+  // (kMaybeReg).
   static bool IsMaybeRegisterOperandType(OperandType operand_type);
 
+  // Returns true if |operand_type| is a runtime-id operand (kRuntimeId).
+  static bool IsRuntimeIdOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is unsigned, false if signed.
+  static bool IsUnsignedOperandType(OperandType operand_type);
+
+  // Decodes a register operand in a byte array.
+  static Register DecodeRegisterOperand(const uint8_t* operand_start,
+                                        OperandType operand_type,
+                                        OperandScale operand_scale);
+
+  // Decodes a signed operand in a byte array.
+  static int32_t DecodeSignedOperand(const uint8_t* operand_start,
+                                     OperandType operand_type,
+                                     OperandScale operand_scale);
+
+  // Decodes an unsigned operand in a byte array.
+  static uint32_t DecodeUnsignedOperand(const uint8_t* operand_start,
+                                        OperandType operand_type,
+                                        OperandScale operand_scale);
+
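The width read by these decoders is the scaled operand size from above; on a little-endian target an unsigned decode reduces to assembling the bytes from most to least significant. A host-side sketch only (the function name and the little-endian assumption are not from the patch):

// Sketch: decode an unsigned operand of |size| bytes at |operand_start|,
// assuming little-endian byte order (operand_start[0] is least significant).
uint32_t DecodeUnsignedLE(const uint8_t* operand_start, OperandSize size) {
  uint32_t value = 0;
  for (int i = static_cast<int>(size) - 1; i >= 0; --i) {
    value = (value << 8) | operand_start[i];
  }
  return value;
}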
   // Decode a single bytecode and operands to |os|.
   static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
                               int number_of_parameters);
 
+  // Returns true if a handler is generated for a bytecode at a given
+  // operand scale. All bytecodes have handlers at OperandScale::kSingle,
+  // but only bytecodes with scalable operands have handlers with larger
+  // OperandScale values.
+  static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
+
+  // Returns the next larger operand scale.
+  static OperandScale NextOperandScale(OperandScale operand_scale);
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
 };
 
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
 std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_type);
 
 }  // namespace interpreter
 }  // namespace internal
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
index e8b1281..7ce50b5 100644
--- a/src/interpreter/constant-array-builder.cc
+++ b/src/interpreter/constant-array-builder.cc
@@ -11,28 +11,25 @@
 namespace internal {
 namespace interpreter {
 
-ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(Zone* zone,
-                                                             size_t start_index,
-                                                             size_t capacity)
+ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(
+    Zone* zone, size_t start_index, size_t capacity, OperandSize operand_size)
     : start_index_(start_index),
       capacity_(capacity),
       reserved_(0),
+      operand_size_(operand_size),
       constants_(zone) {}
 
-
 void ConstantArrayBuilder::ConstantArraySlice::Reserve() {
   DCHECK_GT(available(), 0u);
   reserved_++;
   DCHECK_LE(reserved_, capacity() - constants_.size());
 }
 
-
 void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
   DCHECK_GT(reserved_, 0u);
   reserved_--;
 }
 
-
 size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
     Handle<Object> object) {
   DCHECK_GT(available(), 0u);
@@ -42,45 +39,57 @@
   return index + start_index();
 }
 
-
 Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
     size_t index) const {
+  DCHECK_GE(index, start_index());
+  DCHECK_LT(index, start_index() + size());
   return constants_[index - start_index()];
 }
 
-
-STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kMaxCapacity;
-STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kLowCapacity;
-
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+    ConstantArrayBuilder::k16BitCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+    ConstantArrayBuilder::k32BitCapacity;
 
 ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
-    : isolate_(isolate),
-      idx8_slice_(zone, 0, kLowCapacity),
-      idx16_slice_(zone, kLowCapacity, kHighCapacity),
-      constants_map_(isolate->heap(), zone) {
-  STATIC_ASSERT(kMaxCapacity == static_cast<size_t>(kMaxUInt16 + 1));
-  DCHECK_EQ(idx8_slice_.start_index(), 0u);
-  DCHECK_EQ(idx8_slice_.capacity(), kLowCapacity);
-  DCHECK_EQ(idx16_slice_.start_index(), kLowCapacity);
-  DCHECK_EQ(idx16_slice_.capacity(), kMaxCapacity - kLowCapacity);
+    : isolate_(isolate), constants_map_(isolate->heap(), zone) {
+  idx_slice_[0] =
+      new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
+  idx_slice_[1] = new (zone) ConstantArraySlice(
+      zone, k8BitCapacity, k16BitCapacity, OperandSize::kShort);
+  idx_slice_[2] = new (zone) ConstantArraySlice(
+      zone, k8BitCapacity + k16BitCapacity, k32BitCapacity, OperandSize::kQuad);
 }
 
-
 size_t ConstantArrayBuilder::size() const {
-  if (idx16_slice_.size() > 0) {
-    return idx16_slice_.start_index() + idx16_slice_.size();
-  } else {
-    return idx8_slice_.size();
+  size_t i = arraysize(idx_slice_);
+  while (i > 0) {
+    ConstantArraySlice* slice = idx_slice_[--i];
+    if (slice->size() > 0) {
+      return slice->start_index() + slice->size();
+    }
   }
+  return idx_slice_[0]->size();
 }
 
+const ConstantArrayBuilder::ConstantArraySlice*
+ConstantArrayBuilder::IndexToSlice(size_t index) const {
+  for (const ConstantArraySlice* slice : idx_slice_) {
+    if (index <= slice->max_index()) {
+      return slice;
+    }
+  }
+  UNREACHABLE();
+  return nullptr;
+}
 
 Handle<Object> ConstantArrayBuilder::At(size_t index) const {
-  if (index >= idx16_slice_.start_index()) {
-    return idx16_slice_.At(index);
-  } else if (index < idx8_slice_.size()) {
-    return idx8_slice_.At(index);
+  const ConstantArraySlice* slice = IndexToSlice(index);
+  if (index < slice->start_index() + slice->size()) {
+    return slice->At(index);
   } else {
+    DCHECK_LT(index, slice->capacity());
     return isolate_->factory()->the_hole_value();
   }
 }
@@ -88,49 +97,82 @@
 Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
   Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
       static_cast<int>(size()), PretenureFlag::TENURED);
-  for (int i = 0; i < fixed_array->length(); i++) {
-    fixed_array->set(i, *At(static_cast<size_t>(i)));
+  int array_index = 0;
+  for (const ConstantArraySlice* slice : idx_slice_) {
+    if (array_index == fixed_array->length()) {
+      break;
+    }
+    DCHECK(array_index == 0 ||
+           base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+    // Copy objects from slice into array.
+    for (size_t i = 0; i < slice->size(); ++i) {
+      fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
+    }
+    // Insert holes where reservations led to unused slots.
+    size_t padding =
+        std::min(static_cast<size_t>(fixed_array->length() - array_index),
+                 slice->capacity() - slice->size());
+    for (size_t i = 0; i < padding; i++) {
+      fixed_array->set(array_index++, *isolate_->factory()->the_hole_value());
+    }
   }
+  DCHECK_EQ(array_index, fixed_array->length());
   constants_map()->Clear();
   return fixed_array;
 }
 
-
 size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
   index_t* entry = constants_map()->Find(object);
   return (entry == nullptr) ? AllocateEntry(object) : *entry;
 }
 
-
 ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
     Handle<Object> object) {
   DCHECK(!object->IsOddball());
-  size_t index;
   index_t* entry = constants_map()->Get(object);
-  if (idx8_slice_.available() > 0) {
-    index = idx8_slice_.Allocate(object);
-  } else {
-    index = idx16_slice_.Allocate(object);
+  for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+    if (idx_slice_[i]->available() > 0) {
+      size_t index = idx_slice_[i]->Allocate(object);
+      *entry = static_cast<index_t>(index);
+      return *entry;
+    }
   }
-  CHECK_LT(index, kMaxCapacity);
-  *entry = static_cast<index_t>(index);
-  return *entry;
+  UNREACHABLE();
+  return kMaxUInt32;
 }
 
-
 OperandSize ConstantArrayBuilder::CreateReservedEntry() {
-  if (idx8_slice_.available() > 0) {
-    idx8_slice_.Reserve();
-    return OperandSize::kByte;
-  } else if (idx16_slice_.available() > 0) {
-    idx16_slice_.Reserve();
-    return OperandSize::kShort;
-  } else {
-    UNREACHABLE();
-    return OperandSize::kNone;
+  for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+    if (idx_slice_[i]->available() > 0) {
+      idx_slice_[i]->Reserve();
+      return idx_slice_[i]->operand_size();
+    }
   }
+  UNREACHABLE();
+  return OperandSize::kNone;
 }
 
+ConstantArrayBuilder::ConstantArraySlice*
+ConstantArrayBuilder::OperandSizeToSlice(OperandSize operand_size) const {
+  ConstantArraySlice* slice = nullptr;
+  switch (operand_size) {
+    case OperandSize::kNone:
+      UNREACHABLE();
+      break;
+    case OperandSize::kByte:
+      slice = idx_slice_[0];
+      break;
+    case OperandSize::kShort:
+      slice = idx_slice_[1];
+      break;
+    case OperandSize::kQuad:
+      slice = idx_slice_[2];
+      break;
+  }
+  DCHECK(slice->operand_size() == operand_size);
+  return slice;
+}
 
 size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
                                                  Handle<Object> object) {
@@ -140,33 +182,20 @@
   if (nullptr == entry) {
     index = AllocateEntry(object);
   } else {
-    if (operand_size == OperandSize::kByte &&
-        *entry >= idx8_slice_.capacity()) {
-      // The object is already in the constant array, but has an index
-      // outside the range of an idx8 operand so we need to create a
-      // duplicate entry in the idx8 operand range to satisfy the
-      // commitment.
-      *entry = static_cast<index_t>(idx8_slice_.Allocate(object));
+    ConstantArraySlice* slice = OperandSizeToSlice(operand_size);
+    if (*entry > slice->max_index()) {
+      // The object is already in the constant array, but its index may
+      // be too large for the reserved operand_size, so allocate a
+      // duplicate entry in the slice matching that operand size.
+      *entry = static_cast<index_t>(slice->Allocate(object));
     }
     index = *entry;
   }
-  DCHECK(operand_size == OperandSize::kShort || index < idx8_slice_.capacity());
-  DCHECK_LT(index, kMaxCapacity);
   return index;
 }
 
-
 void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
-  switch (operand_size) {
-    case OperandSize::kByte:
-      idx8_slice_.Unreserve();
-      return;
-    case OperandSize::kShort:
-      idx16_slice_.Unreserve();
-      return;
-    default:
-      UNREACHABLE();
-  }
+  OperandSizeToSlice(operand_size)->Unreserve();
 }
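The reserve/commit/discard protocol exists for emitters that must choose an operand width before the constant is known (e.g. a forward jump whose target is patched later): CreateReservedEntry reserves a slot in the smallest slice with space and reports the operand size that slice implies, and the caller later commits a value at that size or releases the reservation. A usage sketch with placeholder names (EmitWithReservedConstant, |target| and |have_target| are not from the patch):

// Sketch: using the reservation protocol of ConstantArrayBuilder.
size_t EmitWithReservedConstant(ConstantArrayBuilder* builder,
                                Handle<Object> target, bool have_target) {
  OperandSize reserved_size = builder->CreateReservedEntry();
  // ... a bytecode with a constant-pool operand of |reserved_size| bytes
  // would be emitted here and patched after the commit ...
  if (!have_target) {
    builder->DiscardReservedEntry(reserved_size);  // release the unused slot
    return 0;
  }
  // The returned index fits in |reserved_size| bytes; CommitReservedEntry
  // duplicates the entry into a lower slice if the existing index is too big.
  return builder->CommitReservedEntry(reserved_size, target);
}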
 
 }  // namespace interpreter
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index d7e41e3..1a68646 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -23,13 +23,14 @@
 class ConstantArrayBuilder final BASE_EMBEDDED {
  public:
   // Capacity of the 8-bit operand slice.
-  static const size_t kLowCapacity = 1u << kBitsPerByte;
-
-  // Capacity of the combined 8-bit and 16-bit operand slices.
-  static const size_t kMaxCapacity = 1u << (2 * kBitsPerByte);
+  static const size_t k8BitCapacity = 1u << kBitsPerByte;
 
   // Capacity of the 16-bit operand slice.
-  static const size_t kHighCapacity = kMaxCapacity - kLowCapacity;
+  static const size_t k16BitCapacity = (1u << 2 * kBitsPerByte) - k8BitCapacity;
+
+  // Capacity of the 32-bit operand slice.
+  static const size_t k32BitCapacity =
+      kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
 
   ConstantArrayBuilder(Isolate* isolate, Zone* zone);
 
@@ -60,12 +61,13 @@
   void DiscardReservedEntry(OperandSize operand_size);
 
  private:
-  typedef uint16_t index_t;
+  typedef uint32_t index_t;
 
   index_t AllocateEntry(Handle<Object> object);
 
-  struct ConstantArraySlice final {
-    ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity);
+  struct ConstantArraySlice final : public ZoneObject {
+    ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
+                       OperandSize operand_size);
     void Reserve();
     void Unreserve();
     size_t Allocate(Handle<Object> object);
@@ -76,21 +78,26 @@
     inline size_t capacity() const { return capacity_; }
     inline size_t size() const { return constants_.size(); }
     inline size_t start_index() const { return start_index_; }
+    inline size_t max_index() const { return start_index_ + capacity() - 1; }
+    inline OperandSize operand_size() const { return operand_size_; }
 
    private:
     const size_t start_index_;
     const size_t capacity_;
     size_t reserved_;
+    OperandSize operand_size_;
     ZoneVector<Handle<Object>> constants_;
 
     DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
   };
 
+  const ConstantArraySlice* IndexToSlice(size_t index) const;
+  ConstantArraySlice* OperandSizeToSlice(OperandSize operand_size) const;
+
   IdentityMap<index_t>* constants_map() { return &constants_map_; }
 
   Isolate* isolate_;
-  ConstantArraySlice idx8_slice_;
-  ConstantArraySlice idx16_slice_;
+  ConstantArraySlice* idx_slice_[3];
   IdentityMap<index_t> constants_map_;
 };
 
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 440e879..2663e4a 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -22,12 +22,16 @@
 using compiler::Node;
 
 InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
-                                           Bytecode bytecode)
-    : compiler::CodeStubAssembler(
-          isolate, zone, InterpreterDispatchDescriptor(isolate),
-          Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
+                                           Bytecode bytecode,
+                                           OperandScale operand_scale)
+    : compiler::CodeStubAssembler(isolate, zone,
+                                  InterpreterDispatchDescriptor(isolate),
+                                  Code::ComputeFlags(Code::BYTECODE_HANDLER),
+                                  Bytecodes::ToString(bytecode), 0),
       bytecode_(bytecode),
+      operand_scale_(operand_scale),
       accumulator_(this, MachineRepresentation::kTagged),
+      accumulator_use_(AccumulatorUse::kNone),
       context_(this, MachineRepresentation::kTagged),
       bytecode_array_(this, MachineRepresentation::kTagged),
       disable_stack_check_across_call_(false),
@@ -42,11 +46,26 @@
   }
 }
 
-InterpreterAssembler::~InterpreterAssembler() {}
+InterpreterAssembler::~InterpreterAssembler() {
+  // If the following check fails, the handler does not use the
+  // accumulator in the way described by the bytecode definitions in
+  // bytecodes.h.
+  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+}
 
-Node* InterpreterAssembler::GetAccumulator() { return accumulator_.value(); }
+Node* InterpreterAssembler::GetAccumulatorUnchecked() {
+  return accumulator_.value();
+}
+
+Node* InterpreterAssembler::GetAccumulator() {
+  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
+  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
+  return GetAccumulatorUnchecked();
+}
 
 void InterpreterAssembler::SetAccumulator(Node* value) {
+  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
+  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
   accumulator_.Bind(value);
 }
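With this tracking, every handler's accumulator accesses are checked against the AccumulatorUse column of BYTECODE_LIST: the destructor DCHECK fires if a handler touches the accumulator in a way its bytecode does not declare, or omits a declared access. A sketch of a conforming handler for a kReadWrite bytecode such as ToNumber (the wrapper name is a simplification, not taken from the patch):

// Sketch: handler body for an AccumulatorUse::kReadWrite bytecode.
// GetAccumulator() records the read and SetAccumulator() the write, so
// accumulator_use_ matches Bytecodes::GetAccumulatorUse(Bytecode::kToNumber).
void GenerateToNumberHandler(InterpreterAssembler* assembler) {
  Node* value = assembler->GetAccumulator();    // marks AccumulatorUse::kRead
  Node* result = assembler->CallRuntime(Runtime::kToNumber,
                                        assembler->GetContext(), value);
  assembler->SetAccumulator(result);            // marks AccumulatorUse::kWrite
  assembler->Dispatch();
}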
 
@@ -79,11 +98,11 @@
 
 Node* InterpreterAssembler::LoadRegister(int offset) {
   return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
-              Int32Constant(offset));
+              IntPtrConstant(offset));
 }
 
 Node* InterpreterAssembler::LoadRegister(Register reg) {
-  return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+  return LoadRegister(IntPtrConstant(-reg.index()));
 }
 
 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
@@ -97,12 +116,12 @@
 
 Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             RegisterFileRawPointer(), Int32Constant(offset),
+                             RegisterFileRawPointer(), IntPtrConstant(offset),
                              value);
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
-  return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+  return StoreRegister(value, IntPtrConstant(-reg.index()));
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
@@ -113,27 +132,31 @@
 
 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
   // Register indexes are negative, so the next index is minus one.
-  return IntPtrAdd(reg_index, Int32Constant(-1));
+  return IntPtrAdd(reg_index, IntPtrConstant(-1));
 }
 
-Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kByte,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  return Load(
-      MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                      bytecode_, operand_index))));
+Node* InterpreterAssembler::OperandOffset(int operand_index) {
+  return IntPtrConstant(
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
 }
 
-Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kByte,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  Node* load = Load(
-      MachineType::Int8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                      bytecode_, operand_index))));
+  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  Node* operand_offset = OperandOffset(operand_index);
+  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+              IntPtrAdd(BytecodeOffset(), operand_offset));
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  Node* operand_offset = OperandOffset(operand_index);
+  Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+                    IntPtrAdd(BytecodeOffset(), operand_offset));
+
   // Ensure that we sign extend to full pointer size
   if (kPointerSize == 8) {
     load = ChangeInt32ToInt64(load);
@@ -141,58 +164,85 @@
   return load;
 }
 
-Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kShort,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  if (TargetSupportsUnalignedAccess()) {
-    return Load(
-        MachineType::Uint16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                        bytecode_, operand_index))));
-  } else {
-    int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
-    Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                            IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
-    Node* second_byte =
-        Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-             IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+    int relative_offset, MachineType result_type) {
+  static const int kMaxCount = 4;
+  DCHECK(!TargetSupportsUnalignedAccess());
+
+  int count;
+  switch (result_type.representation()) {
+    case MachineRepresentation::kWord16:
+      count = 2;
+      break;
+    case MachineRepresentation::kWord32:
+      count = 4;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  MachineType msb_type =
+      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
+
 #if V8_TARGET_LITTLE_ENDIAN
-    return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
+  const int kStep = -1;
+  int msb_offset = count - 1;
 #elif V8_TARGET_BIG_ENDIAN
-    return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
+  const int kStep = 1;
+  int msb_offset = 0;
 #else
 #error "Unknown Architecture"
 #endif
+
+  // Read the most significant byte into bytes[0] and then, in order,
+  // down to the least significant byte in bytes[count - 1].
+  DCHECK(count <= kMaxCount);
+  compiler::Node* bytes[kMaxCount];
+  for (int i = 0; i < count; i++) {
+    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
+    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
+    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
+    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
+  }
+
+  // Pack LSB to MSB.
+  Node* result = bytes[--count];
+  for (int i = 1; --count >= 0; i++) {
+    Node* shift = Int32Constant(i * kBitsPerByte);
+    Node* value = Word32Shl(bytes[count], shift);
+    result = Word32Or(value, result);
+  }
+  return result;
+}
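
For readers unfamiliar with the byte-at-a-time path, here is a hedged plain-C++ sketch of the same idea for a little-endian 16-bit signed operand: load the most significant byte sign-extended, the rest zero-extended, and pack LSB to MSB with shifts and ORs. The buffer layout and helper names are assumptions for illustration, not the generated TurboFan graph:

    #include <cstdint>
    #include <cstdio>

    // Reads a little-endian 16-bit signed operand one byte at a time, as an
    // unaligned-access fallback would: sign-extend the MSB, zero-extend the
    // LSB, then pack with a shift and an OR.
    int32_t ReadSignedShortUnaligned(const uint8_t* bytecode, int offset) {
      int32_t msb = static_cast<int8_t>(bytecode[offset + 1]);  // sign-extended
      uint32_t lsb = bytecode[offset];                          // zero-extended
      return static_cast<int32_t>((static_cast<uint32_t>(msb) << 8) | lsb);
    }

    int main() {
      const uint8_t stream[] = {0x34, 0x12, 0xFE, 0xFF};
      std::printf("%d\n", ReadSignedShortUnaligned(stream, 0));  // 4660 (0x1234)
      std::printf("%d\n", ReadSignedShortUnaligned(stream, 2));  // -2
      return 0;
    }
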
+
+Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(
+      OperandSize::kShort,
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
   }
 }
 
-Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
-    int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kShort,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+  DCHECK_EQ(
+      OperandSize::kShort,
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
   Node* load;
   if (TargetSupportsUnalignedAccess()) {
     load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
   } else {
-#if V8_TARGET_LITTLE_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
-    Node* lo_byte_offset = Int32Constant(operand_offset);
-#elif V8_TARGET_BIG_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset);
-    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
-#else
-#error "Unknown Architecture"
-#endif
-    Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
-                         IntPtrAdd(BytecodeOffset(), hi_byte_offset));
-    Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                         IntPtrAdd(BytecodeOffset(), lo_byte_offset));
-    hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
-    load = Word32Or(hi_byte, lo_byte);
+    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
   }
 
   // Ensure that we sign extend to full pointer size
@@ -202,57 +252,123 @@
   return load;
 }
 
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
-  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
+  }
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  Node* load;
+  if (TargetSupportsUnalignedAccess()) {
+    load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
+  }
+
+  // Ensure that we sign extend to full pointer size
+  if (kPointerSize == 8) {
+    load = ChangeInt32ToInt64(load);
+  }
+  return load;
+}
+
+Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
+                                                  OperandSize operand_size) {
+  DCHECK(!Bytecodes::IsUnsignedOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  switch (operand_size) {
     case OperandSize::kByte:
-      DCHECK_EQ(OperandType::kRegCount8,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
+      return BytecodeOperandSignedByte(operand_index);
     case OperandSize::kShort:
-      DCHECK_EQ(OperandType::kRegCount16,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
+      return BytecodeOperandSignedShort(operand_index);
+    case OperandSize::kQuad:
+      return BytecodeOperandSignedQuad(operand_index);
     case OperandSize::kNone:
       UNREACHABLE();
   }
   return nullptr;
 }
 
+Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
+                                                    OperandSize operand_size) {
+  DCHECK(Bytecodes::IsUnsignedOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  switch (operand_size) {
+    case OperandSize::kByte:
+      return BytecodeOperandUnsignedByte(operand_index);
+    case OperandSize::kShort:
+      return BytecodeOperandUnsignedShort(operand_index);
+    case OperandSize::kQuad:
+      return BytecodeOperandUnsignedQuad(operand_index);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+  DCHECK_EQ(OperandType::kRegCount,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
+  DCHECK_EQ(OperandType::kFlag8,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kByte);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
-  DCHECK_EQ(OperandType::kImm8,
+  DCHECK_EQ(OperandType::kImm,
             Bytecodes::GetOperandType(bytecode_, operand_index));
-  return BytecodeOperandSignExtended(operand_index);
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeSignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
-  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
-    case OperandSize::kByte:
-      DCHECK_EQ(OperandType::kIdx8,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
-    case OperandSize::kShort:
-      DCHECK_EQ(OperandType::kIdx16,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
-    case OperandSize::kNone:
-      UNREACHABLE();
-  }
-  return nullptr;
+  DCHECK(OperandType::kIdx ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
-  OperandType operand_type =
-      Bytecodes::GetOperandType(bytecode_, operand_index);
-  if (Bytecodes::IsRegisterOperandType(operand_type)) {
-    OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
-    if (operand_size == OperandSize::kByte) {
-      return BytecodeOperandSignExtended(operand_index);
-    } else if (operand_size == OperandSize::kShort) {
-      return BytecodeOperandShortSignExtended(operand_index);
-    }
-  }
-  UNREACHABLE();
-  return nullptr;
+  DCHECK(Bytecodes::IsRegisterOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeSignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
+  DCHECK(OperandType::kRuntimeId ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kShort);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
@@ -264,14 +380,6 @@
   return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
 }
 
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
-                                                  int index) {
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(Int32Constant(index), kPointerSizeLog2));
-  return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
-}
-
 Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
   return Load(MachineType::AnyTagged(), object,
               IntPtrConstant(offset - kHeapObjectTag));
@@ -285,7 +393,7 @@
 Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Load(MachineType::AnyTagged(), context, offset);
 }
 
@@ -293,7 +401,7 @@
                                              Node* value) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Store(MachineRepresentation::kTagged, context, offset, value);
 }
 
@@ -311,8 +419,6 @@
 void InterpreterAssembler::CallPrologue() {
   StoreRegister(SmiTag(BytecodeOffset()),
                 InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
-  StoreRegister(BytecodeArrayTaggedPointer(),
-                InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer);
 
   if (FLAG_debug_code && !disable_stack_check_across_call_) {
     DCHECK(stack_pointer_before_call_ == nullptr);
@@ -368,7 +474,7 @@
   Node* function = IntPtrAdd(function_table, function_offset);
   Node* function_entry =
       Load(MachineType::Pointer(), function,
-           Int32Constant(offsetof(Runtime::Function, entry)));
+           IntPtrConstant(offsetof(Runtime::Function, entry)));
 
   return CallStub(callable.descriptor(), code_target, context, arg_count,
                   first_arg, function_entry, result_size);
@@ -405,7 +511,7 @@
 }
 
 Node* InterpreterAssembler::Advance(int delta) {
-  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+  return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
 }
 
 Node* InterpreterAssembler::Advance(Node* delta) {
@@ -438,18 +544,21 @@
 }
 
 void InterpreterAssembler::Dispatch() {
-  DispatchTo(Advance(Bytecodes::Size(bytecode_)));
+  DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
 }
 
 void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
   Node* target_bytecode = Load(
       MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+  if (kPointerSize == 8) {
+    target_bytecode = ChangeUint32ToUint64(target_bytecode);
+  }
 
   // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
   // from code object on every dispatch.
   Node* target_code_object =
       Load(MachineType::Pointer(), DispatchTableRawPointer(),
-           Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+           WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
 
   DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
 }
@@ -461,12 +570,46 @@
   }
 
   InterpreterDispatchDescriptor descriptor(isolate());
-  Node* args[] = {GetAccumulator(),          RegisterFileRawPointer(),
+  Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
                   bytecode_offset,           BytecodeArrayTaggedPointer(),
                   DispatchTableRawPointer(), GetContext()};
   TailCall(descriptor, handler, args, 0);
 }
 
+void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
+  // Dispatching a wide bytecode requires treating the prefix
+  // bytecode as a base pointer into the dispatch table and dispatching
+  // the bytecode that follows relative to this base.
+  //
+  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
+  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
+  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
+  Node* next_bytecode_offset = Advance(1);
+  Node* next_bytecode = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+                             next_bytecode_offset);
+  if (kPointerSize == 8) {
+    next_bytecode = ChangeUint32ToUint64(next_bytecode);
+  }
+  Node* base_index;
+  switch (operand_scale) {
+    case OperandScale::kDouble:
+      base_index = IntPtrConstant(1 << kBitsPerByte);
+      break;
+    case OperandScale::kQuadruple:
+      base_index = IntPtrConstant(2 << kBitsPerByte);
+      break;
+    default:
+      UNREACHABLE();
+      base_index = nullptr;
+  }
+  Node* target_index = IntPtrAdd(base_index, next_bytecode);
+  Node* target_code_object =
+      Load(MachineType::Pointer(), DispatchTableRawPointer(),
+           WordShl(target_index, kPointerSizeLog2));
+
+  DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
+}
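
As a rough sketch of the index arithmetic the comment above describes (assumed layout, not the emitted code), the operand scale selects a 256-entry band of the dispatch table and the bytecode following the prefix indexes into that band:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kEntriesPerOperandScale = 256;  // one entry per 8-bit bytecode

    // operand_scale_band: 1 for double-width operands (Wide prefix),
    // 2 for quadruple-width operands (ExtraWide prefix).
    size_t WideDispatchIndex(int operand_scale_band, uint8_t next_bytecode) {
      return operand_scale_band * kEntriesPerOperandScale + next_bytecode;
    }

    int main() {
      std::printf("%zu\n", WideDispatchIndex(1, 0x0B));  // 267: band 1, bytecode 11
      std::printf("%zu\n", WideDispatchIndex(2, 0x0B));  // 523: band 2, bytecode 11
      return 0;
    }
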
+
 void InterpreterAssembler::InterpreterReturn() {
   // TODO(rmcilroy): Investigate whether it is worth supporting self
   // optimization of primitive functions like FullCodegen.
@@ -505,27 +648,29 @@
 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
   disable_stack_check_across_call_ = true;
   Node* abort_id = SmiTag(Int32Constant(bailout_reason));
-  Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
+  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
   disable_stack_check_across_call_ = false;
-  // Unreached, but keeps turbofan happy.
-  Return(ret_value);
 }
 
 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                                BailoutReason bailout_reason) {
   CodeStubAssembler::Label match(this);
   CodeStubAssembler::Label no_match(this);
+  CodeStubAssembler::Label end(this);
 
   Node* condition = WordEqual(lhs, rhs);
   Branch(condition, &match, &no_match);
   Bind(&no_match);
   Abort(bailout_reason);
+  Goto(&end);
   Bind(&match);
+  Goto(&end);
+  Bind(&end);
 }
 
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
-              SmiTag(BytecodeOffset()), GetAccumulator());
+              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
 }
 
 // static
@@ -534,7 +679,8 @@
   return false;
 #elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
   return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
+    V8_TARGET_ARCH_S390
   return true;
 #else
 #error "Unknown Architecture"
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index 9600dfb..86ecea5 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -19,12 +19,16 @@
 
 class InterpreterAssembler : public compiler::CodeStubAssembler {
  public:
-  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode);
+  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
+                       OperandScale operand_scale);
   virtual ~InterpreterAssembler();
 
   // Returns the count immediate for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandCount(int operand_index);
+  // Returns the 8-bit flag for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandFlag(int operand_index);
   // Returns the index immediate for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandIdx(int operand_index);
@@ -34,6 +38,9 @@
   // Returns the register index for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandReg(int operand_index);
+  // Returns the runtime id immediate for bytecode operand
+  // |operand_index| in the current bytecode.
+  compiler::Node* BytecodeOperandRuntimeId(int operand_index);
 
   // Accumulator.
   compiler::Node* GetAccumulator();
@@ -62,9 +69,6 @@
   // Load constant at |index| in the constant pool.
   compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
 
-  // Load an element from a fixed array on the heap.
-  compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
-
   // Load a field from an object on the heap.
   compiler::Node* LoadObjectField(compiler::Node* object, int offset);
 
@@ -139,10 +143,14 @@
     DispatchToBytecodeHandler(handler, BytecodeOffset());
   }
 
+  // Dispatch bytecode as a wide operand variant.
+  void DispatchWide(OperandScale operand_scale);
+
   // Abort with the given bailout reason.
   void Abort(BailoutReason bailout_reason);
 
  protected:
+  Bytecode bytecode() const { return bytecode_; }
   static bool TargetSupportsUnalignedAccess();
 
  private:
@@ -155,6 +163,11 @@
   // Returns a raw pointer to first entry in the interpreter dispatch table.
   compiler::Node* DispatchTableRawPointer();
 
+  // Returns the accumulator value without checking whether the bytecode
+  // uses it. This is intended to be used only in dispatch and in
+  // tracing, as these need to bypass accumulator-use validity checks.
+  compiler::Node* GetAccumulatorUnchecked();
+
   // Saves and restores interpreter bytecode offset to the interpreter stack
   // frame when performing a call.
   void CallPrologue() override;
@@ -170,10 +183,28 @@
   // Returns the offset of register |index| relative to RegisterFilePointer().
   compiler::Node* RegisterFrameOffset(compiler::Node* index);
 
-  compiler::Node* BytecodeOperand(int operand_index);
-  compiler::Node* BytecodeOperandSignExtended(int operand_index);
-  compiler::Node* BytecodeOperandShort(int operand_index);
-  compiler::Node* BytecodeOperandShortSignExtended(int operand_index);
+  // Returns the offset of an operand relative to the current bytecode offset.
+  compiler::Node* OperandOffset(int operand_index);
+
+  // Returns a value built from a sequence of bytes in the bytecode
+  // array starting at |relative_offset| from the current bytecode.
+  // The |result_type| determines the size and signedness of the
+  // value read. This method should only be used on architectures that
+  // do not support unaligned memory accesses.
+  compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
+                                               MachineType result_type);
+
+  compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
+  compiler::Node* BytecodeOperandSignedByte(int operand_index);
+  compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
+  compiler::Node* BytecodeOperandSignedShort(int operand_index);
+  compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
+  compiler::Node* BytecodeOperandSignedQuad(int operand_index);
+
+  compiler::Node* BytecodeSignedOperand(int operand_index,
+                                        OperandSize operand_size);
+  compiler::Node* BytecodeUnsignedOperand(int operand_index,
+                                          OperandSize operand_size);
 
   // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
   // update BytecodeOffset() itself.
@@ -187,8 +218,12 @@
   void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                            BailoutReason bailout_reason);
 
+  OperandScale operand_scale() const { return operand_scale_; }
+
   Bytecode bytecode_;
+  OperandScale operand_scale_;
   CodeStubAssembler::Variable accumulator_;
+  AccumulatorUse accumulator_use_;
   CodeStubAssembler::Variable context_;
   CodeStubAssembler::Variable bytecode_array_;
 
diff --git a/src/interpreter/interpreter-intrinsics.cc b/src/interpreter/interpreter-intrinsics.cc
new file mode 100644
index 0000000..6d9917d
--- /dev/null
+++ b/src/interpreter/interpreter-intrinsics.cc
@@ -0,0 +1,159 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter-intrinsics.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+
+#define __ assembler_->
+
+IntrinsicsHelper::IntrinsicsHelper(InterpreterAssembler* assembler)
+    : assembler_(assembler) {}
+
+bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) {
+  switch (function_id) {
+#define SUPPORTED(name, lower_case, count) case Runtime::kInline##name:
+    INTRINSICS_LIST(SUPPORTED)
+    return true;
+#undef SUPPORTED
+    default:
+      return false;
+  }
+}
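
The INTRINSICS_LIST X-macro drives this switch. Below is a self-contained sketch of the pattern, with demo names standing in for the real list:

    #include <cstdio>

    // Demo list with the same shape as INTRINSICS_LIST(V): name, lower_case, count.
    #define DEMO_INTRINSICS_LIST(V)      \
      V(IsJSReceiver, is_js_receiver, 1) \
      V(IsArray, is_array, 1)

    enum class DemoId {
    #define DEFINE_ID(Name, lower_case, count) k##Name,
      DEMO_INTRINSICS_LIST(DEFINE_ID)
    #undef DEFINE_ID
    };

    bool DemoIsSupported(DemoId id) {
      switch (id) {
    #define SUPPORTED(Name, lower_case, count) case DemoId::k##Name:
        DEMO_INTRINSICS_LIST(SUPPORTED)
        return true;
    #undef SUPPORTED
        default:
          return false;
      }
    }

    int main() {
      std::printf("%d\n", DemoIsSupported(DemoId::kIsArray));  // 1
      return 0;
    }
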
+
+Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
+                                        Node* first_arg_reg, Node* arg_count) {
+  InterpreterAssembler::Label abort(assembler_), end(assembler_);
+  InterpreterAssembler::Variable result(assembler_,
+                                        MachineRepresentation::kTagged);
+
+#define MAKE_LABEL(name, lower_case, count) \
+  InterpreterAssembler::Label lower_case(assembler_);
+  INTRINSICS_LIST(MAKE_LABEL)
+#undef MAKE_LABEL
+
+#define LABEL_POINTER(name, lower_case, count) &lower_case,
+  InterpreterAssembler::Label* labels[] = {INTRINSICS_LIST(LABEL_POINTER)};
+#undef LABEL_POINTER
+
+#define CASE(name, lower_case, count) \
+  static_cast<int32_t>(Runtime::kInline##name),
+  int32_t cases[] = {INTRINSICS_LIST(CASE)};
+#undef CASE
+
+  __ Switch(function_id, &abort, cases, labels, arraysize(cases));
+#define HANDLE_CASE(name, lower_case, expected_arg_count)   \
+  __ Bind(&lower_case);                                     \
+  if (FLAG_debug_code) {                                    \
+    AbortIfArgCountMismatch(expected_arg_count, arg_count); \
+  }                                                         \
+  result.Bind(name(first_arg_reg));                         \
+  __ Goto(&end);
+  INTRINSICS_LIST(HANDLE_CASE)
+#undef HANDLE_CASE
+
+  __ Bind(&abort);
+  __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+  result.Bind(__ UndefinedConstant());
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return result.value();
+}
+
+Node* IntrinsicsHelper::CompareInstanceType(Node* map, int type,
+                                            InstanceTypeCompareMode mode) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+  Node* instance_type = __ LoadInstanceType(map);
+
+  InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
+      end(assembler_);
+  Node* condition;
+  if (mode == kInstanceTypeEqual) {
+    condition = __ Word32Equal(instance_type, __ Int32Constant(type));
+  } else {
+    DCHECK(mode == kInstanceTypeGreaterThanOrEqual);
+    condition =
+        __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
+  }
+  __ Branch(condition, &if_true, &if_false);
+
+  __ Bind(&if_true);
+  return_value.Bind(__ BooleanConstant(true));
+  __ Goto(&end);
+
+  __ Bind(&if_false);
+  return_value.Bind(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return return_value.value();
+}
+
+Node* IntrinsicsHelper::IsJSReceiver(Node* input) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+
+  InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+      end(assembler_);
+  Node* arg = __ LoadRegister(input);
+
+  __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+  __ Bind(&if_smi);
+  return_value.Bind(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  return_value.Bind(CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
+                                        kInstanceTypeGreaterThanOrEqual));
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return return_value.value();
+}
+
+Node* IntrinsicsHelper::IsArray(Node* input) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+
+  InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+      end(assembler_);
+  Node* arg = __ LoadRegister(input);
+
+  __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+  __ Bind(&if_smi);
+  return_value.Bind(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&if_not_smi);
+  return_value.Bind(
+      CompareInstanceType(arg, JS_ARRAY_TYPE, kInstanceTypeEqual));
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return return_value.value();
+}
+
+void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) {
+  InterpreterAssembler::Label match(assembler_), mismatch(assembler_),
+      end(assembler_);
+  Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
+  __ Branch(comparison, &match, &mismatch);
+  __ Bind(&mismatch);
+  __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+  __ Goto(&end);
+  __ Bind(&match);
+  __ Goto(&end);
+  __ Bind(&end);
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/interpreter-intrinsics.h b/src/interpreter/interpreter-intrinsics.h
new file mode 100644
index 0000000..e27c678
--- /dev/null
+++ b/src/interpreter/interpreter-intrinsics.h
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
+#define V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
+
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-assembler.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class Node;
+}  // namespace compiler
+
+#define INTRINSICS_LIST(V)           \
+  V(IsJSReceiver, is_js_receiver, 1) \
+  V(IsArray, is_array, 1)
+
+namespace interpreter {
+
+class IntrinsicsHelper {
+ public:
+  explicit IntrinsicsHelper(InterpreterAssembler* assembler);
+
+  compiler::Node* InvokeIntrinsic(compiler::Node* function_id,
+                                  compiler::Node* context,
+                                  compiler::Node* first_arg_reg,
+                                  compiler::Node* arg_count);
+
+  static bool IsSupported(Runtime::FunctionId function_id);
+
+ private:
+  enum InstanceTypeCompareMode {
+    kInstanceTypeEqual,
+    kInstanceTypeGreaterThanOrEqual
+  };
+  compiler::Node* CompareInstanceType(compiler::Node* map, int type,
+                                      InstanceTypeCompareMode mode);
+  void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
+  InterpreterAssembler* assembler_;
+
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
+  compiler::Node* name(compiler::Node* input);
+  INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
+#undef DECLARE_INTRINSIC_HELPER
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicsHelper);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index eb88342..5084300 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -11,6 +11,8 @@
 #include "src/interpreter/bytecode-generator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/interpreter-assembler.h"
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/log.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -22,30 +24,69 @@
 #define __ assembler->
 
 Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
-  memset(&dispatch_table_, 0, sizeof(dispatch_table_));
+  memset(dispatch_table_, 0, sizeof(dispatch_table_));
 }
 
 void Interpreter::Initialize() {
   DCHECK(FLAG_ignition);
   if (IsDispatchTableInitialized()) return;
-  Zone zone;
+  Zone zone(isolate_->allocator());
   HandleScope scope(isolate_);
 
-#define GENERATE_CODE(Name, ...)                                        \
-  {                                                                     \
-    InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name); \
-    Do##Name(&assembler);                                               \
-    Handle<Code> code = assembler.GenerateCode();                       \
-    TraceCodegen(code, #Name);                                          \
-    dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code;      \
+  // Generate bytecode handlers for all bytecodes and scales.
+  for (OperandScale operand_scale = OperandScale::kSingle;
+       operand_scale <= OperandScale::kMaxValid;
+       operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+#define GENERATE_CODE(Name, ...)                                               \
+  {                                                                            \
+    if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) {     \
+      InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name,       \
+                                     operand_scale);                           \
+      Do##Name(&assembler);                                                    \
+      Handle<Code> code = assembler.GenerateCode();                            \
+      size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
+      dispatch_table_[index] = *code;                                          \
+      TraceCodegen(code);                                                      \
+      LOG_CODE_EVENT(                                                          \
+          isolate_,                                                            \
+          CodeCreateEvent(                                                     \
+              Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(*code),         \
+              Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
+    }                                                                          \
   }
-  BYTECODE_LIST(GENERATE_CODE)
+    BYTECODE_LIST(GENERATE_CODE)
 #undef GENERATE_CODE
+  }
+
+  // Fill unused entries with the illegal bytecode handler.
+  size_t illegal_index =
+      GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
+  for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
+    if (dispatch_table_[index] == nullptr) {
+      dispatch_table_[index] = dispatch_table_[illegal_index];
+    }
+  }
 }
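
A rough standalone sketch of the dispatch-table layout this loop produces, under the assumption of three operand scales and 256 entries per scale; unused slots are backfilled with the Illegal handler so every index dispatches somewhere valid:

    #include <array>
    #include <cstddef>
    #include <cstdio>

    constexpr size_t kOperandScales = 3;      // single, double, quadruple
    constexpr size_t kEntriesPerScale = 256;  // one per 8-bit bytecode

    int main() {
      // Handler "table" with one slot per (operand scale, bytecode) pair.
      std::array<const char*, kOperandScales * kEntriesPerScale> table{};
      table[0] = "LdaZero handler (single scale)";  // a generated handler
      const char* illegal = "Illegal handler";
      for (const char*& entry : table) {
        if (entry == nullptr) entry = illegal;  // backfill unused entries
      }
      std::printf("%s\n%s\n", table[0], table[511]);
      return 0;
    }
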
 
-Code* Interpreter::GetBytecodeHandler(Bytecode bytecode) {
+Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
+                                      OperandScale operand_scale) {
   DCHECK(IsDispatchTableInitialized());
-  return dispatch_table_[Bytecodes::ToByte(bytecode)];
+  DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
+  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
+  return dispatch_table_[index];
+}
+
+// static
+size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
+                                          OperandScale operand_scale) {
+  static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
+  size_t index = static_cast<size_t>(bytecode);
+  OperandScale current_scale = OperandScale::kSingle;
+  while (current_scale != operand_scale) {
+    index += kEntriesPerOperandScale;
+    current_scale = Bytecodes::NextOperandScale(current_scale);
+  }
+  return index;
 }
 
 void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
@@ -62,6 +103,9 @@
 }
 
 bool Interpreter::MakeBytecode(CompilationInfo* info) {
+  TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
+  TRACE_EVENT0("v8", "V8.CompileIgnition");
+
   if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
     OFStream os(stdout);
     base::SmartArrayPointer<char> name = info->GetDebugName();
@@ -88,8 +132,10 @@
 #endif  // DEBUG
 
   BytecodeGenerator generator(info->isolate(), info->zone());
-  info->EnsureFeedbackVector();
   Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
+
+  if (generator.HasStackOverflow()) return false;
+
   if (FLAG_print_bytecode) {
     OFStream os(stdout);
     bytecodes->Print(os);
@@ -102,23 +148,36 @@
 }
 
 bool Interpreter::IsDispatchTableInitialized() {
-  if (FLAG_trace_ignition) {
-    // Regenerate table to add bytecode tracing operations.
+  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen) {
+    // Regenerate table to add bytecode tracing operations
+    // or to print the assembly code generated by TurboFan.
     return false;
   }
   return dispatch_table_[0] != nullptr;
 }
 
-void Interpreter::TraceCodegen(Handle<Code> code, const char* name) {
+void Interpreter::TraceCodegen(Handle<Code> code) {
 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_trace_ignition_codegen) {
     OFStream os(stdout);
-    code->Disassemble(name, os);
+    code->Disassemble(nullptr, os);
     os << std::flush;
   }
 #endif  // ENABLE_DISASSEMBLER
 }
 
+const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
+#ifdef ENABLE_DISASSEMBLER
+#define RETURN_NAME(Name, ...)                                         \
+  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code) { \
+    return #Name;                                                      \
+  }
+  BYTECODE_LIST(RETURN_NAME)
+#undef RETURN_NAME
+#endif  // ENABLE_DISASSEMBLER
+  return nullptr;
+}
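
LookupNameOfBytecodeHandler amounts to a reverse scan of the dispatch table. An illustrative sketch with assumed names (not the V8 data structures):

    #include <cstddef>
    #include <cstdio>

    // Scan a table of handler code pointers and return the matching name.
    const char* LookupName(const void* const* table, const char* const* names,
                           size_t count, const void* code) {
      for (size_t i = 0; i < count; ++i) {
        if (table[i] == code) return names[i];
      }
      return nullptr;  // not a bytecode handler
    }

    int main() {
      int handler_a = 0, handler_b = 0;  // stand-ins for handler Code objects
      const void* table[] = {&handler_a, &handler_b};
      const char* names[] = {"LdaZero", "LdaSmi"};
      std::printf("%s\n", LookupName(table, names, 2, &handler_b));  // LdaSmi
      return 0;
    }
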
+
 // LdaZero
 //
 // Load literal '0' into the accumulator.
@@ -128,11 +187,10 @@
   __ Dispatch();
 }
 
-
-// LdaSmi8 <imm8>
+// LdaSmi <imm>
 //
-// Load an 8-bit integer literal into the accumulator as a Smi.
-void Interpreter::DoLdaSmi8(InterpreterAssembler* assembler) {
+// Load an integer literal into the accumulator as a Smi.
+void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
   Node* raw_int = __ BytecodeOperandImm(0);
   Node* smi_int = __ SmiTag(raw_int);
   __ SetAccumulator(smi_int);
@@ -154,15 +212,6 @@
   DoLoadConstant(assembler);
 }
 
-
-// LdaConstantWide <idx>
-//
-// Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstantWide(InterpreterAssembler* assembler) {
-  DoLoadConstant(assembler);
-}
-
-
 // LdaUndefined
 //
 // Load Undefined into the accumulator.
@@ -248,13 +297,6 @@
 }
 
 
-// MovWide <src> <dst>
-//
-// Stores the value of register <src> to register <dst>.
-void Interpreter::DoMovWide(InterpreterAssembler* assembler) {
-  DoMov(assembler);
-}
-
 void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
@@ -295,27 +337,6 @@
   DoLoadGlobal(ic, assembler);
 }
 
-// LdaGlobalWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> outside of a typeof.
-void Interpreter::DoLdaGlobalWide(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-// LdaGlobalInsideTypeofWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> inside of a typeof.
-void Interpreter::DoLdaGlobalInsideTypeofWide(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
 void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
@@ -333,7 +354,6 @@
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   __ CallStub(ic.descriptor(), code_target, context, global, name, value,
               smi_slot, type_feedback_vector);
-
   __ Dispatch();
 }
 
@@ -359,29 +379,6 @@
   DoStoreGlobal(ic, assembler);
 }
 
-
-// StaGlobalSloppyWide <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppyWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
-}
-
-
-// StaGlobalStrictWide <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrictWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
-}
-
-
 // LdaContextSlot <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
@@ -394,15 +391,6 @@
   __ Dispatch();
 }
 
-
-// LdaContextSlotWide <context> <slot_index>
-//
-// Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlotWide(InterpreterAssembler* assembler) {
-  DoLdaContextSlot(assembler);
-}
-
-
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
@@ -415,14 +403,6 @@
   __ Dispatch();
 }
 
-
-// StaContextSlot <context> <slot_index>
-//
-// Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlotWide(InterpreterAssembler* assembler) {
-  DoStaContextSlot(assembler);
-}
-
 void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
                                    InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
@@ -433,7 +413,6 @@
   __ Dispatch();
 }
 
-
 // LdaLookupSlot <name_index>
 //
 // Lookup the object with the name in constant pool entry |name_index|
@@ -442,7 +421,6 @@
   DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
 }
 
-
 // LdaLookupSlotInsideTypeof <name_index>
 //
 // Lookup the object with the name in constant pool entry |name_index|
@@ -451,25 +429,6 @@
   DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
-
-// LdaLookupSlotWide <name_index>
-//
-// Lookup the object with the name in constant pool entry |name_index|
-// dynamically.
-void Interpreter::DoLdaLookupSlotWide(InterpreterAssembler* assembler) {
-  DoLdaLookupSlot(assembler);
-}
-
-
-// LdaLookupSlotInsideTypeofWide <name_index>
-//
-// Lookup the object with the name in constant pool entry |name_index|
-// dynamically without causing a NoReferenceError.
-void Interpreter::DoLdaLookupSlotInsideTypeofWide(
-    InterpreterAssembler* assembler) {
-  DoLdaLookupSlotInsideTypeof(assembler);
-}
-
 void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
                                     InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
@@ -484,7 +443,6 @@
   __ Dispatch();
 }
 
-
 // StaLookupSlotSloppy <name_index>
 //
 // Store the object in accumulator to the object with the name in constant
@@ -502,24 +460,6 @@
   DoStoreLookupSlot(LanguageMode::STRICT, assembler);
 }
 
-
-// StaLookupSlotSloppyWide <name_index>
-//
-// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppyWide(InterpreterAssembler* assembler) {
-  DoStaLookupSlotSloppy(assembler);
-}
-
-
-// StaLookupSlotStrictWide <name_index>
-//
-// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrictWide(InterpreterAssembler* assembler) {
-  DoStaLookupSlotStrict(assembler);
-}
-
 void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
@@ -546,17 +486,6 @@
   DoLoadIC(ic, assembler);
 }
 
-// LoadICWide <object> <name_index> <slot>
-//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLoadICWide(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
 void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
@@ -582,17 +511,6 @@
   DoKeyedLoadIC(ic, assembler);
 }
 
-// KeyedLoadICWide <object> <slot>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoKeyedLoadICWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
 void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
@@ -633,30 +551,6 @@
   DoStoreIC(ic, assembler);
 }
 
-
-// StoreICSloppyWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
-// the name in constant pool entry <name_index> with the value in the
-// accumulator.
-void Interpreter::DoStoreICSloppyWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoStoreIC(ic, assembler);
-}
-
-
-// StoreICStrictWide <object> <name_index> <slot>
-//
-// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
-// the name in constant pool entry <name_index> with the value in the
-// accumulator.
-void Interpreter::DoStoreICStrictWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoStoreIC(ic, assembler);
-}
-
 void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
@@ -695,28 +589,6 @@
   DoKeyedStoreIC(ic, assembler);
 }
 
-
-// KeyedStoreICSloppyWide <object> <key> <slot>
-//
-// Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppyWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoKeyedStoreIC(ic, assembler);
-}
-
-
-// KeyedStoreICStoreWide <object> <key> <slot>
-//
-// Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrictWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedStoreIC(ic, assembler);
-}
-
 // PushContext <context>
 //
 // Saves the current context in <context>, and pushes the accumulator as the
@@ -741,6 +613,20 @@
   __ Dispatch();
 }
 
+void Interpreter::DoBinaryOp(Callable callable,
+                             InterpreterAssembler* assembler) {
+  // TODO(bmeurer): Collect definition side type feedback for various
+  // binary operations.
+  Node* target = __ HeapConstant(callable.code());
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* lhs = __ LoadRegister(reg_index);
+  Node* rhs = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(callable.descriptor(), target, context, lhs, rhs);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
 void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
                              InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
@@ -759,7 +645,7 @@
 //
 // Add register <src> to accumulator.
 void Interpreter::DoAdd(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kAdd, assembler);
+  DoBinaryOp(CodeFactory::Add(isolate_), assembler);
 }
 
 
@@ -767,7 +653,7 @@
 //
 // Subtract register <src> from accumulator.
 void Interpreter::DoSub(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kSubtract, assembler);
+  DoBinaryOp(CodeFactory::Subtract(isolate_), assembler);
 }
 
 
@@ -799,7 +685,7 @@
 //
 // BitwiseOr register <src> to accumulator.
 void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kBitwiseOr, assembler);
+  DoBinaryOp(CodeFactory::BitwiseOr(isolate_), assembler);
 }
 
 
@@ -807,7 +693,7 @@
 //
 // BitwiseXor register <src> to accumulator.
 void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kBitwiseXor, assembler);
+  DoBinaryOp(CodeFactory::BitwiseXor(isolate_), assembler);
 }
 
 
@@ -815,7 +701,7 @@
 //
 // BitwiseAnd register <src> to accumulator.
 void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kBitwiseAnd, assembler);
+  DoBinaryOp(CodeFactory::BitwiseAnd(isolate_), assembler);
 }
 
 
@@ -883,24 +769,40 @@
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
-  Node* result =
-      __ CallRuntime(Runtime::kInterpreterLogicalNot, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  Node* to_boolean_value =
+      __ CallStub(callable.descriptor(), target, context, accumulator);
+  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+  Node* true_value = __ BooleanConstant(true);
+  Node* false_value = __ BooleanConstant(false);
+  Node* condition = __ WordEqual(to_boolean_value, true_value);
+  __ Branch(condition, &if_true, &if_false);
+  __ Bind(&if_true);
+  {
+    __ SetAccumulator(false_value);
+    __ Dispatch();
+  }
+  __ Bind(&if_false);
+  {
+    __ SetAccumulator(true_value);
+    __ Dispatch();
+  }
 }
 
-
 // TypeOf
 //
 // Load the accumulator with the string representing the type of the
 // object in the accumulator.
 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::Typeof(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* result =
-      __ CallRuntime(Runtime::kInterpreterTypeOf, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -960,15 +862,6 @@
   DoJSCall(assembler, TailCallMode::kDisallow);
 }
 
-
-// CallWide <callable> <receiver> <arg_count>
-//
-// Call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallWide(InterpreterAssembler* assembler) {
-  DoJSCall(assembler, TailCallMode::kDisallow);
-}
-
 // TailCall <callable> <receiver> <arg_count>
 //
 // Tail call a JSfunction or Callable in |callable| with the |receiver| and
@@ -977,16 +870,8 @@
   DoJSCall(assembler, TailCallMode::kAllow);
 }
 
-// TailCallWide <callable> <receiver> <arg_count>
-//
-// Tail call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoTailCallWide(InterpreterAssembler* assembler) {
-  DoJSCall(assembler, TailCallMode::kAllow);
-}
-
 void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
-  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* function_id = __ BytecodeOperandRuntimeId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
@@ -1006,19 +891,26 @@
   DoCallRuntimeCommon(assembler);
 }
 
-
-// CallRuntime <function_id> <first_arg> <arg_count>
+// InvokeIntrinsic <function_id> <first_arg> <arg_count>
 //
-// Call the runtime function |function_id| with the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers.
-void Interpreter::DoCallRuntimeWide(InterpreterAssembler* assembler) {
-  DoCallRuntimeCommon(assembler);
+// Implements the semantic equivalent of calling the runtime function
+// |function_id| with the first argument in |first_arg| and |arg_count|
+// arguments in subsequent registers.
+void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
+  Node* function_id = __ BytecodeOperandRuntimeId(0);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* arg_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  IntrinsicsHelper helper(assembler);
+  Node* result =
+      helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
   // Call the runtime function.
-  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* function_id = __ BytecodeOperandRuntimeId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
@@ -1047,17 +939,6 @@
   DoCallRuntimeForPairCommon(assembler);
 }
 
-
-// CallRuntimeForPairWide <function_id> <first_arg> <arg_count> <first_return>
-//
-// Call the runtime function |function_id| which returns a pair, with the
-// first argument in register |first_arg| and |arg_count| arguments in
-// subsequent registers. Returns the result in <first_return> and
-// <first_return + 1>
-void Interpreter::DoCallRuntimeForPairWide(InterpreterAssembler* assembler) {
-  DoCallRuntimeForPairCommon(assembler);
-}
-
 void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
   Node* context_index = __ BytecodeOperandIdx(0);
   Node* receiver_reg = __ BytecodeOperandReg(1);
@@ -1088,15 +969,6 @@
   DoCallJSRuntimeCommon(assembler);
 }
 
-
-// CallJSRuntimeWide <context_index> <receiver> <arg_count>
-//
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntimeWide(InterpreterAssembler* assembler) {
-  DoCallJSRuntimeCommon(assembler);
-}
-
 void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
   Node* new_target = __ GetAccumulator();
@@ -1123,23 +995,11 @@
   DoCallConstruct(assembler);
 }
 
-
-// NewWide <constructor> <first_arg> <arg_count>
-//
-// Call operator new with |constructor| and the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers. The new.target is in the accumulator.
-//
-void Interpreter::DoNewWide(InterpreterAssembler* assembler) {
-  DoCallConstruct(assembler);
-}
-
-
 // TestEqual <src>
 //
 // Test if the value in the <src> register equals the accumulator.
 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kEqual, assembler);
+  DoBinaryOp(CodeFactory::Equal(isolate_), assembler);
 }
 
 
@@ -1147,7 +1007,7 @@
 //
 // Test if the value in the <src> register is not equal to the accumulator.
 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kNotEqual, assembler);
+  DoBinaryOp(CodeFactory::NotEqual(isolate_), assembler);
 }
 
 
@@ -1155,16 +1015,7 @@
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kStrictEqual, assembler);
-}
-
-
-// TestNotEqualStrict <src>
-//
-// Test if the value in the <src> register is not strictly equal to the
-// accumulator.
-void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kStrictNotEqual, assembler);
+  DoBinaryOp(CodeFactory::StrictEqual(isolate_), assembler);
 }
 
 
@@ -1172,7 +1023,7 @@
 //
 // Test if the value in the <src> register is less than the accumulator.
 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kLessThan, assembler);
+  DoBinaryOp(CodeFactory::LessThan(isolate_), assembler);
 }
 
 
@@ -1180,7 +1031,7 @@
 //
 // Test if the value in the <src> register is greater than the accumulator.
 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kGreaterThan, assembler);
+  DoBinaryOp(CodeFactory::GreaterThan(isolate_), assembler);
 }
 
 
@@ -1189,7 +1040,7 @@
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kLessThanOrEqual, assembler);
+  DoBinaryOp(CodeFactory::LessThanOrEqual(isolate_), assembler);
 }
 
 
@@ -1198,7 +1049,7 @@
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kGreaterThanOrEqual, assembler);
+  DoBinaryOp(CodeFactory::GreaterThanOrEqual(isolate_), assembler);
 }
 
 
@@ -1219,16 +1070,22 @@
   DoBinaryOp(Runtime::kInstanceOf, assembler);
 }
 
+void Interpreter::DoTypeConversionOp(Callable callable,
+                                     InterpreterAssembler* assembler) {
+  Node* target = __ HeapConstant(callable.code());
+  Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallStub(callable.descriptor(), target, context, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
 
 // ToName
 //
 // Cast the object referenced by the accumulator to a name.
 void Interpreter::DoToName(InterpreterAssembler* assembler) {
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kToName, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoTypeConversionOp(CodeFactory::ToName(isolate_), assembler);
 }
 
 
@@ -1236,11 +1093,7 @@
 //
 // Cast the object referenced by the accumulator to a number.
 void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kToNumber, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoTypeConversionOp(CodeFactory::ToNumber(isolate_), assembler);
 }
 
 
@@ -1248,26 +1101,20 @@
 //
 // Cast the object referenced by the accumulator to a JSObject.
 void Interpreter::DoToObject(InterpreterAssembler* assembler) {
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kToObject, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoTypeConversionOp(CodeFactory::ToObject(isolate_), assembler);
 }
 
-
-// Jump <imm8>
+// Jump <imm>
 //
-// Jump by number of bytes represented by the immediate operand |imm8|.
+// Jump by number of bytes represented by the immediate operand |imm|.
 void Interpreter::DoJump(InterpreterAssembler* assembler) {
   Node* relative_jump = __ BytecodeOperandImm(0);
   __ Jump(relative_jump);
 }
 
-
-// JumpConstant <idx8>
+// JumpConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
 void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1275,17 +1122,7 @@
   __ Jump(relative_jump);
 }
 
-
-// JumpConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the
-// constant pool.
-void Interpreter::DoJumpConstantWide(InterpreterAssembler* assembler) {
-  DoJumpConstant(assembler);
-}
-
-
-// JumpIfTrue <imm8>
+// JumpIfTrue <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains true.
@@ -1296,10 +1133,9 @@
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }
 
-
-// JumpIfTrueConstant <idx8>
+// JumpIfTrueConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the accumulator contains true.
 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1310,17 +1146,7 @@
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }
 
-
-// JumpIfTrueConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfTrueConstant(assembler);
-}
-
-
-// JumpIfFalse <imm8>
+// JumpIfFalse <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains false.
@@ -1331,10 +1157,9 @@
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }
 
-
-// JumpIfFalseConstant <idx8>
+// JumpIfFalseConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the accumulator contains false.
 void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1345,42 +1170,35 @@
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }
 
-
-// JumpIfFalseConstant <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfFalseConstant(assembler);
-}
-
-
-// JumpIfToBooleanTrue <imm8>
+// JumpIfToBooleanTrue <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
 }
 
-
-// JumpIfToBooleanTrueConstant <idx8>
+// JumpIfToBooleanTrueConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstant(
     InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1388,44 +1206,35 @@
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
 }
 
-
-// JumpIfToBooleanTrueConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is true when the object is cast
-// to boolean.
-void Interpreter::DoJumpIfToBooleanTrueConstantWide(
-    InterpreterAssembler* assembler) {
-  DoJumpIfToBooleanTrueConstant(assembler);
-}
-
-
-// JumpIfToBooleanFalse <imm8>
+// JumpIfToBooleanFalse <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
 }
 
-
-// JumpIfToBooleanFalseConstant <idx8>
+// JumpIfToBooleanFalseConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstant(
     InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1433,19 +1242,7 @@
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
 }
 
-
-// JumpIfToBooleanFalseConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is false when the object is cast
-// to boolean.
-void Interpreter::DoJumpIfToBooleanFalseConstantWide(
-    InterpreterAssembler* assembler) {
-  DoJumpIfToBooleanFalseConstant(assembler);
-}
-
-
-// JumpIfNull <imm8>
+// JumpIfNull <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the null constant.
@@ -1456,10 +1253,9 @@
   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
 }
 
-
-// JumpIfNullConstant <idx8>
+// JumpIfNullConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
 void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1470,16 +1266,7 @@
   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
 }
 
-
-// JumpIfNullConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfNullConstant(assembler);
-}
-
-// JumpIfUndefined <imm8>
+// JumpIfUndefined <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the undefined constant.
@@ -1491,10 +1278,9 @@
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }
 
-
-// JumpIfUndefinedConstant <idx8>
+// JumpIfUndefinedConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
 void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1506,17 +1292,7 @@
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }
 
-
-// JumpIfUndefinedConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefinedConstantWide(
-    InterpreterAssembler* assembler) {
-  DoJumpIfUndefinedConstant(assembler);
-}
-
-// JumpIfNotHole <imm8>
+// JumpIfNotHole <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the hole.
@@ -1527,9 +1303,9 @@
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }
 
-// JumpIfNotHoleConstant <idx8>
+// JumpIfNotHoleConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the hole constant.
 void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1540,21 +1316,13 @@
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }
 
-// JumpIfNotHoleConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the hole constant.
-void Interpreter::DoJumpIfNotHoleConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfNotHoleConstant(assembler);
-}
-
 void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
                                   InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant_elements = __ LoadConstantPoolEntry(index);
   Node* literal_index_raw = __ BytecodeOperandIdx(1);
   Node* literal_index = __ SmiTag(literal_index_raw);
-  Node* flags_raw = __ BytecodeOperandImm(2);
+  Node* flags_raw = __ BytecodeOperandFlag(2);
   Node* flags = __ SmiTag(flags_raw);
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
@@ -1570,19 +1338,22 @@
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
 void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+  Callable callable = CodeFactory::FastCloneRegExp(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* pattern = __ LoadConstantPoolEntry(index);
+  Node* literal_index_raw = __ BytecodeOperandIdx(1);
+  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* flags_raw = __ BytecodeOperandFlag(2);
+  Node* flags = __ SmiTag(flags_raw);
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure,
+                             literal_index, pattern, flags);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
-
-// CreateRegExpLiteralWide <pattern_idx> <literal_idx> <flags>
-//
-// Creates a regular expression literal for literal index <literal_idx> with
-// <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteralWide(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
-}
-
-
 // CreateArrayLiteral <element_idx> <literal_idx> <flags>
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
@@ -1591,16 +1362,6 @@
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
-
-// CreateArrayLiteralWide <element_idx> <literal_idx> <flags>
-//
-// Creates an array literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteralWide(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
-}
-
-
 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
@@ -1609,16 +1370,6 @@
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
-
-// CreateObjectLiteralWide <element_idx> <literal_idx> <flags>
-//
-// Creates an object literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteralWide(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
-}
-
-
 // CreateClosure <index> <tenured>
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
@@ -1628,7 +1379,7 @@
   // calling into the runtime.
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
-  Node* tenured_raw = __ BytecodeOperandImm(1);
+  Node* tenured_raw = __ BytecodeOperandFlag(1);
   Node* tenured = __ SmiTag(tenured_raw);
   Node* context = __ GetContext();
   Node* result =
@@ -1637,16 +1388,6 @@
   __ Dispatch();
 }
 
-
-// CreateClosureWide <index> <tenured>
-//
-// Creates a new closure for SharedFunctionInfo at position |index| in the
-// constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosureWide(InterpreterAssembler* assembler) {
-  return DoCreateClosure(assembler);
-}
-
-
 // CreateMappedArguments
 //
 // Creates a new mapped arguments object.
@@ -1737,11 +1478,13 @@
 // DebugBreak
 //
 // Call runtime to handle a debug break.
-#define DEBUG_BREAK(Name, ...)                                              \
-  void Interpreter::Do##Name(InterpreterAssembler* assembler) {             \
-    Node* context = __ GetContext();                                        \
-    Node* original_handler = __ CallRuntime(Runtime::kDebugBreak, context); \
-    __ DispatchToBytecodeHandler(original_handler);                         \
+#define DEBUG_BREAK(Name, ...)                                                \
+  void Interpreter::Do##Name(InterpreterAssembler* assembler) {               \
+    Node* context = __ GetContext();                                          \
+    Node* accumulator = __ GetAccumulator();                                  \
+    Node* original_handler =                                                  \
+        __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
+    __ DispatchToBytecodeHandler(original_handler);                           \
   }
 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
 #undef DEBUG_BREAK
@@ -1768,18 +1511,6 @@
   __ Dispatch();
 }
 
-
-// ForInPrepareWide <cache_info_triple>
-//
-// Returns state for for..in loop execution based on the object in the
-// accumulator. The result is output in registers |cache_info_triple| to
-// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
-// and cache_length respectively.
-void Interpreter::DoForInPrepareWide(InterpreterAssembler* assembler) {
-  DoForInPrepare(assembler);
-}
-
-
 // ForInNext <receiver> <index> <cache_info_pair>
 //
 // Returns the next enumerable property in the accumulator.
@@ -1792,53 +1523,101 @@
   Node* cache_type = __ LoadRegister(cache_type_reg);
   Node* cache_array_reg = __ NextRegister(cache_type_reg);
   Node* cache_array = __ LoadRegister(cache_array_reg);
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kForInNext, context, receiver,
-                                cache_array, cache_type, index);
-  __ SetAccumulator(result);
-  __ Dispatch();
+
+  // Load the next key from the enumeration array.
+  Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
+
+  // Check if we can use the for-in fast path potentially using the enum cache.
+  InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+  Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
+  Node* condition = __ WordEqual(receiver_map, cache_type);
+  __ Branch(condition, &if_fast, &if_slow);
+  __ Bind(&if_fast);
+  {
+    // Enum cache in use for {receiver}, the {key} is definitely valid.
+    __ SetAccumulator(key);
+    __ Dispatch();
+  }
+  __ Bind(&if_slow);
+  {
+    // Record the fact that we hit the for-in slow path.
+    Node* vector_index = __ BytecodeOperandIdx(3);
+    Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+    Node* megamorphic_sentinel =
+        __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
+    __ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index,
+                                            megamorphic_sentinel);
+
+    // Need to filter the {key} for the {receiver}.
+    Node* context = __ GetContext();
+    Node* result =
+        __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
 }
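Note: DoForInNext above now splits into a fast path (the receiver's map still matches the cached enum type, so the key can be used directly) and a slow path (record megamorphic feedback, then filter the key through Runtime::kForInFilter). A minimal standalone sketch of that decision, using simplified stand-in types (Map, Receiver, ForInState, MarkSlowPath and FilterKey are illustrative, not V8 API):

#include <string>
#include <vector>

struct Map {};
struct Receiver { const Map* map; };

struct ForInState {
  const Map* cache_type;                  // map the enum cache was built for
  std::vector<std::string> cache_array;   // cached enumerable keys
};

// Hypothetical slow-path helpers.
void MarkSlowPath() { /* record megamorphic feedback */ }
std::string FilterKey(const Receiver&, const std::string& key) { return key; }

// Mirrors the fast/slow split in DoForInNext.
std::string ForInNext(const Receiver& receiver, const ForInState& state,
                      size_t index) {
  const std::string& key = state.cache_array[index];
  if (receiver.map == state.cache_type) {
    // Enum cache still valid for this receiver; the key is usable as-is.
    return key;
  }
  // Receiver changed shape: remember that we hit the slow path and
  // re-validate the key against the current receiver.
  MarkSlowPath();
  return FilterKey(receiver, key);
}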
 
-
-// ForInNextWide <receiver> <index> <cache_info_pair>
-//
-// Returns the next enumerable property in the the accumulator.
-void Interpreter::DoForInNextWide(InterpreterAssembler* assembler) {
-  return DoForInNext(assembler);
-}
-
-
 // ForInDone <index> <cache_length>
 //
 // Returns true if the end of the enumerable properties has been reached.
 void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
-  // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_length_reg = __ BytecodeOperandReg(1);
   Node* cache_length = __ LoadRegister(cache_length_reg);
-  Node* context = __ GetContext();
-  Node* result =
-      __ CallRuntime(Runtime::kForInDone, context, index, cache_length);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
 
+  // Check if {index} is at {cache_length} already.
+  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+  Node* condition = __ WordEqual(index, cache_length);
+  __ Branch(condition, &if_true, &if_false);
+  __ Bind(&if_true);
+  {
+    Node* result = __ BooleanConstant(true);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+  __ Bind(&if_false);
+  {
+    Node* result = __ BooleanConstant(false);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+}
 
 // ForInStep <index>
 //
 // Increments the loop counter in register |index| and stores the result
 // in the accumulator.
 void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
-  // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kForInStep, context, index);
+  Node* one = __ SmiConstant(Smi::FromInt(1));
+  Node* result = __ SmiAdd(index, one);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
+// Wide
+//
+// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
+void Interpreter::DoWide(InterpreterAssembler* assembler) {
+  __ DispatchWide(OperandScale::kDouble);
+}
+
+// ExtraWide
+//
+// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
+void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
+  __ DispatchWide(OperandScale::kQuadruple);
+}
+
+// Illegal
+//
+// An invalid bytecode aborting execution if dispatched.
+void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
+  __ Abort(kInvalidBytecode);
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index e02e914..ea50faa 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -40,12 +40,14 @@
   static bool MakeBytecode(CompilationInfo* info);
 
   // Return bytecode handler for |bytecode|.
-  Code* GetBytecodeHandler(Bytecode bytecode);
+  Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
 
   // GC support.
   void IterateDispatchTable(ObjectVisitor* v);
 
-  void TraceCodegen(Handle<Code> code, const char* name);
+  // Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
+  void TraceCodegen(Handle<Code> code);
+  const char* LookupNameOfBytecodeHandler(Code* code);
 
   Address dispatch_table_address() {
     return reinterpret_cast<Address>(&dispatch_table_[0]);
@@ -58,6 +60,9 @@
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
+  // Generates code to perform the binary operations via |callable|.
+  void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
+
   // Generates code to perform the binary operations via |function_id|.
   void DoBinaryOp(Runtime::FunctionId function_id,
                   InterpreterAssembler* assembler);
@@ -103,9 +108,12 @@
   // Generates code to perform a JS runtime call.
   void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
 
-  // Generates code to perform a constructor call..
+  // Generates code to perform a constructor call.
   void DoCallConstruct(InterpreterAssembler* assembler);
 
+  // Generates code to perform a type conversion.
+  void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
+
   // Generates code to create a literal via |function_id|.
   void DoCreateLiteral(Runtime::FunctionId function_id,
                        InterpreterAssembler* assembler);
@@ -122,9 +130,14 @@
   void DoStoreLookupSlot(LanguageMode language_mode,
                          InterpreterAssembler* assembler);
 
+  // Get dispatch table index of bytecode.
+  static size_t GetDispatchTableIndex(Bytecode bytecode,
+                                      OperandScale operand_scale);
+
   bool IsDispatchTableInitialized();
 
-  static const int kDispatchTableSize = static_cast<int>(Bytecode::kLast) + 1;
+  static const int kNumberOfWideVariants = 3;
+  static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
 
   Isolate* isolate_;
   Code* dispatch_table_[kDispatchTableSize];
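Note: the dispatch table is now sized for three operand-scale variants (single, double after a Wide prefix, quadruple after an ExtraWide prefix) of up to 256 bytecodes each. A sketch of how GetDispatchTableIndex could map a (bytecode, operand scale) pair onto that flat table, assuming a row-per-scale layout (the actual implementation is not shown in this diff):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed, simplified mirror of the interpreter's table layout:
// 3 operand-scale variants * 256 possible bytecode values.
enum class OperandScale { kSingle = 1, kDouble = 2, kQuadruple = 4 };

constexpr size_t kEntriesPerOperandScale = 256;  // kMaxUInt8 + 1
constexpr size_t kNumberOfWideVariants = 3;
constexpr size_t kDispatchTableSize =
    kNumberOfWideVariants * kEntriesPerOperandScale;  // 768 entries

size_t GetDispatchTableIndex(uint8_t bytecode, OperandScale scale) {
  size_t row = 0;
  switch (scale) {
    case OperandScale::kSingle:    row = 0; break;
    case OperandScale::kDouble:    row = 1; break;  // after a Wide prefix
    case OperandScale::kQuadruple: row = 2; break;  // after an ExtraWide prefix
  }
  size_t index = row * kEntriesPerOperandScale + bytecode;
  assert(index < kDispatchTableSize);
  return index;
}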
diff --git a/src/interpreter/register-translator.cc b/src/interpreter/register-translator.cc
deleted file mode 100644
index 3eba42f..0000000
--- a/src/interpreter/register-translator.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/register-translator.h"
-
-#include "src/interpreter/bytecode-array-builder.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-RegisterTranslator::RegisterTranslator(RegisterMover* mover)
-    : mover_(mover),
-      emitting_moves_(false),
-      window_registers_count_(0),
-      output_moves_count_(0) {}
-
-void RegisterTranslator::TranslateInputRegisters(Bytecode bytecode,
-                                                 uint32_t* raw_operands,
-                                                 int raw_operand_count) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), raw_operand_count);
-  if (!emitting_moves_) {
-    emitting_moves_ = true;
-    DCHECK_EQ(window_registers_count_, 0);
-    int register_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
-    for (int i = 0; i < raw_operand_count; i++) {
-      if ((register_bitmap & (1 << i)) == 0) {
-        continue;
-      }
-      Register in_reg = Register::FromRawOperand(raw_operands[i]);
-      Register out_reg = TranslateAndMove(bytecode, i, in_reg);
-      raw_operands[i] = out_reg.ToRawOperand();
-    }
-    window_registers_count_ = 0;
-    emitting_moves_ = false;
-  } else {
-    // When the register translator is translating registers, it will
-    // cause the bytecode generator to emit moves on it's behalf. This
-    // path is reached by these moves.
-    DCHECK(bytecode == Bytecode::kMovWide && raw_operand_count == 2 &&
-           Register::FromRawOperand(raw_operands[0]).is_valid() &&
-           Register::FromRawOperand(raw_operands[1]).is_valid());
-  }
-}
-
-Register RegisterTranslator::TranslateAndMove(Bytecode bytecode,
-                                              int operand_index, Register reg) {
-  if (FitsInReg8Operand(reg)) {
-    return reg;
-  }
-
-  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
-  OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
-  if (operand_size == OperandSize::kShort) {
-    CHECK(FitsInReg16Operand(reg));
-    return Translate(reg);
-  }
-
-  CHECK((operand_type == OperandType::kReg8 ||
-         operand_type == OperandType::kRegOut8) &&
-        RegisterIsMovableToWindow(bytecode, operand_index));
-  Register translated_reg = Translate(reg);
-  Register window_reg(kTranslationWindowStart + window_registers_count_);
-  window_registers_count_ += 1;
-  if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
-    DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_type));
-    mover()->MoveRegisterUntranslated(translated_reg, window_reg);
-  } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
-    DCHECK_LT(output_moves_count_, kTranslationWindowLength);
-    output_moves_[output_moves_count_] =
-        std::make_pair(window_reg, translated_reg);
-    output_moves_count_ += 1;
-  } else {
-    UNREACHABLE();
-  }
-  return window_reg;
-}
-
-// static
-bool RegisterTranslator::RegisterIsMovableToWindow(Bytecode bytecode,
-                                                   int operand_index) {
-  // By design, we only support moving individual registers. There
-  // should be wide variants of such bytecodes instead to avoid the
-  // need for a large translation window.
-  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
-  if (operand_type != OperandType::kReg8 &&
-      operand_type != OperandType::kRegOut8) {
-    return false;
-  } else if (operand_index + 1 == Bytecodes::NumberOfOperands(bytecode)) {
-    return true;
-  } else {
-    OperandType next_operand_type =
-        Bytecodes::GetOperandType(bytecode, operand_index + 1);
-    return (next_operand_type != OperandType::kRegCount8 &&
-            next_operand_type != OperandType::kRegCount16);
-  }
-}
-
-void RegisterTranslator::TranslateOutputRegisters() {
-  if (!emitting_moves_) {
-    emitting_moves_ = true;
-    while (output_moves_count_ > 0) {
-      output_moves_count_ -= 1;
-      mover()->MoveRegisterUntranslated(
-          output_moves_[output_moves_count_].first,
-          output_moves_[output_moves_count_].second);
-    }
-    emitting_moves_ = false;
-  }
-}
-
-// static
-Register RegisterTranslator::Translate(Register reg) {
-  if (reg.index() >= kTranslationWindowStart) {
-    return Register(reg.index() + kTranslationWindowLength);
-  } else {
-    return reg;
-  }
-}
-
-// static
-bool RegisterTranslator::InTranslationWindow(Register reg) {
-  return (reg.index() >= kTranslationWindowStart &&
-          reg.index() <= kTranslationWindowLimit);
-}
-
-// static
-Register RegisterTranslator::UntranslateRegister(Register reg) {
-  if (reg.index() >= kTranslationWindowStart) {
-    return Register(reg.index() - kTranslationWindowLength);
-  } else {
-    return reg;
-  }
-}
-
-// static
-int RegisterTranslator::DistanceToTranslationWindow(Register reg) {
-  return kTranslationWindowStart - reg.index();
-}
-
-// static
-bool RegisterTranslator::FitsInReg8Operand(Register reg) {
-  return reg.is_byte_operand() && reg.index() < kTranslationWindowStart;
-}
-
-// static
-bool RegisterTranslator::FitsInReg16Operand(Register reg) {
-  int max_index = Register::MaxRegisterIndex() - kTranslationWindowLength + 1;
-  return reg.is_short_operand() && reg.index() < max_index;
-}
-
-// static
-int RegisterTranslator::RegisterCountAdjustment(int register_count,
-                                                int parameter_count) {
-  if (register_count > kTranslationWindowStart) {
-    return kTranslationWindowLength;
-  } else if (parameter_count > 0) {
-    Register param0 = Register::FromParameterIndex(0, parameter_count);
-    if (!param0.is_byte_operand()) {
-      // TODO(oth): Number of parameters means translation is
-      // required, but the translation window location is such that
-      // some space is wasted. Hopefully a rare corner case, but could
-      // relocate window to limit waste.
-      return kTranslationWindowLimit + 1 - register_count;
-    }
-  }
-  return 0;
-}
-
-}  // namespace interpreter
-}  // namespace internal
-}  // namespace v8
diff --git a/src/interpreter/register-translator.h b/src/interpreter/register-translator.h
deleted file mode 100644
index b683a89..0000000
--- a/src/interpreter/register-translator.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERPRETER_REGISTER_TRANSLATOR_H_
-#define V8_INTERPRETER_REGISTER_TRANSLATOR_H_
-
-#include "src/interpreter/bytecodes.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-class RegisterMover;
-
-// A class that enables bytecodes having only byte sized register operands
-// to access all registers in the two byte space. Most bytecode uses few
-// registers so space can be saved if most bytecodes with register operands
-// just take byte operands.
-//
-// To reach the wider register space, a translation window is reserved in
-// the byte addressable space specifically for copying registers into and
-// out of before a bytecode is emitted. The translation window occupies
-// the last register slots at the top of the byte addressable range.
-//
-// Because of the translation window any registers which naturally lie
-// at above the translation window have to have their register index
-// incremented by the window width before they are emitted.
-//
-// This class does not support moving ranges of registers to and from
-// the translation window. It would be straightforward to add support
-// for constrained ranges, e.g. kRegPair8, kRegTriple8 operands, but
-// these would have two negative effects. The translation window would
-// need to be wider, further limiting the space for byte operands. And
-// every register in a range would need to be moved consuming more
-// space in the bytecode array.
-class RegisterTranslator final {
- public:
-  explicit RegisterTranslator(RegisterMover* mover);
-
-  // Translate and re-write the register operands that are inputs
-  // to |bytecode| when it is about to be emitted.
-  void TranslateInputRegisters(Bytecode bytecode, uint32_t* raw_operands,
-                               int raw_operand_count);
-
-  // Translate and re-write the register operands that are outputs
-  // from |bytecode| when it has just been output.
-  void TranslateOutputRegisters();
-
-  // Returns true if |reg| is in the translation window.
-  static bool InTranslationWindow(Register reg);
-
-  // Return register value as if it had been translated.
-  static Register UntranslateRegister(Register reg);
-
-  // Returns the distance in registers between the translation window
-  // start and |reg|. The result is negative when |reg| is above the
-  // start of the translation window.
-  static int DistanceToTranslationWindow(Register reg);
-
-  // Returns true if |reg| can be represented as an 8-bit operand
-  // after translation.
-  static bool FitsInReg8Operand(Register reg);
-
-  // Returns true if |reg| can be represented as an 16-bit operand
-  // after translation.
-  static bool FitsInReg16Operand(Register reg);
-
-  // Returns the increment to the register count necessary if the
-  // value indicates the translation window is required.
-  static int RegisterCountAdjustment(int register_count, int parameter_count);
-
- private:
-  static const int kTranslationWindowLength = 4;
-  static const int kTranslationWindowLimit = -kMinInt8;
-  static const int kTranslationWindowStart =
-      kTranslationWindowLimit - kTranslationWindowLength + 1;
-
-  Register TranslateAndMove(Bytecode bytecode, int operand_index, Register reg);
-  static bool RegisterIsMovableToWindow(Bytecode bytecode, int operand_index);
-
-  static Register Translate(Register reg);
-
-  RegisterMover* mover() const { return mover_; }
-
-  // Entity to perform register moves necessary to translate registers
-  // and ensure reachability.
-  RegisterMover* mover_;
-
-  // Flag to avoid re-entrancy when emitting move bytecodes for
-  // translation.
-  bool emitting_moves_;
-
-  // Number of window registers in use.
-  int window_registers_count_;
-
-  // State for restoring register moves emitted by TranslateOutputRegisters.
-  std::pair<Register, Register> output_moves_[kTranslationWindowLength];
-  int output_moves_count_;
-};
-
-// Interface for RegisterTranslator helper class that will emit
-// register move bytecodes at the translator's behest.
-class RegisterMover {
- public:
-  virtual ~RegisterMover() {}
-
-  // Move register |from| to register |to| with no translation.
-  // returns false if either register operand is invalid. Implementations
-  // of this method must be aware that register moves with bad
-  // register values are a security hole.
-  virtual void MoveRegisterUntranslated(Register from, Register to) = 0;
-};
-
-}  // namespace interpreter
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_INTERPRETER_REGISTER_TRANSLATOR_H_
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
index 0b7c44e..99a865b 100644
--- a/src/interpreter/source-position-table.cc
+++ b/src/interpreter/source-position-table.cc
@@ -4,7 +4,6 @@
 
 #include "src/interpreter/source-position-table.h"
 
-#include "src/assembler.h"
 #include "src/objects-inl.h"
 #include "src/objects.h"
 
@@ -12,71 +11,196 @@
 namespace internal {
 namespace interpreter {
 
-class IsStatementField : public BitField<bool, 0, 1> {};
-class SourcePositionField : public BitField<int, 1, 30> {};
+// We'll use a simple encoding scheme to record the source positions.
+// Conceptually, each position consists of:
+// - bytecode_offset: An integer index into the BytecodeArray
+// - source_position: An integer index into the source string.
+// - position type: Each position is either a statement or an expression.
+//
+// The basic idea for the encoding is to use a variable-length integer coding,
+// where each byte contains 7 bits of payload data, and 1 'more' bit that
+// determines whether additional bytes follow. Additionally:
+// - we record the difference from the previous position,
+// - we just stuff one bit for the type into the bytecode offset,
+// - we write least-significant bits first,
+// - negative numbers occur only rarely, so we use a denormalized
+//   most-significant byte (a byte with all zeros, which normally wouldn't
+//   make any sense) to encode a negative sign, so that we 'pay' nothing for
+//   positive numbers, but have to pay a full byte for negative integers.
+
+namespace {
+
+// A zero-value in the most-significant byte is used to mark negative numbers.
+const int kNegativeSignMarker = 0;
+
+// Each byte is encoded as MoreBit | ValueBits.
+class MoreBit : public BitField8<bool, 7, 1> {};
+class ValueBits : public BitField8<int, 0, 7> {};
+
+// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
+void AddAndSetEntry(PositionTableEntry& value,
+                    const PositionTableEntry& other) {
+  value.bytecode_offset += other.bytecode_offset;
+  value.source_position += other.source_position;
+  value.is_statement = other.is_statement;
+}
+
+// Helper: Subtract the offsets in 'other' from 'value'.
+void SubtractFromEntry(PositionTableEntry& value,
+                       const PositionTableEntry& other) {
+  value.bytecode_offset -= other.bytecode_offset;
+  value.source_position -= other.source_position;
+}
+
+// Helper: Encode an integer.
+void EncodeInt(ZoneVector<byte>& bytes, int value) {
+  bool sign = false;
+  if (value < 0) {
+    sign = true;
+    value = -value;
+  }
+
+  bool more;
+  do {
+    more = value > ValueBits::kMax;
+    bytes.push_back(MoreBit::encode(more || sign) |
+                    ValueBits::encode(value & ValueBits::kMax));
+    value >>= ValueBits::kSize;
+  } while (more);
+
+  if (sign) {
+    bytes.push_back(MoreBit::encode(false) |
+                    ValueBits::encode(kNegativeSignMarker));
+  }
+}
+
+// Encode a PositionTableEntry.
+void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
+  // 1 bit for sign + is_statement each, which leaves 30b for the value.
+  DCHECK(abs(entry.bytecode_offset) < (1 << 30));
+  EncodeInt(bytes, (entry.is_statement ? 1 : 0) | (entry.bytecode_offset << 1));
+  EncodeInt(bytes, entry.source_position);
+}
+
+// Helper: Decode an integer.
+void DecodeInt(ByteArray* bytes, int* index, int* v) {
+  byte current;
+  int n = 0;
+  int value = 0;
+  bool more;
+  do {
+    current = bytes->get((*index)++);
+    value |= ValueBits::decode(current) << (n * ValueBits::kSize);
+    n++;
+    more = MoreBit::decode(current);
+  } while (more);
+
+  if (ValueBits::decode(current) == kNegativeSignMarker) {
+    value = -value;
+  }
+  *v = value;
+}
+
+void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
+  int tmp;
+  DecodeInt(bytes, index, &tmp);
+  entry->is_statement = (tmp & 1);
+
+  // Note that '>>' needs to be arithmetic shift in order to handle negative
+  // numbers properly.
+  entry->bytecode_offset = (tmp >> 1);
+
+  DecodeInt(bytes, index, &entry->source_position);
+}
+
+}  // namespace
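Note: the helpers above implement a signed little-endian varint: each byte carries 7 value bits plus a 'more' bit, and a trailing byte whose value bits are all zero marks a negative number. A standalone mirror of EncodeInt/DecodeInt with two worked values, 200 -> 0xC8 0x01 and -3 -> 0x83 0x00 (a sketch for illustration, not the V8 code itself):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Standalone mirror of EncodeInt: write 7 bits per byte, least-significant
// first; a trailing 0x00 byte marks a negative value.
void EncodeInt(std::vector<uint8_t>& bytes, int value) {
  bool sign = value < 0;
  if (sign) value = -value;
  bool more;
  do {
    more = value > 0x7F;
    bytes.push_back(static_cast<uint8_t>(((more || sign) ? 0x80 : 0) |
                                         (value & 0x7F)));
    value >>= 7;
  } while (more);
  if (sign) bytes.push_back(0x00);  // denormalized zero byte marks a negative
}

// Standalone mirror of DecodeInt: accumulate 7-bit groups until the 'more'
// bit clears; an all-zero final byte flips the sign.
int DecodeInt(const std::vector<uint8_t>& bytes, size_t* index) {
  uint8_t current;
  int shift = 0, value = 0;
  do {
    current = bytes[(*index)++];
    value |= (current & 0x7F) << shift;
    shift += 7;
  } while (current & 0x80);
  return (current & 0x7F) == 0 ? -value : value;
}

int main() {
  std::vector<uint8_t> bytes;
  EncodeInt(bytes, 200);  // -> 0xC8 0x01
  EncodeInt(bytes, -3);   // -> 0x83 0x00
  size_t index = 0;
  assert(DecodeInt(bytes, &index) == 200);
  assert(DecodeInt(bytes, &index) == -3);
  return 0;
}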
 
 void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
                                                       int source_position) {
   int offset = static_cast<int>(bytecode_offset);
-  // If a position has already been assigned to this bytecode offset,
-  // do not reassign a new statement position.
-  if (CodeOffsetHasPosition(offset)) return;
-  uint32_t encoded = IsStatementField::encode(true) |
-                     SourcePositionField::encode(source_position);
-  entries_.push_back({offset, encoded});
+  AddEntry({offset, source_position, true});
 }
 
 void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
                                                        int source_position) {
   int offset = static_cast<int>(bytecode_offset);
-  // If a position has already been assigned to this bytecode offset,
-  // do not reassign a new statement position.
-  if (CodeOffsetHasPosition(offset)) return;
-  uint32_t encoded = IsStatementField::encode(false) |
-                     SourcePositionField::encode(source_position);
-  entries_.push_back({offset, encoded});
+  AddEntry({offset, source_position, false});
 }
 
-void SourcePositionTableBuilder::RevertPosition(size_t bytecode_offset) {
-  int offset = static_cast<int>(bytecode_offset);
-  // If we already added a source position table entry, but the bytecode array
-  // builder ended up not outputting a bytecode for the corresponding bytecode
-  // offset, we have to remove that entry.
-  if (CodeOffsetHasPosition(offset)) entries_.pop_back();
-}
-
-Handle<FixedArray> SourcePositionTableBuilder::ToFixedArray() {
-  int length = static_cast<int>(entries_.size());
-  Handle<FixedArray> table =
-      isolate_->factory()->NewFixedArray(length * 2, TENURED);
-  for (int i = 0; i < length; i++) {
-    table->set(i * 2, Smi::FromInt(entries_[i].bytecode_offset));
-    table->set(i * 2 + 1, Smi::FromInt(entries_[i].source_position_and_type));
+void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
+  // Don't encode a new entry if this bytecode already has a source position
+  // assigned.
+  if (candidate_.bytecode_offset == entry.bytecode_offset) {
+    if (entry.is_statement) candidate_ = entry;
+    return;
   }
+
+  CommitEntry();
+  candidate_ = entry;
+}
+
+void SourcePositionTableBuilder::CommitEntry() {
+  if (candidate_.bytecode_offset == kUninitializedCandidateOffset) return;
+  PositionTableEntry tmp(candidate_);
+  SubtractFromEntry(tmp, previous_);
+  EncodeEntry(bytes_, tmp);
+  previous_ = candidate_;
+
+  if (candidate_.is_statement) {
+    LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddStatementPositionEvent(
+                                 jit_handler_data_, candidate_.bytecode_offset,
+                                 candidate_.source_position));
+  }
+  LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddPositionEvent(
+                               jit_handler_data_, candidate_.bytecode_offset,
+                               candidate_.source_position));
+
+#ifdef ENABLE_SLOW_DCHECKS
+  raw_entries_.push_back(candidate_);
+#endif
+}
+
+Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
+  CommitEntry();
+  if (bytes_.empty()) return isolate_->factory()->empty_byte_array();
+
+  Handle<ByteArray> table = isolate_->factory()->NewByteArray(
+      static_cast<int>(bytes_.size()), TENURED);
+
+  MemCopy(table->GetDataStartAddress(), &*bytes_.begin(), bytes_.size());
+
+#ifdef ENABLE_SLOW_DCHECKS
+  // Brute force testing: Record all positions and decode
+  // the entire table to verify they are identical.
+  auto raw = raw_entries_.begin();
+  for (SourcePositionTableIterator encoded(*table); !encoded.done();
+       encoded.Advance(), raw++) {
+    DCHECK(raw != raw_entries_.end());
+    DCHECK_EQ(encoded.bytecode_offset(), raw->bytecode_offset);
+    DCHECK_EQ(encoded.source_position(), raw->source_position);
+    DCHECK_EQ(encoded.is_statement(), raw->is_statement);
+  }
+  DCHECK(raw == raw_entries_.end());
+#endif
+
   return table;
 }
 
-SourcePositionTableIterator::SourcePositionTableIterator(
-    BytecodeArray* bytecode_array)
-    : table_(bytecode_array->source_position_table()),
-      index_(0),
-      length_(table_->length()) {
-  DCHECK(table_->length() % 2 == 0);
+SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
+    : table_(byte_array), index_(0), current_() {
   Advance();
 }
 
 void SourcePositionTableIterator::Advance() {
-  if (index_ < length_) {
-    int new_bytecode_offset = Smi::cast(table_->get(index_))->value();
-    // Bytecode offsets are in ascending order.
-    DCHECK(bytecode_offset_ < new_bytecode_offset || index_ == 0);
-    bytecode_offset_ = new_bytecode_offset;
-    uint32_t source_position_and_type =
-        static_cast<uint32_t>(Smi::cast(table_->get(index_ + 1))->value());
-    is_statement_ = IsStatementField::decode(source_position_and_type);
-    source_position_ = SourcePositionField::decode(source_position_and_type);
+  DCHECK(!done());
+  DCHECK(index_ >= 0 && index_ <= table_->length());
+  if (index_ == table_->length()) {
+    index_ = kDone;
+  } else {
+    PositionTableEntry tmp;
+    DecodeEntry(table_, &index_, &tmp);
+    AddAndSetEntry(current_, tmp);
   }
-  index_ += 2;
 }
 
 }  // namespace interpreter
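Note: SourcePositionTableBuilder above keeps a single candidate entry per bytecode offset (a statement position replaces an expression position at the same offset) and, on commit, records only the delta against the previously written entry before varint-encoding it. A simplified, standalone sketch of that candidate/commit discipline with the encoding step left out (DeltaPositionBuilder is a hypothetical name, not the V8 class):

#include <vector>

struct Entry {
  int bytecode_offset;
  int source_position;
  bool is_statement;
};

// Hypothetical simplified builder mirroring AddEntry/CommitEntry above.
class DeltaPositionBuilder {
 public:
  void Add(const Entry& entry) {
    if (has_candidate_ && candidate_.bytecode_offset == entry.bytecode_offset) {
      // Same bytecode: only a statement position may replace the candidate.
      if (entry.is_statement) candidate_ = entry;
      return;
    }
    Commit();
    candidate_ = entry;
    has_candidate_ = true;
  }

  std::vector<Entry> Finish() {
    Commit();
    return deltas_;
  }

 private:
  void Commit() {
    if (!has_candidate_) return;
    // Store deltas against the previously committed entry; the real builder
    // varint-encodes these deltas instead of keeping structs around.
    deltas_.push_back({candidate_.bytecode_offset - previous_.bytecode_offset,
                       candidate_.source_position - previous_.source_position,
                       candidate_.is_statement});
    previous_ = candidate_;
  }

  bool has_candidate_ = false;
  Entry candidate_{0, 0, false};
  Entry previous_{0, 0, false};
  std::vector<Entry> deltas_;
};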
diff --git a/src/interpreter/source-position-table.h b/src/interpreter/source-position-table.h
index 336cf42..3ac58d6 100644
--- a/src/interpreter/source-position-table.h
+++ b/src/interpreter/source-position-table.h
@@ -6,72 +6,90 @@
 #define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
 
 #include "src/assert-scope.h"
+#include "src/checks.h"
 #include "src/handles.h"
-#include "src/zone.h"
+#include "src/log.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 
 class BytecodeArray;
-class FixedArray;
+class ByteArray;
 class Isolate;
+class Zone;
 
 namespace interpreter {
 
-class SourcePositionTableBuilder {
+struct PositionTableEntry {
+  PositionTableEntry()
+      : bytecode_offset(0), source_position(0), is_statement(false) {}
+  PositionTableEntry(int bytecode, int source, bool statement)
+      : bytecode_offset(bytecode),
+        source_position(source),
+        is_statement(statement) {}
+
+  int bytecode_offset;
+  int source_position;
+  bool is_statement;
+};
+
+class SourcePositionTableBuilder : public PositionsRecorder {
  public:
-  explicit SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
-      : isolate_(isolate), entries_(zone) {}
+  SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
+      : isolate_(isolate),
+        bytes_(zone),
+#ifdef ENABLE_SLOW_DCHECKS
+        raw_entries_(zone),
+#endif
+        candidate_(kUninitializedCandidateOffset, 0, false) {
+  }
 
   void AddStatementPosition(size_t bytecode_offset, int source_position);
   void AddExpressionPosition(size_t bytecode_offset, int source_position);
-  void RevertPosition(size_t bytecode_offset);
-  Handle<FixedArray> ToFixedArray();
+  Handle<ByteArray> ToSourcePositionTable();
 
  private:
-  struct Entry {
-    int bytecode_offset;
-    uint32_t source_position_and_type;
-  };
+  static const int kUninitializedCandidateOffset = -1;
 
-  bool CodeOffsetHasPosition(int bytecode_offset) {
-    // Return whether bytecode offset already has a position assigned.
-    return entries_.size() > 0 &&
-           entries_.back().bytecode_offset == bytecode_offset;
-  }
+  void AddEntry(const PositionTableEntry& entry);
+  void CommitEntry();
 
   Isolate* isolate_;
-  ZoneVector<Entry> entries_;
+  ZoneVector<byte> bytes_;
+#ifdef ENABLE_SLOW_DCHECKS
+  ZoneVector<PositionTableEntry> raw_entries_;
+#endif
+  PositionTableEntry candidate_;  // Next entry to be written, if initialized.
+  PositionTableEntry previous_;   // Previously written entry, to compute delta.
 };
 
 class SourcePositionTableIterator {
  public:
-  explicit SourcePositionTableIterator(BytecodeArray* bytecode_array);
+  explicit SourcePositionTableIterator(ByteArray* byte_array);
 
   void Advance();
 
   int bytecode_offset() const {
     DCHECK(!done());
-    return bytecode_offset_;
+    return current_.bytecode_offset;
   }
   int source_position() const {
     DCHECK(!done());
-    return source_position_;
+    return current_.source_position;
   }
   bool is_statement() const {
     DCHECK(!done());
-    return is_statement_;
+    return current_.is_statement;
   }
-  bool done() const { return index_ > length_; }
+  bool done() const { return index_ == kDone; }
 
  private:
-  FixedArray* table_;
+  static const int kDone = -1;
+
+  ByteArray* table_;
   int index_;
-  int length_;
-  bool is_statement_;
-  int bytecode_offset_;
-  int source_position_;
+  PositionTableEntry current_;
   DisallowHeapAllocation no_gc;
 };
 
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index c27b7a7..da36f76 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -97,6 +97,24 @@
 NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
 #undef NATIVE_CONTEXT_FIELD_ACCESSOR
 
+bool Isolate::IsArraySpeciesLookupChainIntact() {
+  if (!FLAG_harmony_species) return true;
+  // Note: It would be nice to have debug checks to make sure that the
+  // species protector is accurate, but this would be hard to do for most of
+  // what the protector stands for:
+  // - You'd need to traverse the heap to check that no Array instance has
+  //   a constructor property
+  // - To check that Array[Symbol.species] == Array, JS code has to execute,
+  //   but JS cannot be invoked in callstack overflow situations
+  // All that could be checked reliably is that
+  // Array.prototype.constructor == Array. Given that limitation, no check is
+  // done here. Instead, there are mjsunit tests harmony/array-species* which
+  // ensure that behavior is correct in various invalid protector cases.
+
+  PropertyCell* species_cell = heap()->species_protector();
+  return species_cell->value()->IsSmi() &&
+         Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/isolate.cc b/src/isolate.cc
index 8116f14..c9f0111 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -22,6 +22,7 @@
 #include "src/crankshaft/hydrogen.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
+#include "src/external-reference-table.h"
 #include "src/frames-inl.h"
 #include "src/ic/stub-cache.h"
 #include "src/interpreter/interpreter.h"
@@ -34,7 +35,7 @@
 #include "src/regexp/regexp-stack.h"
 #include "src/runtime-profiler.h"
 #include "src/simulator.h"
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/deserializer.h"
 #include "src/v8.h"
 #include "src/version.h"
 #include "src/vm-state-inl.h"
@@ -338,8 +339,23 @@
   return true;
 }
 
+static Handle<FixedArray> MaybeGrow(Isolate* isolate,
+                                    Handle<FixedArray> elements,
+                                    int cur_position, int new_size) {
+  if (new_size > elements->length()) {
+    int new_capacity = JSObject::NewElementsCapacity(elements->length());
+    Handle<FixedArray> new_elements =
+        isolate->factory()->NewFixedArrayWithHoles(new_capacity);
+    for (int i = 0; i < cur_position; i++) {
+      new_elements->set(i, elements->get(i));
+    }
+    elements = new_elements;
+  }
+  DCHECK(new_size <= elements->length());
+  return elements;
+}
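Note: MaybeGrow above reallocates the backing FixedArray once the next write would not fit, copying only the slots written so far. A rough std::vector-based analogue (the capacity policy here is an assumption; the real code uses JSObject::NewElementsCapacity and hole-filled arrays):

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Grow-and-copy sketch: ensure room for new_size slots, preserving the first
// cur_position live entries when a larger backing store is needed.
std::vector<int> MaybeGrow(std::vector<int> elements, size_t cur_position,
                           size_t new_size) {
  if (new_size > elements.size()) {
    size_t new_capacity = std::max(new_size, elements.size() * 2 + 16);
    std::vector<int> new_elements(new_capacity, /*hole=*/0);
    std::copy(elements.begin(), elements.begin() + cur_position,
              new_elements.begin());
    elements = std::move(new_elements);
  }
  return elements;
}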
 
-Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
+Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                                 Handle<Object> caller) {
   // Get stack trace limit.
   Handle<JSObject> error = error_function();
@@ -364,51 +380,72 @@
   int frames_seen = 0;
   int sloppy_frames = 0;
   bool encountered_strict_function = false;
-  for (JavaScriptFrameIterator iter(this);
-       !iter.done() && frames_seen < limit;
+  for (StackFrameIterator iter(this); !iter.done() && frames_seen < limit;
        iter.Advance()) {
-    JavaScriptFrame* frame = iter.frame();
-    // Set initial size to the maximum inlining level + 1 for the outermost
-    // function.
-    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-    frame->Summarize(&frames);
-    for (int i = frames.length() - 1; i >= 0; i--) {
-      Handle<JSFunction> fun = frames[i].function();
-      Handle<Object> recv = frames[i].receiver();
-      // Filter out internal frames that we do not want to show.
-      if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) continue;
-      // Filter out frames from other security contexts.
-      if (!this->context()->HasSameSecurityTokenAs(fun->context())) continue;
-      if (cursor + 4 > elements->length()) {
-        int new_capacity = JSObject::NewElementsCapacity(elements->length());
-        Handle<FixedArray> new_elements =
-            factory()->NewFixedArrayWithHoles(new_capacity);
-        for (int i = 0; i < cursor; i++) {
-          new_elements->set(i, elements->get(i));
-        }
-        elements = new_elements;
-      }
-      DCHECK(cursor + 4 <= elements->length());
+    StackFrame* frame = iter.frame();
 
-      Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+    switch (frame->type()) {
+      case StackFrame::JAVA_SCRIPT:
+      case StackFrame::OPTIMIZED:
+      case StackFrame::INTERPRETED: {
+        JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
+        // Set initial size to the maximum inlining level + 1 for the outermost
+        // function.
+        List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+        js_frame->Summarize(&frames);
+        for (int i = frames.length() - 1; i >= 0; i--) {
+          Handle<JSFunction> fun = frames[i].function();
+          Handle<Object> recv = frames[i].receiver();
+          // Filter out internal frames that we do not want to show.
+          if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) {
+            continue;
+          }
+          // Filter out frames from other security contexts.
+          if (!this->context()->HasSameSecurityTokenAs(fun->context())) {
+            continue;
+          }
+          elements = MaybeGrow(this, elements, cursor, cursor + 4);
 
-      Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
-      // The stack trace API should not expose receivers and function
-      // objects on frames deeper than the top-most one with a strict
-      // mode function.  The number of sloppy frames is stored as
-      // first element in the result array.
-      if (!encountered_strict_function) {
-        if (is_strict(fun->shared()->language_mode())) {
-          encountered_strict_function = true;
-        } else {
-          sloppy_frames++;
+          Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+
+          Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
+          // The stack trace API should not expose receivers and function
+          // objects on frames deeper than the top-most one with a strict mode
+          // function. The number of sloppy frames is stored as first element in
+          // the result array.
+          if (!encountered_strict_function) {
+            if (is_strict(fun->shared()->language_mode())) {
+              encountered_strict_function = true;
+            } else {
+              sloppy_frames++;
+            }
+          }
+          elements->set(cursor++, *recv);
+          elements->set(cursor++, *fun);
+          elements->set(cursor++, *abstract_code);
+          elements->set(cursor++, *offset);
+          frames_seen++;
         }
-      }
-      elements->set(cursor++, *recv);
-      elements->set(cursor++, *fun);
-      elements->set(cursor++, *abstract_code);
-      elements->set(cursor++, *offset);
-      frames_seen++;
+      } break;
+
+      case StackFrame::WASM: {
+        WasmFrame* wasm_frame = WasmFrame::cast(frame);
+        Code* code = wasm_frame->unchecked_code();
+        Handle<AbstractCode> abstract_code =
+            Handle<AbstractCode>(AbstractCode::cast(code));
+        Handle<JSFunction> fun = factory()->NewFunction(
+            factory()->NewStringFromAsciiChecked("<WASM>"));
+        elements = MaybeGrow(this, elements, cursor, cursor + 4);
+        // TODO(jfb) Pass module object.
+        elements->set(cursor++, *factory()->undefined_value());
+        elements->set(cursor++, *fun);
+        elements->set(cursor++, *abstract_code);
+        elements->set(cursor++, Internals::IntToSmi(0));
+        frames_seen++;
+      } break;
+
+      default:
+        break;
     }
   }
   elements->set(0, Smi::FromInt(sloppy_frames));
@@ -419,9 +456,8 @@
   return result;
 }
 
-
-MaybeHandle<JSObject> Isolate::CaptureAndSetDetailedStackTrace(
-    Handle<JSObject> error_object) {
+MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
+    Handle<JSReceiver> error_object) {
   if (capture_stack_trace_for_uncaught_exceptions_) {
     // Capture stack trace for a detailed exception message.
     Handle<Name> key = factory()->detailed_stack_trace_symbol();
@@ -429,21 +465,20 @@
         stack_trace_for_uncaught_exceptions_frame_limit_,
         stack_trace_for_uncaught_exceptions_options_);
     RETURN_ON_EXCEPTION(
-        this, JSObject::SetProperty(error_object, key, stack_trace, STRICT),
-        JSObject);
+        this, JSReceiver::SetProperty(error_object, key, stack_trace, STRICT),
+        JSReceiver);
   }
   return error_object;
 }
 
-
-MaybeHandle<JSObject> Isolate::CaptureAndSetSimpleStackTrace(
-    Handle<JSObject> error_object, Handle<Object> caller) {
+MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
+    Handle<JSReceiver> error_object, Handle<Object> caller) {
   // Capture stack trace for simple stack trace string formatting.
   Handle<Name> key = factory()->stack_trace_symbol();
   Handle<Object> stack_trace = CaptureSimpleStackTrace(error_object, caller);
   RETURN_ON_EXCEPTION(
-      this, JSObject::SetProperty(error_object, key, stack_trace, STRICT),
-      JSObject);
+      this, JSReceiver::SetProperty(error_object, key, stack_trace, STRICT),
+      JSReceiver);
   return error_object;
 }
 
@@ -868,7 +903,7 @@
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && FLAG_stress_compaction) {
-    heap()->CollectAllAvailableGarbage("trigger compaction");
+    heap()->CollectAllGarbage(Heap::kNoGCFlags, "trigger compaction");
   }
 #endif  // VERIFY_HEAP
 
@@ -1780,6 +1815,8 @@
       descriptor_lookup_cache_(NULL),
       handle_scope_implementer_(NULL),
       unicode_cache_(NULL),
+      runtime_zone_(&allocator_),
+      interface_descriptor_zone_(&allocator_),
       inner_pointer_to_code_cache_(NULL),
       global_handles_(NULL),
       eternal_handles_(NULL),
@@ -1794,6 +1831,7 @@
       serializer_enabled_(enable_serializer),
       has_fatal_error_(false),
       initialized_from_snapshot_(false),
+      is_tail_call_elimination_enabled_(true),
       cpu_profiler_(NULL),
       heap_profiler_(NULL),
       function_entry_hook_(NULL),
@@ -2165,7 +2203,7 @@
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
-    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
+    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390
   Simulator::Initialize(this);
 #endif
 #endif
@@ -2196,7 +2234,7 @@
   }
 
   if (create_heap_objects) {
-    // Terminate the cache array with the sentinel so we can iterate.
+    // Terminate the partial snapshot cache so we can iterate.
     partial_snapshot_cache_.Add(heap_.undefined_value());
   }
 
@@ -2268,9 +2306,8 @@
     // the snapshot.
     HandleScope scope(this);
     Deoptimizer::EnsureCodeForDeoptimizationEntry(
-        this,
-        Deoptimizer::LAZY,
-        kDeoptTableSerializeEntryCount - 1);
+        this, Deoptimizer::LAZY,
+        ExternalReferenceTable::kDeoptTableSerializeEntryCount - 1);
   }
 
   if (!serializer_enabled()) {
@@ -2431,12 +2468,11 @@
   return code_tracer();
 }
 
-
-Map* Isolate::get_initial_js_array_map(ElementsKind kind, Strength strength) {
+Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
   if (IsFastElementsKind(kind)) {
     DisallowHeapAllocation no_gc;
-    Object* const initial_js_array_map = context()->native_context()->get(
-        Context::ArrayMapIndex(kind, strength));
+    Object* const initial_js_array_map =
+        context()->native_context()->get(Context::ArrayMapIndex(kind));
     if (!initial_js_array_map->IsUndefined()) {
       return Map::cast(initial_js_array_map);
     }
@@ -2511,25 +2547,6 @@
   return cell_reports_intact;
 }
 
-bool Isolate::IsArraySpeciesLookupChainIntact() {
-  if (!FLAG_harmony_species) return true;
-  // Note: It would be nice to have debug checks to make sure that the
-  // species protector is accurate, but this would be hard to do for most of
-  // what the protector stands for:
-  // - You'd need to traverse the heap to check that no Array instance has
-  //   a constructor property or a modified __proto__
-  // - To check that Array[Symbol.species] == Array, JS code has to execute,
-  //   but JS cannot be invoked in callstack overflow situations
-  // All that could be checked reliably is that
-  // Array.prototype.constructor == Array. Given that limitation, no check is
-  // done here. In place, there are mjsunit tests harmony/array-species* which
-  // ensure that behavior is correct in various invalid protector cases.
-
-  PropertyCell* species_cell = heap()->species_protector();
-  return species_cell->value()->IsSmi() &&
-         Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
-}
-
 void Isolate::InvalidateArraySpeciesProtector() {
   if (!FLAG_harmony_species) return;
   DCHECK(factory()->species_protector()->value()->IsSmi());
@@ -2682,7 +2699,11 @@
 
 void Isolate::FireCallCompletedCallback() {
   bool has_call_completed_callbacks = !call_completed_callbacks_.is_empty();
-  bool run_microtasks = autorun_microtasks() && pending_microtask_count();
+  bool run_microtasks =
+      pending_microtask_count() &&
+      !handle_scope_implementer()->HasMicrotasksSuppressions() &&
+      handle_scope_implementer()->microtasks_policy() ==
+          v8::MicrotasksPolicy::kAuto;
   if (!has_call_completed_callbacks && !run_microtasks) return;
 
   if (!handle_scope_implementer()->CallDepthIsZero()) return;
@@ -2737,7 +2758,12 @@
   // Increase call depth to prevent recursive callbacks.
   v8::Isolate::SuppressMicrotaskExecutionScope suppress(
       reinterpret_cast<v8::Isolate*>(this));
+  RunMicrotasksInternal();
+  FireMicrotasksCompletedCallback();
+}
 
+
+void Isolate::RunMicrotasksInternal() {
   while (pending_microtask_count() > 0) {
     HandleScope scope(this);
     int num_tasks = pending_microtask_count();
@@ -2746,8 +2772,8 @@
     set_pending_microtask_count(0);
     heap()->set_microtask_queue(heap()->empty_fixed_array());
 
-    for (int i = 0; i < num_tasks; i++) {
-      HandleScope scope(this);
+    Isolate* isolate = this;
+    FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < num_tasks, i++, {
       Handle<Object> microtask(queue->get(i), this);
       if (microtask->IsJSFunction()) {
         Handle<JSFunction> microtask_function =
@@ -2774,11 +2800,37 @@
         void* data = v8::ToCData<void*>(callback_info->data());
         callback(data);
       }
+    });
+  }
+}
+
+
+void Isolate::AddMicrotasksCompletedCallback(
+    MicrotasksCompletedCallback callback) {
+  for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
+    if (callback == microtasks_completed_callbacks_.at(i)) return;
+  }
+  microtasks_completed_callbacks_.Add(callback);
+}
+
+
+void Isolate::RemoveMicrotasksCompletedCallback(
+    MicrotasksCompletedCallback callback) {
+  for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
+    if (callback == microtasks_completed_callbacks_.at(i)) {
+      microtasks_completed_callbacks_.Remove(i);
     }
   }
 }
 
 
+void Isolate::FireMicrotasksCompletedCallback() {
+  for (int i = 0; i < microtasks_completed_callbacks_.length(); i++) {
+    microtasks_completed_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
+  }
+}
+
+
 void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
   DCHECK(!use_counter_callback_);
   use_counter_callback_ = callback;
@@ -2817,6 +2869,14 @@
   }
 }
 
+void Isolate::SetTailCallEliminationEnabled(bool enabled) {
+  if (is_tail_call_elimination_enabled_ == enabled) return;
+  is_tail_call_elimination_enabled_ = enabled;
+  // TODO(ishell): Introduce DependencyGroup::kTailCallChangedGroup to
+  // deoptimize only those functions that are affected by the change of this
+  // flag.
+  internal::Deoptimizer::DeoptimizeAll(this);
+}
 
 // Heap::detached_contexts tracks detached contexts as pairs
 // (number of GC since the context was detached, the context).
diff --git a/src/isolate.h b/src/isolate.h
index 2d74dc4..8847164 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -11,6 +11,7 @@
 #include "include/v8-debug.h"
 #include "src/allocation.h"
 #include "src/assert-scope.h"
+#include "src/base/accounting-allocator.h"
 #include "src/base/atomicops.h"
 #include "src/builtins.h"
 #include "src/cancelable-task.h"
@@ -26,8 +27,8 @@
 #include "src/messages.h"
 #include "src/optimizing-compile-dispatcher.h"
 #include "src/regexp/regexp-stack.h"
-#include "src/runtime/runtime.h"
 #include "src/runtime-profiler.h"
+#include "src/runtime/runtime.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -178,6 +179,20 @@
   C(ExternalCaughtException, external_caught_exception) \
   C(JSEntrySP, js_entry_sp)
 
+#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
+                              limit_check, increment, body)                \
+  do {                                                                     \
+    loop_var_type init;                                                    \
+    loop_var_type for_with_handle_limit = loop_var;                        \
+    Isolate* for_with_handle_isolate = isolate;                            \
+    while (limit_check) {                                                  \
+      for_with_handle_limit += 1024;                                       \
+      HandleScope loop_scope(for_with_handle_isolate);                     \
+      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
+        body                                                               \
+      }                                                                    \
+    }                                                                      \
+  } while (false)
 
 // Platform-independent, reliable thread identifier.
 class ThreadId {
@@ -378,7 +393,6 @@
   V(HashMap*, external_reference_map, NULL)                                    \
   V(HashMap*, root_index_map, NULL)                                            \
   V(int, pending_microtask_count, 0)                                           \
-  V(bool, autorun_microtasks, true)                                            \
   V(HStatistics*, hstatistics, NULL)                                           \
   V(CompilationStatistics*, turbo_statistics, NULL)                            \
   V(HTracer*, htracer, NULL)                                                   \
@@ -668,12 +682,12 @@
   Handle<JSArray> CaptureCurrentStackTrace(
       int frame_limit,
       StackTrace::StackTraceOptions options);
-  Handle<Object> CaptureSimpleStackTrace(Handle<JSObject> error_object,
+  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                          Handle<Object> caller);
-  MaybeHandle<JSObject> CaptureAndSetDetailedStackTrace(
-      Handle<JSObject> error_object);
-  MaybeHandle<JSObject> CaptureAndSetSimpleStackTrace(
-      Handle<JSObject> error_object, Handle<Object> caller);
+  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
+      Handle<JSReceiver> error_object);
+  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
+      Handle<JSReceiver> error_object, Handle<Object> caller);
   Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object);
   Handle<JSArray> GetDetailedFromSimpleStackTrace(
       Handle<JSObject> error_object);
@@ -953,14 +967,13 @@
     date_cache_ = date_cache;
   }
 
-  Map* get_initial_js_array_map(ElementsKind kind,
-                                Strength strength = Strength::WEAK);
+  Map* get_initial_js_array_map(ElementsKind kind);
 
   static const int kArrayProtectorValid = 1;
   static const int kArrayProtectorInvalid = 0;
 
   bool IsFastArrayConstructorPrototypeChainIntact();
-  bool IsArraySpeciesLookupChainIntact();
+  inline bool IsArraySpeciesLookupChainIntact();
 
   // On intent to set an element in object, make sure that appropriate
   // notifications occur if the set is on the elements of the array or
@@ -998,13 +1011,6 @@
     return optimizing_compile_dispatcher_ != NULL;
   }
 
-  bool concurrent_osr_enabled() const {
-    // Thread is only available with flag enabled.
-    DCHECK(optimizing_compile_dispatcher_ == NULL ||
-           FLAG_concurrent_recompilation);
-    return optimizing_compile_dispatcher_ != NULL && FLAG_concurrent_osr;
-  }
-
   OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
     return optimizing_compile_dispatcher_;
   }
@@ -1061,6 +1067,10 @@
   void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
   void FireBeforeCallEnteredCallback();
 
+  void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
+  void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
+  void FireMicrotasksCompletedCallback();
+
   void SetPromiseRejectCallback(PromiseRejectCallback callback);
   void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
                            v8::PromiseRejectEvent event);
@@ -1080,6 +1090,14 @@
   int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
 #endif
 
+  // Support for dynamically disabling tail call elimination.
+  Address is_tail_call_elimination_enabled_address() {
+    return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
+  }
+  bool is_tail_call_elimination_enabled() const {
+    return is_tail_call_elimination_enabled_;
+  }
+  void SetTailCallEliminationEnabled(bool enabled);
 
   void AddDetachedContext(Handle<Context> context);
   void CheckDetachedContextsAfterGC();
@@ -1101,6 +1119,8 @@
 
   interpreter::Interpreter* interpreter() const { return interpreter_; }
 
+  base::AccountingAllocator* allocator() { return &allocator_; }
+
  protected:
   explicit Isolate(bool enable_serializer);
 
@@ -1209,6 +1229,8 @@
   // the frame.
   void RemoveMaterializedObjectsOnUnwind(StackFrame* frame);
 
+  void RunMicrotasksInternal();
+
   base::Atomic32 id_;
   EntryStackItem* entry_stack_;
   int stack_trace_nesting_level_;
@@ -1239,6 +1261,7 @@
   HandleScopeData handle_scope_data_;
   HandleScopeImplementer* handle_scope_implementer_;
   UnicodeCache* unicode_cache_;
+  base::AccountingAllocator allocator_;
   Zone runtime_zone_;
   Zone interface_descriptor_zone_;
   InnerPointerToCodeCache* inner_pointer_to_code_cache_;
@@ -1266,6 +1289,9 @@
   // True if this isolate was initialized from a snapshot.
   bool initialized_from_snapshot_;
 
+  // True if ES2015 tail call elimination feature is enabled.
+  bool is_tail_call_elimination_enabled_;
+
   // Time stamp at initialization.
   double time_millis_at_init_;
 
@@ -1330,6 +1356,9 @@
   // List of callbacks when a Call completes.
   List<CallCompletedCallback> call_completed_callbacks_;
 
+  // List of callbacks after microtasks were run.
+  List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
+
   v8::Isolate::UseCounterCallback use_counter_callback_;
   BasicBlockProfiler* basic_block_profiler_;
 
@@ -1358,6 +1387,8 @@
   friend class v8::Locker;
   friend class v8::Unlocker;
   friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
+  friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
+                                                        const char*);
 
   DISALLOW_COPY_AND_ASSIGN(Isolate);
 };
diff --git a/src/js/array-iterator.js b/src/js/array-iterator.js
index 2609ebd..b3e25e9 100644
--- a/src/js/array-iterator.js
+++ b/src/js/array-iterator.js
@@ -109,6 +109,24 @@
   return CreateArrayIterator(this, ITERATOR_KIND_KEYS);
 }
 
+// TODO(littledan): Check for detached TypedArray in these three methods
+function TypedArrayEntries() {
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+  return %_Call(ArrayEntries, this);
+}
+
+
+function TypedArrayValues() {
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+  return %_Call(ArrayValues, this);
+}
+
+
+function TypedArrayKeys() {
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+  return %_Call(ArrayKeys, this);
+}
+
 
 %FunctionSetPrototype(ArrayIterator, {__proto__: IteratorPrototype});
 %FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
@@ -117,8 +135,6 @@
   'next', ArrayIteratorNext
 ]);
 utils.SetFunctionName(ArrayIteratorIterator, iteratorSymbol);
-%AddNamedProperty(ArrayIterator.prototype, iteratorSymbol,
-                  ArrayIteratorIterator, DONT_ENUM);
 %AddNamedProperty(ArrayIterator.prototype, toStringTagSymbol,
                   "Array Iterator", READ_ONLY | DONT_ENUM);
 
@@ -135,12 +151,13 @@
 %AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
                   DONT_ENUM);
 
+utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
+  'entries', TypedArrayEntries,
+  'keys', TypedArrayKeys,
+  'values', TypedArrayValues
+]);
 %AddNamedProperty(GlobalTypedArray.prototype,
-                  'entries', ArrayEntries, DONT_ENUM);
-%AddNamedProperty(GlobalTypedArray.prototype, 'values', ArrayValues, DONT_ENUM);
-%AddNamedProperty(GlobalTypedArray.prototype, 'keys', ArrayKeys, DONT_ENUM);
-%AddNamedProperty(GlobalTypedArray.prototype,
-                  iteratorSymbol, ArrayValues, DONT_ENUM);
+                  iteratorSymbol, TypedArrayValues, DONT_ENUM);
 
 // -------------------------------------------------------------------
 // Exports
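
The three wrappers above give the %TypedArray% prototype its own entries/keys/values with a receiver check, instead of installing the Array functions directly; MakeTypeError(kNotTypedArray) means a non-TypedArray receiver is rejected with a TypeError. A minimal user-level sketch of the observable difference:

    var values = Int8Array.prototype.values;
    new Int8Array(4).values();              // Array Iterator over the four elements
    try {
      values.call([1, 2, 3]);               // plain arrays are now rejected
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }
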
diff --git a/src/js/array.js b/src/js/array.js
index 0a5e283..1406df3 100644
--- a/src/js/array.js
+++ b/src/js/array.js
@@ -73,17 +73,13 @@
   }
 }
 
+function KeySortCompare(a, b) {
+  return a - b;
+}
 
-// Global list of arrays visited during toString, toLocaleString and
-// join invocations.
-var visited_arrays = new InternalArray();
-
-
-// Gets a sorted array of array keys.  Useful for operations on sparse
-// arrays.  Dupes have not been removed.
 function GetSortedArrayKeys(array, indices) {
-  var keys = new InternalArray();
   if (IS_NUMBER(indices)) {
+    var keys = new InternalArray();
     // It's an interval
     var limit = indices;
     for (var i = 0; i < limit; ++i) {
@@ -92,61 +88,34 @@
         keys.push(i);
       }
     }
-  } else {
-    var length = indices.length;
-    for (var k = 0; k < length; ++k) {
-      var key = indices[k];
-      if (!IS_UNDEFINED(key)) {
-        var e = array[key];
-        if (!IS_UNDEFINED(e) || key in array) {
-          keys.push(key);
-        }
-      }
-    }
-    keys.sort(function(a, b) { return a - b; });
+    return keys;
   }
-  return keys;
+  return InnerArraySort(indices, indices.length, KeySortCompare);
 }
 
 
-function SparseJoinWithSeparatorJS(array, len, convert, separator) {
-  var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
-  var totalLength = 0;
-  var elements = new InternalArray(keys.length * 2);
-  var previousKey = -1;
-  for (var i = 0; i < keys.length; i++) {
+function SparseJoinWithSeparatorJS(array, keys, length, convert, separator) {
+  var keys_length = keys.length;
+  var elements = new InternalArray(keys_length * 2);
+  for (var i = 0; i < keys_length; i++) {
     var key = keys[i];
-    if (key != previousKey) {  // keys may contain duplicates.
-      var e = array[key];
-      if (!IS_STRING(e)) e = convert(e);
-      elements[i * 2] = key;
-      elements[i * 2 + 1] = e;
-      previousKey = key;
-    }
+    var e = array[key];
+    elements[i * 2] = key;
+    elements[i * 2 + 1] = IS_STRING(e) ? e : convert(e);
   }
-  return %SparseJoinWithSeparator(elements, len, separator);
+  return %SparseJoinWithSeparator(elements, length, separator);
 }
 
 
 // Optimized for sparse arrays if separator is ''.
-function SparseJoin(array, len, convert) {
-  var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
-  var last_key = -1;
+function SparseJoin(array, keys, convert) {
   var keys_length = keys.length;
-
   var elements = new InternalArray(keys_length);
-  var elements_length = 0;
-
   for (var i = 0; i < keys_length; i++) {
-    var key = keys[i];
-    if (key != last_key) {
-      var e = array[key];
-      if (!IS_STRING(e)) e = convert(e);
-      elements[elements_length++] = e;
-      last_key = key;
-    }
+    var e = array[keys[i]];
+    elements[i] = IS_STRING(e) ? e : convert(e);
   }
-  return %StringBuilderConcat(elements, elements_length, '');
+  return %StringBuilderConcat(elements, keys_length, '');
 }
 
 
@@ -167,94 +136,122 @@
     (touched > estimated_elements * 4);
 }
 
+function Stack() {
+  this.length = 0;
+  this.values = new InternalArray();
+}
+
+// Predeclare the instance variables on the prototype. Otherwise, setting them in
+// the constructor could trigger setters on Object.prototype and leak the instance.
+Stack.prototype.length = null;
+Stack.prototype.values = null;
+
+function StackPush(stack, value) {
+  stack.values[stack.length++] = value;
+}
+
+function StackPop(stack) {
+  stack.values[--stack.length] = null;
+}
+
+function StackHas(stack, v) {
+  var length = stack.length;
+  var values = stack.values;
+  for (var i = 0; i < length; i++) {
+    if (values[i] === v) return true;
+  }
+  return false;
+}
+
+// Global list of arrays visited during toString, toLocaleString and
+// join invocations.
+var visited_arrays = new Stack();
+
+function DoJoin(array, length, is_array, separator, convert) {
+  if (UseSparseVariant(array, length, is_array, length)) {
+    %NormalizeElements(array);
+    var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, length));
+    if (separator === '') {
+      if (keys.length === 0) return '';
+      return SparseJoin(array, keys, convert);
+    } else {
+      return SparseJoinWithSeparatorJS(array, keys, length, convert, separator);
+    }
+  }
+
+  // Fast case for one-element arrays.
+  if (length === 1) {
+    var e = array[0];
+    return IS_STRING(e) ? e : convert(e);
+  }
+
+  // Construct an array for the elements.
+  var elements = new InternalArray(length);
+
+  // We pull the empty separator check outside the loop for speed!
+  if (separator === '') {
+    for (var i = 0; i < length; i++) {
+      var e = array[i];
+      elements[i] = IS_STRING(e) ? e : convert(e);
+    }
+    return %StringBuilderConcat(elements, length, '');
+  }
+  // Non-empty separator case.
+  // If the first element is a number then use the heuristic that the
+  // remaining elements are also likely to be numbers.
+  var e = array[0];
+  if (IS_NUMBER(e)) {
+    elements[0] = %_NumberToString(e);
+    for (var i = 1; i < length; i++) {
+      e = array[i];
+      if (IS_NUMBER(e)) {
+        elements[i] = %_NumberToString(e);
+      } else {
+        elements[i] = IS_STRING(e) ? e : convert(e);
+      }
+    }
+  } else {
+    elements[0] = IS_STRING(e) ? e : convert(e);
+    for (var i = 1; i < length; i++) {
+      e = array[i];
+      elements[i] = IS_STRING(e) ? e : convert(e);
+    }
+  }
+  return %StringBuilderJoin(elements, length, separator);
+}
 
 function Join(array, length, separator, convert) {
-  if (length == 0) return '';
+  if (length === 0) return '';
 
   var is_array = IS_ARRAY(array);
 
   if (is_array) {
     // If the array is cyclic, return the empty string for already
     // visited arrays.
-    if (!%PushIfAbsent(visited_arrays, array)) return '';
+    if (StackHas(visited_arrays, array)) return '';
+    StackPush(visited_arrays, array);
   }
 
   // Attempt to convert the elements.
   try {
-    if (UseSparseVariant(array, length, is_array, length)) {
-      %NormalizeElements(array);
-      if (separator.length == 0) {
-        return SparseJoin(array, length, convert);
-      } else {
-        return SparseJoinWithSeparatorJS(array, length, convert, separator);
-      }
-    }
-
-    // Fast case for one-element arrays.
-    if (length == 1) {
-      var e = array[0];
-      if (IS_STRING(e)) return e;
-      return convert(e);
-    }
-
-    // Construct an array for the elements.
-    var elements = new InternalArray(length);
-
-    // We pull the empty separator check outside the loop for speed!
-    if (separator.length == 0) {
-      var elements_length = 0;
-      for (var i = 0; i < length; i++) {
-        var e = array[i];
-        if (!IS_STRING(e)) e = convert(e);
-        elements[elements_length++] = e;
-      }
-      elements.length = elements_length;
-      return %StringBuilderConcat(elements, elements_length, '');
-    }
-    // Non-empty separator case.
-    // If the first element is a number then use the heuristic that the
-    // remaining elements are also likely to be numbers.
-    if (!IS_NUMBER(array[0])) {
-      for (var i = 0; i < length; i++) {
-        var e = array[i];
-        if (!IS_STRING(e)) e = convert(e);
-        elements[i] = e;
-      }
-    } else {
-      for (var i = 0; i < length; i++) {
-        var e = array[i];
-        if (IS_NUMBER(e)) {
-          e = %_NumberToString(e);
-        } else if (!IS_STRING(e)) {
-          e = convert(e);
-        }
-        elements[i] = e;
-      }
-    }
-    return %StringBuilderJoin(elements, length, separator);
+    return DoJoin(array, length, is_array, separator, convert);
   } finally {
     // Make sure to remove the last element of the visited array no
     // matter what happens.
-    if (is_array) visited_arrays.length = visited_arrays.length - 1;
+    if (is_array) StackPop(visited_arrays);
   }
 }
 
 
 function ConvertToString(x) {
-  if (IS_NULL_OR_UNDEFINED(x)) {
-    return '';
-  } else {
-    return TO_STRING(x);
-  }
+  if (IS_NULL_OR_UNDEFINED(x)) return '';
+  return TO_STRING(x);
 }
 
 
 function ConvertToLocaleString(e) {
-  if (IS_NULL_OR_UNDEFINED(e)) {
-    return '';
-  } else {
-    return TO_STRING(e.toLocaleString());
-  }
+  if (IS_NULL_OR_UNDEFINED(e)) return '';
+  return TO_STRING(e.toLocaleString());
 }
 
 
@@ -275,12 +272,10 @@
     var length = indices.length;
     for (var k = 0; k < length; ++k) {
       var key = indices[k];
-      if (!IS_UNDEFINED(key)) {
-        if (key >= start_i) {
-          var current = array[key];
-          if (!IS_UNDEFINED(current) || key in array) {
-            DefineIndexedProperty(deleted_elements, key - start_i, current);
-          }
+      if (key >= start_i) {
+        var current = array[key];
+        if (!IS_UNDEFINED(current) || key in array) {
+          DefineIndexedProperty(deleted_elements, key - start_i, current);
         }
       }
     }
@@ -317,21 +312,19 @@
     var length = indices.length;
     for (var k = 0; k < length; ++k) {
       var key = indices[k];
-      if (!IS_UNDEFINED(key)) {
-        if (key < start_i) {
-          var current = array[key];
-          if (!IS_UNDEFINED(current) || key in array) {
-            new_array[key] = current;
-          }
-        } else if (key >= start_i + del_count) {
-          var current = array[key];
-          if (!IS_UNDEFINED(current) || key in array) {
-            var new_key = key - del_count + num_additional_args;
-            new_array[new_key] = current;
-            if (new_key > 0xfffffffe) {
-              big_indices = big_indices || new InternalArray();
-              big_indices.push(new_key);
-            }
+      if (key < start_i) {
+        var current = array[key];
+        if (!IS_UNDEFINED(current) || key in array) {
+          new_array[key] = current;
+        }
+      } else if (key >= start_i + del_count) {
+        var current = array[key];
+        if (!IS_UNDEFINED(current) || key in array) {
+          var new_key = key - del_count + num_additional_args;
+          new_array[new_key] = current;
+          if (new_key > 0xfffffffe) {
+            big_indices = big_indices || new InternalArray();
+            big_indices.push(new_key);
           }
         }
       }
@@ -1069,8 +1062,7 @@
       } else {
         for (var i = 0; i < indices.length; i++) {
           var index = indices[i];
-          if (!IS_UNDEFINED(index) && !HAS_OWN_PROPERTY(obj, index)
-              && HAS_OWN_PROPERTY(proto, index)) {
+          if (!HAS_OWN_PROPERTY(obj, index) && HAS_OWN_PROPERTY(proto, index)) {
             obj[index] = proto[index];
             if (index >= max) { max = index + 1; }
           }
@@ -1097,8 +1089,7 @@
       } else {
         for (var i = 0; i < indices.length; i++) {
           var index = indices[i];
-          if (!IS_UNDEFINED(index) && from <= index &&
-              HAS_OWN_PROPERTY(proto, index)) {
+          if (from <= index && HAS_OWN_PROPERTY(proto, index)) {
             obj[index] = UNDEFINED;
           }
         }
@@ -1247,10 +1238,19 @@
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
 
   var is_array = IS_ARRAY(array);
-  for (var i = 0; i < length; i++) {
-    if (HAS_INDEX(array, i, is_array)) {
-      var element = array[i];
-      %_Call(f, receiver, element, i, array);
+  if (IS_UNDEFINED(receiver)) {
+    for (var i = 0; i < length; i++) {
+      if (HAS_INDEX(array, i, is_array)) {
+        var element = array[i];
+        f(element, i, array);
+      }
+    }
+  } else {
+    for (var i = 0; i < length; i++) {
+      if (HAS_INDEX(array, i, is_array)) {
+        var element = array[i];
+        %_Call(f, receiver, element, i, array);
+      }
     }
   }
 }
@@ -1347,7 +1347,7 @@
   if (IS_UNDEFINED(index)) {
     index = 0;
   } else {
-    index = TO_INTEGER(index);
+    index = TO_INTEGER(index) + 0;  // Add 0 to convert -0 to 0
     // If index is negative, index from the end of the array.
     if (index < 0) {
       index = length + index;
@@ -1373,7 +1373,7 @@
       while (i < n && sortedKeys[i] < index) i++;
       while (i < n) {
         var key = sortedKeys[i];
-        if (!IS_UNDEFINED(key) && array[key] === element) return key;
+        if (array[key] === element) return key;
         i++;
       }
       return -1;
@@ -1409,7 +1409,7 @@
   if (argumentsLength < 2) {
     index = length - 1;
   } else {
-    index = TO_INTEGER(index);
+    index = TO_INTEGER(index) + 0;  // Add 0 to convert -0 to 0
     // If index is negative, index from end of the array.
     if (index < 0) index += length;
     // If index is still negative, do not search the array.
@@ -1432,7 +1432,7 @@
       var i = sortedKeys.length - 1;
       while (i >= 0) {
         var key = sortedKeys[i];
-        if (!IS_UNDEFINED(key) && array[key] === element) return key;
+        if (array[key] === element) return key;
         i--;
       }
       return -1;
@@ -1946,6 +1946,10 @@
   to.InnerArraySort = InnerArraySort;
   to.InnerArrayToLocaleString = InnerArrayToLocaleString;
   to.PackedArrayReverse = PackedArrayReverse;
+  to.Stack = Stack;
+  to.StackHas = StackHas;
+  to.StackPush = StackPush;
+  to.StackPop = StackPop;
 });
 
 %InstallToContext([
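
Join's cycle tracking now uses the Stack helpers above instead of %PushIfAbsent, and StackPop runs in the finally clause so the visited list unwinds even if a conversion throws. The user-visible contract is unchanged; a behavioral sketch:

    var a = [1, 2];
    a.push(a);                  // the array now contains itself
    console.log(a.join('-'));   // "1-2-"  (the revisited array joins as '')
    console.log(a.join('-'));   // "1-2-"  again; the stack was unwound in finally
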
diff --git a/src/js/harmony-atomics.js b/src/js/harmony-atomics.js
index b861a2a..9f80227 100644
--- a/src/js/harmony-atomics.js
+++ b/src/js/harmony-atomics.js
@@ -12,12 +12,14 @@
 // Imports
 
 var GlobalObject = global.Object;
+var MakeRangeError;
 var MakeTypeError;
 var MaxSimple;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
   MakeTypeError = from.MakeTypeError;
+  MakeRangeError = from.MakeRangeError;
   MaxSimple = from.MaxSimple;
 });
 
@@ -37,14 +39,24 @@
   }
 }
 
+// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
+function ValidateIndex(index, length) {
+  var numberIndex = TO_NUMBER(index);
+  var accessIndex = TO_INTEGER(numberIndex);
+  if (numberIndex !== accessIndex) {
+    throw MakeRangeError(kInvalidAtomicAccessIndex);
+  }
+  if (accessIndex < 0 || accessIndex >= length) {
+    throw MakeRangeError(kInvalidAtomicAccessIndex);
+  }
+  return accessIndex;
+}
+
 //-------------------------------------------------------------------
 
 function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
   CheckSharedIntegerTypedArray(sta);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(sta));
   oldValue = TO_NUMBER(oldValue);
   newValue = TO_NUMBER(newValue);
   return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
@@ -52,79 +64,55 @@
 
 function AtomicsLoadJS(sta, index) {
   CheckSharedIntegerTypedArray(sta);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(sta));
   return %_AtomicsLoad(sta, index);
 }
 
 function AtomicsStoreJS(sta, index, value) {
   CheckSharedIntegerTypedArray(sta);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(sta));
   value = TO_NUMBER(value);
   return %_AtomicsStore(sta, index, value);
 }
 
 function AtomicsAddJS(ia, index, value) {
   CheckSharedIntegerTypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   value = TO_NUMBER(value);
   return %_AtomicsAdd(ia, index, value);
 }
 
 function AtomicsSubJS(ia, index, value) {
   CheckSharedIntegerTypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   value = TO_NUMBER(value);
   return %_AtomicsSub(ia, index, value);
 }
 
 function AtomicsAndJS(ia, index, value) {
   CheckSharedIntegerTypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   value = TO_NUMBER(value);
   return %_AtomicsAnd(ia, index, value);
 }
 
 function AtomicsOrJS(ia, index, value) {
   CheckSharedIntegerTypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   value = TO_NUMBER(value);
   return %_AtomicsOr(ia, index, value);
 }
 
 function AtomicsXorJS(ia, index, value) {
   CheckSharedIntegerTypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   value = TO_NUMBER(value);
   return %_AtomicsXor(ia, index, value);
 }
 
 function AtomicsExchangeJS(ia, index, value) {
   CheckSharedIntegerTypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   value = TO_NUMBER(value);
   return %_AtomicsExchange(ia, index, value);
 }
@@ -137,10 +125,7 @@
 
 function AtomicsFutexWaitJS(ia, index, value, timeout) {
   CheckSharedInteger32TypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   if (IS_UNDEFINED(timeout)) {
     timeout = INFINITY;
   } else {
@@ -156,20 +141,17 @@
 
 function AtomicsFutexWakeJS(ia, index, count) {
   CheckSharedInteger32TypedArray(ia);
-  index = TO_INTEGER(index);
-  if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
-    return UNDEFINED;
-  }
+  index = ValidateIndex(index, %_TypedArrayGetLength(ia));
   count = MaxSimple(0, TO_INTEGER(count));
   return %AtomicsFutexWake(ia, index, count);
 }
 
 function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {
   CheckSharedInteger32TypedArray(ia);
-  index1 = TO_INTEGER(index1);
+  index1 = ValidateIndex(index1, %_TypedArrayGetLength(ia));
   count = MaxSimple(0, TO_INTEGER(count));
   value = TO_INT32(value);
-  index2 = TO_INTEGER(index2);
+  index2 = ValidateIndex(index2, %_TypedArrayGetLength(ia));
   if (index1 < 0 || index1 >= %_TypedArrayGetLength(ia) ||
       index2 < 0 || index2 >= %_TypedArrayGetLength(ia)) {
     return UNDEFINED;
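
ValidateIndex changes the failure mode of every Atomics entry point above: a non-integer or out-of-range index now throws the kInvalidAtomicAccessIndex RangeError instead of silently returning undefined. A behavioral sketch, assuming a build with SharedArrayBuffer and Atomics exposed:

    var ia = new Int32Array(new SharedArrayBuffer(16));   // 4 elements
    Atomics.store(ia, 0, 42);                             // valid integer index
    try {
      Atomics.load(ia, 1.5);                              // fractional index
    } catch (e) {
      console.log(e instanceof RangeError);               // true (previously: undefined)
    }
    try {
      Atomics.load(ia, 4);                                // one past the end
    } catch (e) {
      console.log(e instanceof RangeError);               // true
    }
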
diff --git a/src/js/harmony-regexp-exec.js b/src/js/harmony-regexp-exec.js
new file mode 100644
index 0000000..e2eece9
--- /dev/null
+++ b/src/js/harmony-regexp-exec.js
@@ -0,0 +1,37 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalRegExp = global.RegExp;
+var RegExpSubclassExecJS = utils.ImportNow("RegExpSubclassExecJS");
+var RegExpSubclassMatch = utils.ImportNow("RegExpSubclassMatch");
+var RegExpSubclassReplace = utils.ImportNow("RegExpSubclassReplace");
+var RegExpSubclassSearch = utils.ImportNow("RegExpSubclassSearch");
+var RegExpSubclassSplit = utils.ImportNow("RegExpSubclassSplit");
+var RegExpSubclassTest = utils.ImportNow("RegExpSubclassTest");
+var matchSymbol = utils.ImportNow("match_symbol");
+var replaceSymbol = utils.ImportNow("replace_symbol");
+var searchSymbol = utils.ImportNow("search_symbol");
+var splitSymbol = utils.ImportNow("split_symbol");
+
+utils.OverrideFunction(GlobalRegExp.prototype, "exec",
+                       RegExpSubclassExecJS, true);
+utils.OverrideFunction(GlobalRegExp.prototype, matchSymbol,
+                       RegExpSubclassMatch, true);
+utils.OverrideFunction(GlobalRegExp.prototype, replaceSymbol,
+                       RegExpSubclassReplace, true);
+utils.OverrideFunction(GlobalRegExp.prototype, searchSymbol,
+                       RegExpSubclassSearch, true);
+utils.OverrideFunction(GlobalRegExp.prototype, splitSymbol,
+                       RegExpSubclassSplit, true);
+utils.OverrideFunction(GlobalRegExp.prototype, "test",
+                       RegExpSubclassTest, true);
+
+})
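
Routing exec, test and the four symbol-keyed methods through overridable functions lets RegExp subclasses replace the match algorithm and still cooperate with the String built-ins. A small sketch of the intent (hypothetical subclass; the overrides above sit behind the harmony RegExp-exec staging at this revision):

    class LiteralDot extends RegExp {
      exec(subject) {
        // Replace the whole match algorithm: find a literal '.' anywhere.
        var i = subject.indexOf('.');
        return i < 0 ? null : Object.assign(['.'], { index: i, input: subject });
      }
    }
    console.log(new LiteralDot().test('a.b'));    // true  -- test() is routed through exec
    console.log('a.b'.match(new LiteralDot()));   // ['.'] -- Symbol.match calls exec too
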
diff --git a/src/js/harmony-regexp.js b/src/js/harmony-regexp.js
deleted file mode 100644
index f76ef86..0000000
--- a/src/js/harmony-regexp.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype = GlobalRegExp.prototype;
-var MakeTypeError;
-var regExpFlagsSymbol = utils.ImportNow("regexp_flags_symbol");
-
-utils.Import(function(from) {
-  MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-// ES6 draft 12-06-13, section 21.2.5.3
-// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
-function RegExpGetFlags() {
-  if (!IS_RECEIVER(this)) {
-    throw MakeTypeError(
-        kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
-  }
-  var result = '';
-  if (this.global) result += 'g';
-  if (this.ignoreCase) result += 'i';
-  if (this.multiline) result += 'm';
-  if (this.unicode) result += 'u';
-  if (this.sticky) result += 'y';
-  return result;
-}
-
-// ES6 21.2.5.12.
-function RegExpGetSticky() {
-  if (!IS_REGEXP(this)) {
-    // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
-    // TODO(littledan): Remove this workaround or standardize it
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeStickyGetter);
-      return UNDEFINED;
-    }
-    throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
-  }
-  return !!REGEXP_STICKY(this);
-}
-%FunctionSetName(RegExpGetSticky, "RegExp.prototype.sticky");
-%SetNativeFlag(RegExpGetSticky);
-
-utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
-utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
-
-})
diff --git a/src/js/harmony-string-padding.js b/src/js/harmony-string-padding.js
new file mode 100644
index 0000000..a6c6c47
--- /dev/null
+++ b/src/js/harmony-string-padding.js
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalString = global.String;
+var MakeTypeError;
+
+utils.Import(function(from) {
+  MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+// http://tc39.github.io/proposal-string-pad-start-end/
+
+function StringPad(thisString, maxLength, fillString) {
+  maxLength = TO_LENGTH(maxLength);
+  var stringLength = thisString.length;
+
+  if (maxLength <= stringLength) return "";
+
+  if (IS_UNDEFINED(fillString)) {
+    fillString = " ";
+  } else {
+    fillString = TO_STRING(fillString);
+    if (fillString === "") {
+      fillString = " ";
+    }
+  }
+
+  var fillLength = maxLength - stringLength;
+  var repetitions = (fillLength / fillString.length) | 0;
+  var remainingChars = (fillLength - fillString.length * repetitions) | 0;
+
+  var filler = "";
+  while (true) {
+    if (repetitions & 1) filler += fillString;
+    repetitions >>= 1;
+    if (repetitions === 0) break;
+    fillString += fillString;
+  }
+
+  if (remainingChars) {
+    filler += %_SubString(fillString, 0, remainingChars);
+  }
+
+  return filler;
+}
+
+function StringPadStart(maxLength, fillString) {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.padStart")
+  var thisString = TO_STRING(this);
+
+  return StringPad(thisString, maxLength, fillString) + thisString;
+}
+%FunctionSetLength(StringPadStart, 1);
+
+function StringPadEnd(maxLength, fillString) {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.padEnd")
+  var thisString = TO_STRING(this);
+
+  return thisString + StringPad(thisString, maxLength, fillString);
+}
+%FunctionSetLength(StringPadEnd, 1);
+
+utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
+  "padStart", StringPadStart,
+  "padEnd", StringPadEnd
+]);
+
+});
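
StringPad assembles the filler by repeatedly doubling fillString (the repetitions loop above), so building a long pad costs a logarithmic number of concatenations. The resulting behavior, behind the harmony string-padding staging at this revision:

    console.log('7'.padStart(3, '0'));    // "007"
    console.log('abc'.padEnd(6, '12'));   // "abc121"  (filler truncated to fit)
    console.log('abc'.padStart(2, '*'));  // "abc"     (maxLength <= length: no-op)
    console.log('x'.padStart(4));         // "   x"    (default fill is a space)
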
diff --git a/src/js/harmony-unicode-regexps.js b/src/js/harmony-unicode-regexps.js
index b24bbdf..16d06ba 100644
--- a/src/js/harmony-unicode-regexps.js
+++ b/src/js/harmony-unicode-regexps.js
@@ -31,10 +31,9 @@
     }
     throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
   }
-  return !!REGEXP_UNICODE(this);
+  return TO_BOOLEAN(REGEXP_UNICODE(this));
 }
-%FunctionSetName(RegExpGetUnicode, "RegExp.prototype.unicode");
-%SetNativeFlag(RegExpGetUnicode);
+%SetForceInlineFlag(RegExpGetUnicode);
 
 utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
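
The unicode getter now normalizes through TO_BOOLEAN and is force-inlined rather than merely native-flagged; the observable result is unchanged (sketch assumes unicode regexps are enabled in the build):

    console.log(/a/u.unicode);   // true
    console.log(/a/.unicode);    // false
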
 
diff --git a/src/js/i18n.js b/src/js/i18n.js
index 7b2f5a1..845289a 100644
--- a/src/js/i18n.js
+++ b/src/js/i18n.js
@@ -20,26 +20,30 @@
 var ArrayIndexOf;
 var ArrayJoin;
 var ArrayPush;
-var IsFinite;
-var IsNaN;
 var GlobalBoolean = global.Boolean;
 var GlobalDate = global.Date;
 var GlobalNumber = global.Number;
 var GlobalRegExp = global.RegExp;
 var GlobalString = global.String;
+var InstallFunctions = utils.InstallFunctions;
+var InstallGetter = utils.InstallGetter;
+var InternalPackedArray = utils.InternalPackedArray;
+var InternalRegExpMatch;
+var InternalRegExpReplace;
+var IsFinite;
+var IsNaN;
 var MakeError;
 var MakeRangeError;
 var MakeTypeError;
-var MathFloor;
 var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
 var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
+var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
+var OverrideFunction = utils.OverrideFunction;
 var patternSymbol = utils.ImportNow("intl_pattern_symbol");
-var RegExpTest;
 var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
+var SetFunctionName = utils.SetFunctionName;
 var StringIndexOf;
 var StringLastIndexOf;
-var StringMatch;
-var StringReplace;
 var StringSplit;
 var StringSubstr;
 var StringSubstring;
@@ -53,17 +57,72 @@
   MakeError = from.MakeError;
   MakeRangeError = from.MakeRangeError;
   MakeTypeError = from.MakeTypeError;
-  MathFloor = from.MathFloor;
-  RegExpTest = from.RegExpTest;
+  InternalRegExpMatch = from.InternalRegExpMatch;
+  InternalRegExpReplace = from.InternalRegExpReplace;
   StringIndexOf = from.StringIndexOf;
   StringLastIndexOf = from.StringLastIndexOf;
-  StringMatch = from.StringMatch;
-  StringReplace = from.StringReplace;
   StringSplit = from.StringSplit;
   StringSubstr = from.StringSubstr;
   StringSubstring = from.StringSubstring;
 });
 
+// Utilities for definitions
+
+function InstallFunction(object, name, func) {
+  InstallFunctions(object, DONT_ENUM, [name, func]);
+}
+
+
+function InstallConstructor(object, name, func) {
+  %CheckIsBootstrapping();
+  SetFunctionName(func, name);
+  %AddNamedProperty(object, name, func, DONT_ENUM);
+  %SetNativeFlag(func);
+  %ToFastProperties(object);
+}
+
+/**
+ * Adds a bound method to the prototype of the given object.
+ */
+function AddBoundMethod(obj, methodName, implementation, length) {
+  %CheckIsBootstrapping();
+  var internalName = %CreatePrivateSymbol(methodName);
+  var getter = function() {
+    if (!%IsInitializedIntlObject(this)) {
+      throw MakeTypeError(kMethodCalledOnWrongObject, methodName);
+    }
+    if (IS_UNDEFINED(this[internalName])) {
+      var boundMethod;
+      if (IS_UNDEFINED(length) || length === 2) {
+        boundMethod = (x, y) => implementation(this, x, y);
+      } else if (length === 1) {
+        boundMethod = x => implementation(this, x);
+      } else {
+        boundMethod = (...args) => {
+          // DateTimeFormat.format needs to be a 0 arg method, but can still
+          // receive optional dateValue param. If one was provided, pass it
+          // along.
+          if (args.length > 0) {
+            return implementation(this, args[0]);
+          } else {
+            return implementation(this);
+          }
+        }
+      }
+      // TODO(littledan): Once function name reform is shipped, remove the
+      // following line and wrap the boundMethod definition in an anonymous
+      // function macro.
+      %FunctionSetName(boundMethod, '__bound' + methodName + '__');
+      %FunctionRemovePrototype(boundMethod);
+      %SetNativeFlag(boundMethod);
+      this[internalName] = boundMethod;
+    }
+    return this[internalName];
+  };
+
+  InstallGetter(obj.prototype, methodName, getter, DONT_ENUM);
+}
+
 // -------------------------------------------------------------------
 
 var Intl = {};
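
AddBoundMethod caches the receiver-bound arrow function under a private symbol, so each Intl instance hands back the same bound method on every read of the getter. A stand-alone sketch of the same pattern in plain JavaScript (names here are illustrative, not the V8 internals):

    var BOUND = Symbol('bound compare');        // stand-in for %CreatePrivateSymbol
    function addBoundCompare(proto, implementation) {
      Object.defineProperty(proto, 'compare', {
        configurable: true,
        get: function () {
          if (this[BOUND] === undefined) {
            // The arrow function closes over the receiver, like boundMethod above.
            this[BOUND] = (x, y) => implementation(this, x, y);
          }
          return this[BOUND];
        }
      });
    }
    // Usage: addBoundCompare(MyCollator.prototype, (self, x, y) => self.compareImpl(x, y));
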
@@ -197,74 +256,13 @@
   return TIMEZONE_NAME_LOCATION_PART_RE;
 }
 
-/**
- * Adds bound method to the prototype of the given object.
- */
-function addBoundMethod(obj, methodName, implementation, length) {
-  %CheckIsBootstrapping();
-  function getter() {
-    if (!%IsInitializedIntlObject(this)) {
-      throw MakeTypeError(kMethodCalledOnWrongObject, methodName);
-    }
-    var internalName = '__bound' + methodName + '__';
-    if (IS_UNDEFINED(this[internalName])) {
-      var that = this;
-      var boundMethod;
-      if (IS_UNDEFINED(length) || length === 2) {
-        boundMethod = function(x, y) {
-          if (!IS_UNDEFINED(new.target)) {
-            throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
-          }
-          return implementation(that, x, y);
-        }
-      } else if (length === 1) {
-        boundMethod = function(x) {
-          if (!IS_UNDEFINED(new.target)) {
-            throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
-          }
-          return implementation(that, x);
-        }
-      } else {
-        boundMethod = function() {
-          if (!IS_UNDEFINED(new.target)) {
-            throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
-          }
-          // DateTimeFormat.format needs to be 0 arg method, but can stil
-          // receive optional dateValue param. If one was provided, pass it
-          // along.
-          if (arguments.length > 0) {
-            return implementation(that, arguments[0]);
-          } else {
-            return implementation(that);
-          }
-        }
-      }
-      %FunctionSetName(boundMethod, internalName);
-      %FunctionRemovePrototype(boundMethod);
-      %SetNativeFlag(boundMethod);
-      this[internalName] = boundMethod;
-    }
-    return this[internalName];
-  }
-
-  %FunctionSetName(getter, methodName);
-  %FunctionRemovePrototype(getter);
-  %SetNativeFlag(getter);
-
-  ObjectDefineProperty(obj.prototype, methodName, {
-    get: getter,
-    enumerable: false,
-    configurable: true
-  });
-}
-
 
 /**
  * Returns an intersection of locales and service supported locales.
  * Parameter locales is treated as a priority list.
  */
 function supportedLocalesOf(service, locales, options) {
-  if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
+  if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
     throw MakeError(kWrongServiceType, service);
   }
 
@@ -312,10 +310,8 @@
   var matchedLocales = [];
   for (var i = 0; i < requestedLocales.length; ++i) {
     // Remove -u- extension.
-    var locale = %_Call(StringReplace,
-                        requestedLocales[i],
-                        GetUnicodeExtensionRE(),
-                        '');
+    var locale = InternalRegExpReplace(
+        GetUnicodeExtensionRE(), requestedLocales[i], '');
     do {
       if (!IS_UNDEFINED(availableLocales[locale])) {
         // Push requested locale not the resolved one.
@@ -421,7 +417,7 @@
  * lookup algorithm.
  */
 function lookupMatcher(service, requestedLocales) {
-  if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
+  if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
     throw MakeError(kWrongServiceType, service);
   }
 
@@ -432,13 +428,13 @@
 
   for (var i = 0; i < requestedLocales.length; ++i) {
     // Remove all extensions.
-    var locale = %_Call(StringReplace, requestedLocales[i],
-                        GetAnyExtensionRE(), '');
+    var locale = InternalRegExpReplace(
+        GetAnyExtensionRE(), requestedLocales[i], '');
     do {
       if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
         // Return the resolved locale and extension.
-        var extensionMatch =
-            %_Call(StringMatch, requestedLocales[i], GetUnicodeExtensionRE());
+        var extensionMatch = InternalRegExpMatch(
+            GetUnicodeExtensionRE(), requestedLocales[i]);
         var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
         return {'locale': locale, 'extension': extension, 'position': i};
       }
@@ -535,7 +531,7 @@
   }
 
   for (var key in keyValues) {
-    if (%HasOwnProperty(keyValues, key)) {
+    if (HAS_OWN_PROPERTY(keyValues, key)) {
       var value = UNDEFINED;
       var map = keyValues[key];
       if (!IS_UNDEFINED(map.property)) {
@@ -551,7 +547,7 @@
       // User options didn't have it, check Unicode extension.
       // Here we want to convert strings 'true', 'false' into proper Boolean
       // values (not a user error).
-      if (%HasOwnProperty(extensionMap, key)) {
+      if (HAS_OWN_PROPERTY(extensionMap, key)) {
         value = extensionMap[key];
         if (!IS_UNDEFINED(value)) {
           updateProperty(map.property, map.type, value);
@@ -612,8 +608,8 @@
   }
 
   // Preserve extensions of resolved locale, but swap base tags with original.
-  var resolvedBase = new GlobalRegExp('^' + locales[1].base);
-  return %_Call(StringReplace, resolved, resolvedBase, locales[0].base);
+  var resolvedBase = new GlobalRegExp('^' + locales[1].base, 'g');
+  return InternalRegExpReplace(resolvedBase, resolved, locales[0].base);
 }
 
 
@@ -627,10 +623,10 @@
   var available = %AvailableLocalesOf(service);
 
   for (var i in available) {
-    if (%HasOwnProperty(available, i)) {
-      var parts =
-          %_Call(StringMatch, i, /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
-      if (parts !== null) {
+    if (HAS_OWN_PROPERTY(available, i)) {
+      var parts = InternalRegExpMatch(
+          /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/, i);
+      if (!IS_NULL(parts)) {
         // Build xx-ZZ. We don't care about the actual value,
         // as long it's not undefined.
         available[parts[1] + '-' + parts[3]] = null;
@@ -700,7 +696,7 @@
  * 'of', 'au' and 'es' are special-cased and lowercased.
  */
 function toTitleCaseTimezoneLocation(location) {
-  var match = %_Call(StringMatch, location, GetTimezoneNameLocationPartRE());
+  var match = InternalRegExpMatch(GetTimezoneNameLocationPartRE(), location);
   if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, location);
 
   var result = toTitleCaseWord(match[1]);
@@ -797,7 +793,7 @@
  */
 function isValidLanguageTag(locale) {
   // Check if it's well-formed, including grandfathered tags.
-  if (!%_Call(RegExpTest, GetLanguageTagRE(), locale)) {
+  if (IS_NULL(InternalRegExpMatch(GetLanguageTagRE(), locale))) {
     return false;
   }
 
@@ -809,17 +805,17 @@
   // Check if there are any duplicate variants or singletons (extensions).
 
   // Remove private use section.
-  locale = %_Call(StringSplit, locale, /-x-/)[0];
+  locale = %_Call(StringSplit, locale, '-x-')[0];
 
   // Skip language since it can match variant regex, so we start from 1.
   // We are matching i-klingon here, but that's ok, since i-klingon-klingon
   // is not valid and would fail LANGUAGE_TAG_RE test.
   var variants = [];
   var extensions = [];
-  var parts = %_Call(StringSplit, locale, /-/);
+  var parts = %_Call(StringSplit, locale, '-');
   for (var i = 1; i < parts.length; i++) {
     var value = parts[i];
-    if (%_Call(RegExpTest, GetLanguageVariantRE(), value) &&
+    if (!IS_NULL(InternalRegExpMatch(GetLanguageVariantRE(), value)) &&
         extensions.length === 0) {
       if (%_Call(ArrayIndexOf, variants, value) === -1) {
         %_Call(ArrayPush, variants, value);
@@ -828,7 +824,7 @@
       }
     }
 
-    if (%_Call(RegExpTest, GetLanguageSingletonRE(), value)) {
+    if (!IS_NULL(InternalRegExpMatch(GetLanguageSingletonRE(), value))) {
       if (%_Call(ArrayIndexOf, extensions, value) === -1) {
         %_Call(ArrayPush, extensions, value);
       } else {
@@ -943,7 +939,7 @@
 
   var collation = 'default';
   var extension = '';
-  if (%HasOwnProperty(extensionMap, 'co') && internalOptions.usage === 'sort') {
+  if (HAS_OWN_PROPERTY(extensionMap, 'co') && internalOptions.usage === 'sort') {
 
     /**
      * Allowed -u-co- values. List taken from:
@@ -1001,7 +997,7 @@
  *
  * @constructor
  */
-%AddNamedProperty(Intl, 'Collator', function() {
+InstallConstructor(Intl, 'Collator', function() {
     var locales = arguments[0];
     var options = arguments[1];
 
@@ -1011,15 +1007,14 @@
     }
 
     return initializeCollator(TO_OBJECT(this), locales, options);
-  },
-  DONT_ENUM
+  }
 );
 
 
 /**
  * Collator resolvedOptions method.
  */
-%AddNamedProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
+InstallFunction(Intl.Collator.prototype, 'resolvedOptions', function() {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
@@ -1041,12 +1036,8 @@
       caseFirst: coll[resolvedSymbol].caseFirst,
       collation: coll[resolvedSymbol].collation
     };
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
-%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
 
 
 /**
@@ -1055,18 +1046,14 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-%AddNamedProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
+InstallFunction(Intl.Collator, 'supportedLocalesOf', function(locales) {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
     return supportedLocalesOf('collator', locales, arguments[1]);
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
-%SetNativeFlag(Intl.Collator.supportedLocalesOf);
 
 
 /**
@@ -1085,7 +1072,7 @@
 };
 
 
-addBoundMethod(Intl.Collator, 'compare', compare, 2);
+AddBoundMethod(Intl.Collator, 'compare', compare, 2);
 
 /**
  * Verifies that the input is a well-formed ISO 4217 currency code.
@@ -1093,9 +1080,8 @@
  * For example \u00DFP (Eszett+P) becomes SSP.
  */
 function isWellFormedCurrencyCode(currency) {
-  return typeof currency == "string" &&
-      currency.length == 3 &&
-      %_Call(StringMatch, currency, /[^A-Za-z]/) == null;
+  return typeof currency == "string" && currency.length == 3 &&
+      IS_NULL(InternalRegExpMatch(/[^A-Za-z]/, currency));
 }
 
 
@@ -1110,7 +1096,7 @@
     if (IsNaN(value) || value < min || value > max) {
       throw MakeRangeError(kPropertyValueOutOfRange, property);
     }
-    return MathFloor(value);
+    return %math_floor(value);
   }
 
   return fallback;
@@ -1225,10 +1211,10 @@
     style: {value: internalOptions.style, writable: true},
     useGrouping: {writable: true}
   });
-  if (%HasOwnProperty(internalOptions, 'minimumSignificantDigits')) {
+  if (HAS_OWN_PROPERTY(internalOptions, 'minimumSignificantDigits')) {
     defineWEProperty(resolved, 'minimumSignificantDigits', UNDEFINED);
   }
-  if (%HasOwnProperty(internalOptions, 'maximumSignificantDigits')) {
+  if (HAS_OWN_PROPERTY(internalOptions, 'maximumSignificantDigits')) {
     defineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
   }
   var formatter = %CreateNumberFormat(requestedLocale,
@@ -1254,7 +1240,7 @@
  *
  * @constructor
  */
-%AddNamedProperty(Intl, 'NumberFormat', function() {
+InstallConstructor(Intl, 'NumberFormat', function() {
     var locales = arguments[0];
     var options = arguments[1];
 
@@ -1264,15 +1250,14 @@
     }
 
     return initializeNumberFormat(TO_OBJECT(this), locales, options);
-  },
-  DONT_ENUM
+  }
 );
 
 
 /**
  * NumberFormat resolvedOptions method.
  */
-%AddNamedProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
+InstallFunction(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
@@ -1301,24 +1286,19 @@
                         format[resolvedSymbol].currencyDisplay);
     }
 
-    if (%HasOwnProperty(format[resolvedSymbol], 'minimumSignificantDigits')) {
+    if (HAS_OWN_PROPERTY(format[resolvedSymbol], 'minimumSignificantDigits')) {
       defineWECProperty(result, 'minimumSignificantDigits',
                         format[resolvedSymbol].minimumSignificantDigits);
     }
 
-    if (%HasOwnProperty(format[resolvedSymbol], 'maximumSignificantDigits')) {
+    if (HAS_OWN_PROPERTY(format[resolvedSymbol], 'maximumSignificantDigits')) {
       defineWECProperty(result, 'maximumSignificantDigits',
                         format[resolvedSymbol].maximumSignificantDigits);
     }
 
     return result;
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
-                 'resolvedOptions');
-%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
 
 
 /**
@@ -1327,18 +1307,14 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-%AddNamedProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
+InstallFunction(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
     return supportedLocalesOf('numberformat', locales, arguments[1]);
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
 
 
 /**
@@ -1364,8 +1340,8 @@
 }
 
 
-addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
-addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
+AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
+AddBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
 
 /**
  * Returns a string that matches LDML representation of the options object.
@@ -1435,57 +1411,57 @@
  */
 function fromLDMLString(ldmlString) {
   // First remove '' quoted text, so we lose 'Uhr' strings.
-  ldmlString = %_Call(StringReplace, ldmlString, GetQuotedStringRE(), '');
+  ldmlString = InternalRegExpReplace(GetQuotedStringRE(), ldmlString, '');
 
   var options = {};
-  var match = %_Call(StringMatch, ldmlString, /E{3,5}/g);
+  var match = InternalRegExpMatch(/E{3,5}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
 
-  match = %_Call(StringMatch, ldmlString, /G{3,5}/g);
+  match = InternalRegExpMatch(/G{3,5}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
 
-  match = %_Call(StringMatch, ldmlString, /y{1,2}/g);
+  match = InternalRegExpMatch(/y{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'year', match, {y: 'numeric', yy: '2-digit'});
 
-  match = %_Call(StringMatch, ldmlString, /M{1,5}/g);
+  match = InternalRegExpMatch(/M{1,5}/, ldmlString);
   options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
       M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
 
   // Sometimes we get L instead of M for month - standalone name.
-  match = %_Call(StringMatch, ldmlString, /L{1,5}/g);
+  match = InternalRegExpMatch(/L{1,5}/, ldmlString);
   options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
       L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
 
-  match = %_Call(StringMatch, ldmlString, /d{1,2}/g);
+  match = InternalRegExpMatch(/d{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'day', match, {d: 'numeric', dd: '2-digit'});
 
-  match = %_Call(StringMatch, ldmlString, /h{1,2}/g);
+  match = InternalRegExpMatch(/h{1,2}/, ldmlString);
   if (match !== null) {
     options['hour12'] = true;
   }
   options = appendToDateTimeObject(
       options, 'hour', match, {h: 'numeric', hh: '2-digit'});
 
-  match = %_Call(StringMatch, ldmlString, /H{1,2}/g);
+  match = InternalRegExpMatch(/H{1,2}/, ldmlString);
   if (match !== null) {
     options['hour12'] = false;
   }
   options = appendToDateTimeObject(
       options, 'hour', match, {H: 'numeric', HH: '2-digit'});
 
-  match = %_Call(StringMatch, ldmlString, /m{1,2}/g);
+  match = InternalRegExpMatch(/m{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'minute', match, {m: 'numeric', mm: '2-digit'});
 
-  match = %_Call(StringMatch, ldmlString, /s{1,2}/g);
+  match = InternalRegExpMatch(/s{1,2}/, ldmlString);
   options = appendToDateTimeObject(
       options, 'second', match, {s: 'numeric', ss: '2-digit'});
 
-  match = %_Call(StringMatch, ldmlString, /z|zzzz/g);
+  match = InternalRegExpMatch(/z|zzzz/, ldmlString);
   options = appendToDateTimeObject(
       options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
 
@@ -1495,7 +1471,7 @@
 
 function appendToDateTimeObject(options, option, match, pairs) {
   if (IS_NULL(match)) {
-    if (!%HasOwnProperty(options, option)) {
+    if (!HAS_OWN_PROPERTY(options, option)) {
       defineWEProperty(options, option, UNDEFINED);
     }
     return options;
@@ -1658,7 +1634,7 @@
  *
  * @constructor
  */
-%AddNamedProperty(Intl, 'DateTimeFormat', function() {
+InstallConstructor(Intl, 'DateTimeFormat', function() {
     var locales = arguments[0];
     var options = arguments[1];
 
@@ -1668,15 +1644,14 @@
     }
 
     return initializeDateTimeFormat(TO_OBJECT(this), locales, options);
-  },
-  DONT_ENUM
+  }
 );
 
 
 /**
  * DateTimeFormat resolvedOptions method.
  */
-%AddNamedProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
+InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
@@ -1735,13 +1710,8 @@
     addWECPropertyIfDefined(result, 'second', fromPattern.second);
 
     return result;
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
-                 'resolvedOptions');
-%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
 
 
 /**
@@ -1750,18 +1720,14 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-%AddNamedProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
+InstallFunction(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
     return supportedLocalesOf('dateformat', locales, arguments[1]);
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
 
 
 /**
@@ -1797,8 +1763,8 @@
 
 
 // 0 because date is optional argument.
-addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
-addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
+AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
+AddBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
 
 
 /**
@@ -1822,7 +1788,7 @@
 
   // We expect only _, '-' and / besides ASCII letters.
   // All inputs should conform to Area/Location(/Location)* from now on.
-  var match = %_Call(StringMatch, tzID, GetTimezoneNameCheckRE());
+  var match = InternalRegExpMatch(GetTimezoneNameCheckRE(), tzID);
   if (IS_NULL(match)) throw MakeRangeError(kExpectedTimezoneID, tzID);
 
   var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
@@ -1885,7 +1851,7 @@
  *
  * @constructor
  */
-%AddNamedProperty(Intl, 'v8BreakIterator', function() {
+InstallConstructor(Intl, 'v8BreakIterator', function() {
     var locales = arguments[0];
     var options = arguments[1];
 
@@ -1895,15 +1861,14 @@
     }
 
     return initializeBreakIterator(TO_OBJECT(this), locales, options);
-  },
-  DONT_ENUM
+  }
 );
 
 
 /**
  * BreakIterator resolvedOptions method.
  */
-%AddNamedProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions',
+InstallFunction(Intl.v8BreakIterator.prototype, 'resolvedOptions',
   function() {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
@@ -1922,13 +1887,8 @@
       locale: locale,
       type: segmenter[resolvedSymbol].type
     };
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
-                 'resolvedOptions');
-%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
 
 
 /**
@@ -1937,19 +1897,15 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-%AddNamedProperty(Intl.v8BreakIterator, 'supportedLocalesOf',
+InstallFunction(Intl.v8BreakIterator, 'supportedLocalesOf',
   function(locales) {
     if (!IS_UNDEFINED(new.target)) {
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
     return supportedLocalesOf('breakiterator', locales, arguments[1]);
-  },
-  DONT_ENUM
+  }
 );
-%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
-%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
 
 
 /**
@@ -1994,11 +1950,11 @@
 }
 
 
-addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
-addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
-addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
-addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
-addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
+AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
 
 // Save references to Intl objects and methods we use, for added security.
 var savedObjects = {
@@ -2036,18 +1992,6 @@
   return new savedObjects[service](locales, useOptions);
 }
 
-
-function OverrideFunction(object, name, f) {
-  %CheckIsBootstrapping();
-  ObjectDefineProperty(object, name, { value: f,
-                                       writeable: true,
-                                       configurable: true,
-                                       enumerable: false });
-  %FunctionSetName(f, name);
-  %FunctionRemovePrototype(f);
-  %SetNativeFlag(f);
-}
-
 /**
  * Compares this and that, and returns less than 0, 0 or greater than 0 value.
  * Overrides the built-in method.
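
Throughout the Intl code above, the repeated %AddNamedProperty / %FunctionSetName / %FunctionRemovePrototype / %SetNativeFlag sequences are folded into InstallFunction and InstallConstructor helpers that live in the shared natives rather than in this file. A plain-JS approximation of what such a helper bundles, inferred only from the native calls it replaces here:

  function installFunction(target, name, fn) {
    // %FunctionSetName: give the closure its observable name.
    Object.defineProperty(fn, 'name', { value: name, configurable: true });
    // %AddNamedProperty with DONT_ENUM: a non-enumerable data property.
    Object.defineProperty(target, name, {
      value: fn, writable: true, enumerable: false, configurable: true,
    });
    // %FunctionRemovePrototype and %SetNativeFlag have no userland
    // equivalent; in the engine they strip fn.prototype and mark fn native.
  }
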
diff --git a/src/js/json.js b/src/js/json.js
index 73d7802..c6dbed9 100644
--- a/src/js/json.js
+++ b/src/js/json.js
@@ -19,6 +19,10 @@
 var MaxSimple;
 var MinSimple;
 var ObjectHasOwnProperty;
+var Stack;
+var StackHas;
+var StackPop;
+var StackPush;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
@@ -26,6 +30,10 @@
   MaxSimple = from.MaxSimple;
   MinSimple = from.MinSimple;
   ObjectHasOwnProperty = from.ObjectHasOwnProperty;
+  Stack = from.Stack;
+  StackHas = from.StackHas;
+  StackPop = from.StackPop;
+  StackPush = from.StackPush;
 });
 
 // -------------------------------------------------------------------
@@ -51,7 +59,9 @@
         }
       }
     } else {
-      for (var p of %object_keys(val)) {
+      var keys = %object_keys(val);
+      for (var i = 0; i < keys.length; i++) {
+        var p = keys[i];
         var newElement = InternalizeJSONProperty(val, p, reviver);
         if (IS_UNDEFINED(newElement)) {
           %reflect_delete_property(val, p);
@@ -76,7 +86,8 @@
 
 
 function SerializeArray(value, replacer, stack, indent, gap) {
-  if (!%PushIfAbsent(stack, value)) throw MakeTypeError(kCircularStructure);
+  if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
+  StackPush(stack, value);
   var stepback = indent;
   indent += gap;
   var partial = new InternalArray();
@@ -99,13 +110,14 @@
   } else {
     final = "[]";
   }
-  stack.pop();
+  StackPop(stack);
   return final;
 }
 
 
 function SerializeObject(value, replacer, stack, indent, gap) {
-  if (!%PushIfAbsent(stack, value)) throw MakeTypeError(kCircularStructure);
+  if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
+  StackPush(stack, value);
   var stepback = indent;
   indent += gap;
   var partial = new InternalArray();
@@ -122,7 +134,9 @@
       }
     }
   } else {
-    for (var p of %object_keys(value)) {
+    var keys = %object_keys(value);
+    for (var i = 0; i < keys.length; i++) {
+      var p = keys[i];
       var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
       if (!IS_UNDEFINED(strP)) {
         var member = %QuoteJSONString(p) + ":";
@@ -142,7 +156,7 @@
   } else {
     final = "{}";
   }
-  stack.pop();
+  StackPop(stack);
   return final;
 }
 
@@ -237,7 +251,7 @@
   if (!IS_CALLABLE(replacer) && !property_list && !gap && !IS_PROXY(value)) {
     return %BasicJSONStringify(value);
   }
-  return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
+  return JSONSerialize('', {'': value}, replacer, new Stack(), "", gap);
 }
 
 // -------------------------------------------------------------------
@@ -275,7 +289,7 @@
   var holder = {};
   holder[key] = object;
   // No need to pass the actual holder since there is no replacer function.
-  return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", "");
+  return JSONSerialize(key, holder, UNDEFINED, new Stack(), "", "");
 }
 
 %InstallToContext(["json_serialize_adapter", JsonSerializeAdapter]);
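
The serializer above now tracks the holder chain with Stack / StackHas / StackPush / StackPop imported from the shared natives (their definitions are not part of this hunk) instead of %PushIfAbsent plus Array.prototype.pop. A minimal userland model of those helpers and of how SerializeObject uses them for cycle detection:

  function Stack() { this.length = 0; }
  function StackPush(s, v) { s[s.length++] = v; }
  function StackPop(s)     { s[--s.length] = undefined; }
  function StackHas(s, v) {
    for (var i = 0; i < s.length; i++) if (s[i] === v) return true;
    return false;
  }

  function serialize(value, stack) {          // shape of SerializeObject above
    if (StackHas(stack, value)) throw new TypeError('circular structure');
    StackPush(stack, value);
    // ... serialize own properties, recursing through serialize() ...
    StackPop(stack);
  }
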
diff --git a/src/js/macros.py b/src/js/macros.py
index b2a7856..a4c7f53 100644
--- a/src/js/macros.py
+++ b/src/js/macros.py
@@ -88,9 +88,9 @@
 macro IS_SIMD_VALUE(arg)        = (%IsSimdValue(arg));
 macro IS_STRING(arg)            = (typeof(arg) === 'string');
 macro IS_STRING_WRAPPER(arg)    = (%_ClassOf(arg) === 'String');
-macro IS_STRONG(arg)            = (%IsStrong(arg));
 macro IS_SYMBOL(arg)            = (typeof(arg) === 'symbol');
 macro IS_SYMBOL_WRAPPER(arg)    = (%_ClassOf(arg) === 'Symbol');
+macro IS_TYPEDARRAY(arg)        = (%_IsTypedArray(arg));
 macro IS_UNDEFINED(arg)         = (arg === (void 0));
 macro IS_WEAKMAP(arg)           = (%_ClassOf(arg) === 'WeakMap');
 macro IS_WEAKSET(arg)           = (%_ClassOf(arg) === 'WeakSet');
@@ -122,12 +122,12 @@
 macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg));
 macro TO_NAME(arg) = (%_ToName(arg));
 macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
-macro HAS_OWN_PROPERTY(arg, index) = (%_Call(ObjectHasOwnProperty, arg, index));
-macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
+macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
+macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array)) && (index < array.length)) || (index in array));
 
 # Private names.
 macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
-macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
+macro HAS_PRIVATE(obj, key) = HAS_OWN_PROPERTY(obj, key);
 macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
 macro GET_PRIVATE(obj, sym) = (obj[sym]);
 macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
@@ -255,7 +255,6 @@
 define kForcedGC = 7;
 define kSloppyMode = 8;
 define kStrictMode = 9;
-define kStrongMode = 10;
 define kRegExpPrototypeStickyGetter = 11;
 define kRegExpPrototypeToString = 12;
 define kRegExpPrototypeUnicodeGetter = 13;
@@ -265,3 +264,15 @@
 define kPromiseChain = 17;
 define kPromiseAccept = 18;
 define kPromiseDefer = 19;
+define kHtmlCommentInExternalScript = 20;
+define kHtmlComment = 21;
+define kSloppyModeBlockScopedFunctionRedefinition = 22;
+define kForInInitializer = 23;
+define kArrayProtectorDirtied = 24;
+define kArraySpeciesModified = 25;
+define kArrayPrototypeConstructorModified = 26;
+define kArrayInstanceProtoModified = 27;
+define kArrayInstanceConstructorModified = 28;
+define kLegacyFunctionDeclaration = 29;
+define kRegExpPrototypeSourceGetter = 30;
+define kRegExpPrototypeOldFlagGetter = 31;
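
HAS_OWN_PROPERTY(obj, key) now expands to %_Call(ObjectHasOwnProperty, obj, key), a direct call of the saved Object.prototype.hasOwnProperty, and HAS_PRIVATE is defined on top of it; this is why the i18n and messages code above can use it on option bags with arbitrary keys. The userland equivalent of that expansion:

  var hasOwn = Function.prototype.call.bind(Object.prototype.hasOwnProperty);

  var options = Object.create(null);            // no inherited hasOwnProperty
  options.minimumSignificantDigits = 1;
  hasOwn(options, 'minimumSignificantDigits');  // true
  hasOwn(options, 'maximumSignificantDigits');  // false
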
diff --git a/src/js/math.js b/src/js/math.js
index a698fd4..f8ad6b1 100644
--- a/src/js/math.js
+++ b/src/js/math.js
@@ -10,7 +10,6 @@
 // -------------------------------------------------------------------
 // Imports
 
-define kRandomBatchSize = 64;
 // The first two slots are reserved to persist PRNG state.
 define kRandomNumberStart = 2;
 
@@ -19,7 +18,7 @@
 var GlobalObject = global.Object;
 var InternalArray = utils.InternalArray;
 var NaN = %GetRootNaN();
-var nextRandomIndex = kRandomBatchSize;
+var nextRandomIndex = 0;
 var randomNumbers = UNDEFINED;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
@@ -31,33 +30,13 @@
   return (x > 0) ? x : 0 - x;
 }
 
-// ECMA 262 - 15.8.2.2
-function MathAcosJS(x) {
-  return %_MathAcos(+x);
-}
-
-// ECMA 262 - 15.8.2.3
-function MathAsinJS(x) {
-  return %_MathAsin(+x);
-}
-
-// ECMA 262 - 15.8.2.4
-function MathAtanJS(x) {
-  return %_MathAtan(+x);
-}
-
 // ECMA 262 - 15.8.2.5
 // The naming of y and x matches the spec, as does the order in which
 // ToNumber (valueOf) is called.
 function MathAtan2JS(y, x) {
   y = +y;
   x = +x;
-  return %_MathAtan2(y, x);
-}
-
-// ECMA 262 - 15.8.2.6
-function MathCeil(x) {
-  return -%_MathFloor(-x);
+  return %MathAtan2(y, x);
 }
 
 // ECMA 262 - 15.8.2.8
@@ -65,11 +44,6 @@
   return %MathExpRT(TO_NUMBER(x));
 }
 
-// ECMA 262 - 15.8.2.9
-function MathFloorJS(x) {
-  return %_MathFloor(+x);
-}
-
 // ECMA 262 - 15.8.2.10
 function MathLog(x) {
   return %_MathLogRT(TO_NUMBER(x));
@@ -82,34 +56,24 @@
 
 // ECMA 262 - 15.8.2.14
 function MathRandom() {
-  if (nextRandomIndex >= kRandomBatchSize) {
+  // While creating a startup snapshot, %GenerateRandomNumbers returns a
+  // normal array containing a single random number, and has to be called for
+  // every new random number.
+  // Otherwise, it returns a pre-populated typed array of random numbers. The
+  // first two elements are reserved for the PRNG state.
+  if (nextRandomIndex <= kRandomNumberStart) {
     randomNumbers = %GenerateRandomNumbers(randomNumbers);
-    nextRandomIndex = kRandomNumberStart;
+    nextRandomIndex = randomNumbers.length;
   }
-  return randomNumbers[nextRandomIndex++];
+  return randomNumbers[--nextRandomIndex];
 }
 
 function MathRandomRaw() {
-  if (nextRandomIndex >= kRandomBatchSize) {
+  if (nextRandomIndex <= kRandomNumberStart) {
     randomNumbers = %GenerateRandomNumbers(randomNumbers);
-    nextRandomIndex = kRandomNumberStart;
+    nextRandomIndex = randomNumbers.length;
   }
-  return %_DoubleLo(randomNumbers[nextRandomIndex++]) & 0x3FFFFFFF;
-}
-
-// ECMA 262 - 15.8.2.15
-function MathRound(x) {
-  return %RoundNumber(TO_NUMBER(x));
-}
-
-// ECMA 262 - 15.8.2.17
-function MathSqrtJS(x) {
-  return %_MathSqrt(+x);
-}
-
-// Non-standard extension.
-function MathImul(x, y) {
-  return %NumberImul(TO_NUMBER(x), TO_NUMBER(y));
+  return %_DoubleLo(randomNumbers[--nextRandomIndex]) & 0x3FFFFFFF;
 }
 
 // ES6 draft 09-27-13, section 20.2.2.28.
@@ -121,23 +85,14 @@
   return x;
 }
 
-// ES6 draft 09-27-13, section 20.2.2.34.
-function MathTrunc(x) {
-  x = +x;
-  if (x > 0) return %_MathFloor(x);
-  if (x < 0) return -%_MathFloor(-x);
-  // -0, 0 or NaN.
-  return x;
-}
-
 // ES6 draft 09-27-13, section 20.2.2.5.
 function MathAsinh(x) {
   x = TO_NUMBER(x);
   // Idempotent for NaN, +/-0 and +/-Infinity.
   if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
-  if (x > 0) return MathLog(x + %_MathSqrt(x * x + 1));
+  if (x > 0) return MathLog(x + %math_sqrt(x * x + 1));
   // This is to prevent numerical errors caused by large negative x.
-  return -MathLog(-x + %_MathSqrt(x * x + 1));
+  return -MathLog(-x + %math_sqrt(x * x + 1));
 }
 
 // ES6 draft 09-27-13, section 20.2.2.3.
@@ -146,7 +101,7 @@
   if (x < 1) return NaN;
   // Idempotent for NaN and +Infinity.
   if (!NUMBER_IS_FINITE(x)) return x;
-  return MathLog(x + %_MathSqrt(x + 1) * %_MathSqrt(x - 1));
+  return MathLog(x + %math_sqrt(x + 1) * %math_sqrt(x - 1));
 }
 
 // ES6 draft 09-27-13, section 20.2.2.7.
@@ -185,17 +140,7 @@
     compensation = (preliminary - sum) - summand;
     sum = preliminary;
   }
-  return %_MathSqrt(sum) * max;
-}
-
-// ES6 draft 09-27-13, section 20.2.2.16.
-function MathFroundJS(x) {
-  return %MathFround(TO_NUMBER(x));
-}
-
-// ES6 draft 07-18-14, section 20.2.2.11
-function MathClz32JS(x) {
-  return %_MathClz32(x >>> 0);
+  return %math_sqrt(sum) * max;
 }
 
 // ES6 draft 09-27-13, section 20.2.2.9.
@@ -213,7 +158,7 @@
 endmacro
 
 function CubeRoot(x) {
-  var approx_hi = MathFloorJS(%_DoubleHi(x) / 3) + 0x2A9F7893;
+  var approx_hi = %math_floor(%_DoubleHi(x) / 3) + 0x2A9F7893;
   var approx = %_ConstructDouble(approx_hi | 0, 0);
   approx = NEWTON_ITERATION_CBRT(x, approx);
   approx = NEWTON_ITERATION_CBRT(x, approx);
@@ -223,6 +168,10 @@
 
 // -------------------------------------------------------------------
 
+%InstallToContext([
+  "math_pow", MathPowJS,
+]);
+
 %AddNamedProperty(GlobalMath, toStringTagSymbol, "Math", READ_ONLY | DONT_ENUM);
 
 // Set up math constants.
@@ -246,41 +195,22 @@
 utils.InstallFunctions(GlobalMath, DONT_ENUM, [
   "random", MathRandom,
   "abs", MathAbs,
-  "acos", MathAcosJS,
-  "asin", MathAsinJS,
-  "atan", MathAtanJS,
-  "ceil", MathCeil,
   "exp", MathExp,
-  "floor", MathFloorJS,
   "log", MathLog,
-  "round", MathRound,
-  "sqrt", MathSqrtJS,
   "atan2", MathAtan2JS,
   "pow", MathPowJS,
-  "imul", MathImul,
   "sign", MathSign,
-  "trunc", MathTrunc,
   "asinh", MathAsinh,
   "acosh", MathAcosh,
   "atanh", MathAtanh,
   "hypot", MathHypot,
-  "fround", MathFroundJS,
-  "clz32", MathClz32JS,
   "cbrt", MathCbrt
 ]);
 
 %SetForceInlineFlag(MathAbs);
-%SetForceInlineFlag(MathAcosJS);
-%SetForceInlineFlag(MathAsinJS);
-%SetForceInlineFlag(MathAtanJS);
 %SetForceInlineFlag(MathAtan2JS);
-%SetForceInlineFlag(MathCeil);
-%SetForceInlineFlag(MathClz32JS);
-%SetForceInlineFlag(MathFloorJS);
 %SetForceInlineFlag(MathRandom);
 %SetForceInlineFlag(MathSign);
-%SetForceInlineFlag(MathSqrtJS);
-%SetForceInlineFlag(MathTrunc);
 
 // -------------------------------------------------------------------
 // Exports
@@ -288,7 +218,6 @@
 utils.Export(function(to) {
   to.MathAbs = MathAbs;
   to.MathExp = MathExp;
-  to.MathFloor = MathFloorJS;
   to.IntRandom = MathRandomRaw;
 });
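
The rewritten MathRandom/MathRandomRaw above consume a batch of random numbers from the end of the buffer down to kRandomNumberStart, refilling via %GenerateRandomNumbers only when the cursor reaches the two reserved PRNG-state slots. A plain-JS model of that consumption pattern; generateRandomNumbers() and the 64-entry buffer below are stand-ins for the runtime call, not its actual behaviour:

  var kRandomNumberStart = 2;
  var randomNumbers;
  var nextRandomIndex = 0;

  function generateRandomNumbers(old) {
    var buf = old || new Float64Array(64);
    for (var i = kRandomNumberStart; i < buf.length; i++) buf[i] = Math.random();
    return buf;                                 // slots 0 and 1 stay reserved
  }

  function batchedRandom() {
    if (nextRandomIndex <= kRandomNumberStart) {    // exhausted (or first call)
      randomNumbers = generateRandomNumbers(randomNumbers);
      nextRandomIndex = randomNumbers.length;
    }
    return randomNumbers[--nextRandomIndex];        // walk down toward slot 2
  }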
 
diff --git a/src/js/messages.js b/src/js/messages.js
index feb14d3..f8cb967 100644
--- a/src/js/messages.js
+++ b/src/js/messages.js
@@ -23,7 +23,6 @@
     utils.ImportNow("call_site_position_symbol");
 var callSiteStrictSymbol =
     utils.ImportNow("call_site_strict_symbol");
-var FLAG_harmony_tostring;
 var Float32x4ToString;
 var formattedStackTraceSymbol =
     utils.ImportNow("formatted_stack_trace_symbol");
@@ -34,6 +33,7 @@
 var InternalArray = utils.InternalArray;
 var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
 var ObjectDefineProperty;
+var ObjectHasOwnProperty;
 var ObjectToString = utils.ImportNow("object_to_string");
 var Script = utils.ImportNow("Script");
 var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
@@ -56,6 +56,7 @@
   Int32x4ToString = from.Int32x4ToString;
   Int8x16ToString = from.Int8x16ToString;
   ObjectDefineProperty = from.ObjectDefineProperty;
+  ObjectHasOwnProperty = from.ObjectHasOwnProperty;
   StringCharAt = from.StringCharAt;
   StringIndexOf = from.StringIndexOf;
   StringSubstring = from.StringSubstring;
@@ -65,10 +66,6 @@
   Uint8x16ToString = from.Uint8x16ToString;
 });
 
-utils.ImportFromExperimental(function(from) {
-  FLAG_harmony_tostring = from.FLAG_harmony_tostring;
-});
-
 // -------------------------------------------------------------------
 
 var GlobalError;
@@ -85,13 +82,8 @@
   if (IS_NULL(this)) return "[object Null]";
   var O = TO_OBJECT(this);
   var builtinTag = %_ClassOf(O);
-  var tag;
-  if (FLAG_harmony_tostring) {
-    tag = %GetDataProperty(O, toStringTagSymbol);
-    if (!IS_STRING(tag)) {
-      tag = builtinTag;
-    }
-  } else {
+  var tag = %GetDataProperty(O, toStringTagSymbol);
+  if (!IS_STRING(tag)) {
     tag = builtinTag;
   }
   return `[object ${tag}]`;
@@ -578,69 +570,90 @@
   SET_PRIVATE(this, callSiteStrictSymbol, TO_BOOLEAN(strict_mode));
 }
 
+function CheckCallSite(obj, name) {
+  if (!IS_RECEIVER(obj) || !HAS_PRIVATE(obj, callSiteFunctionSymbol)) {
+    throw MakeTypeError(kCallSiteMethod, name);
+  }
+}
+
 function CallSiteGetThis() {
+  CheckCallSite(this, "getThis");
   return GET_PRIVATE(this, callSiteStrictSymbol)
       ? UNDEFINED : GET_PRIVATE(this, callSiteReceiverSymbol);
 }
 
 function CallSiteGetFunction() {
+  CheckCallSite(this, "getFunction");
   return GET_PRIVATE(this, callSiteStrictSymbol)
       ? UNDEFINED : GET_PRIVATE(this, callSiteFunctionSymbol);
 }
 
 function CallSiteGetPosition() {
+  CheckCallSite(this, "getPosition");
   return GET_PRIVATE(this, callSitePositionSymbol);
 }
 
 function CallSiteGetTypeName() {
+  CheckCallSite(this, "getTypeName");
   return GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), false);
 }
 
 function CallSiteIsToplevel() {
+  CheckCallSite(this, "isTopLevel");
   return %CallSiteIsToplevelRT(this);
 }
 
 function CallSiteIsEval() {
+  CheckCallSite(this, "isEval");
   return %CallSiteIsEvalRT(this);
 }
 
 function CallSiteGetEvalOrigin() {
+  CheckCallSite(this, "getEvalOrigin");
   var script = %FunctionGetScript(GET_PRIVATE(this, callSiteFunctionSymbol));
   return FormatEvalOrigin(script);
 }
 
 function CallSiteGetScriptNameOrSourceURL() {
+  CheckCallSite(this, "getScriptNameOrSourceURL");
   return %CallSiteGetScriptNameOrSourceUrlRT(this);
 }
 
 function CallSiteGetFunctionName() {
   // See if the function knows its own name
+  CheckCallSite(this, "getFunctionName");
   return %CallSiteGetFunctionNameRT(this);
 }
 
 function CallSiteGetMethodName() {
   // See if we can find a unique property on the receiver that holds
   // this function.
+  CheckCallSite(this, "getMethodName");
   return %CallSiteGetMethodNameRT(this);
 }
 
 function CallSiteGetFileName() {
+  CheckCallSite(this, "getFileName");
   return %CallSiteGetFileNameRT(this);
 }
 
 function CallSiteGetLineNumber() {
+  CheckCallSite(this, "getLineNumber");
   return %CallSiteGetLineNumberRT(this);
 }
 
 function CallSiteGetColumnNumber() {
+  CheckCallSite(this, "getColumnNumber");
   return %CallSiteGetColumnNumberRT(this);
 }
 
 function CallSiteIsNative() {
+  CheckCallSite(this, "isNative");
   return %CallSiteIsNativeRT(this);
 }
 
 function CallSiteIsConstructor() {
+  CheckCallSite(this, "isConstructor");
   return %CallSiteIsConstructorRT(this);
 }
 
diff --git a/src/js/prologue.js b/src/js/prologue.js
index 24225a0..f9589a5 100644
--- a/src/js/prologue.js
+++ b/src/js/prologue.js
@@ -126,6 +126,18 @@
 }
 
 
+function OverrideFunction(object, name, f, afterInitialBootstrap) {
+  %CheckIsBootstrapping();
+  %ObjectDefineProperty(object, name, { value: f,
+                                        writable: true,
+                                        configurable: true,
+                                        enumerable: false });
+  SetFunctionName(f, name);
+  if (!afterInitialBootstrap) %FunctionRemovePrototype(f);
+  %SetNativeFlag(f);
+}
+
+
 // Prevents changes to the prototype of a built-in function.
 // The "prototype" property of the function object is made non-configurable,
 // and the prototype object is made non-extensible. The latter prevents
@@ -175,18 +187,26 @@
     "GetMethod",
     "IsNaN",
     "MakeError",
+    "MakeRangeError",
     "MakeTypeError",
     "MapEntries",
     "MapIterator",
     "MapIteratorNext",
     "MaxSimple",
     "MinSimple",
+    "NumberIsInteger",
     "ObjectDefineProperty",
     "ObserveArrayMethods",
     "ObserveObjectMethods",
     "PromiseChain",
     "PromiseDeferred",
     "PromiseResolved",
+    "RegExpSubclassExecJS",
+    "RegExpSubclassMatch",
+    "RegExpSubclassReplace",
+    "RegExpSubclassSearch",
+    "RegExpSubclassSplit",
+    "RegExpSubclassTest",
     "SetIterator",
     "SetIteratorNext",
     "SetValues",
@@ -206,6 +226,10 @@
     "to_string_tag_symbol",
     "object_to_string",
     "species_symbol",
+    "match_symbol",
+    "replace_symbol",
+    "search_symbol",
+    "split_symbol",
   ];
 
   var filtered_exports = {};
@@ -284,6 +308,7 @@
 utils.InstallFunctions = InstallFunctions;
 utils.InstallGetter = InstallGetter;
 utils.InstallGetterSetter = InstallGetterSetter;
+utils.OverrideFunction = OverrideFunction;
 utils.SetUpLockedPrototype = SetUpLockedPrototype;
 utils.PostNatives = PostNatives;
 utils.PostExperimentals = PostExperimentals;
@@ -323,14 +348,14 @@
 // indirection and slowness given how un-optimized bind is.
 
 extrasUtils.simpleBind = function simpleBind(func, thisArg) {
-  return function() {
-    return %Apply(func, thisArg, arguments, 0, arguments.length);
+  return function(...args) {
+    return %reflect_apply(func, thisArg, args);
   };
 };
 
 extrasUtils.uncurryThis = function uncurryThis(func) {
-  return function(thisArg) {
-    return %Apply(func, thisArg, arguments, 1, arguments.length - 1);
+  return function(thisArg, ...args) {
+    return %reflect_apply(func, thisArg, args);
   };
 };
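
simpleBind and uncurryThis above now collect their arguments with rest parameters and forward them through %reflect_apply instead of the removed %Apply(func, thisArg, arguments, offset, length) form. The userland equivalent uses Reflect.apply directly:

  function simpleBind(func, thisArg) {
    return (...args) => Reflect.apply(func, thisArg, args);
  }

  function uncurryThis(func) {
    return (thisArg, ...args) => Reflect.apply(func, thisArg, args);
  }

  var hasOwn = uncurryThis(Object.prototype.hasOwnProperty);
  hasOwn({ a: 1 }, 'a');   // true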
 
diff --git a/src/js/promise.js b/src/js/promise.js
index 8cf6a36..bcf826a 100644
--- a/src/js/promise.js
+++ b/src/js/promise.js
@@ -61,13 +61,13 @@
 
 var GlobalPromise = function Promise(resolver) {
   if (resolver === promiseRawSymbol) {
-    return %NewObject(GlobalPromise, new.target);
+    return %_NewObject(GlobalPromise, new.target);
   }
   if (IS_UNDEFINED(new.target)) throw MakeTypeError(kNotAPromise, this);
   if (!IS_CALLABLE(resolver))
     throw MakeTypeError(kResolverNotAFunction, resolver);
 
-  var promise = PromiseInit(%NewObject(GlobalPromise, new.target));
+  var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
   var callbacks = CreateResolvingFunctions(promise);
 
   try {
@@ -89,9 +89,6 @@
   SET_PRIVATE(promise, promiseValueSymbol, value);
   SET_PRIVATE(promise, promiseOnResolveSymbol, onResolve);
   SET_PRIVATE(promise, promiseOnRejectSymbol, onReject);
-  if (DEBUG_IS_ACTIVE) {
-    %DebugPromiseEvent({ promise: promise, status: status, value: value });
-  }
   return promise;
 }
 
@@ -217,8 +214,6 @@
   PromiseDone(promise, -1, r, promiseOnRejectSymbol)
 }
 
-// Convenience.
-
 function NewPromiseCapability(C) {
   if (C === GlobalPromise) {
     // Optimized case, avoid extra closure.
@@ -239,6 +234,9 @@
     result.reject = reject;
   });
 
+  if (!IS_CALLABLE(result.resolve) || !IS_CALLABLE(result.reject))
+      throw MakeTypeError(kPromiseNonCallable);
+
   return result;
 }
 
@@ -305,9 +303,6 @@
   }
   // Mark this promise as having handler.
   SET_PRIVATE(this, promiseHasHandlerSymbol, true);
-  if (DEBUG_IS_ACTIVE) {
-    %DebugPromiseEvent({ promise: deferred.promise, parentPromise: this });
-  }
   return deferred.promise;
 }
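
The added check in NewPromiseCapability mirrors the spec's GetCapabilitiesExecutor step: if the (possibly subclassed) constructor never hands callable resolve/reject functions to its executor, the capability fails up front with a TypeError rather than later, when the callbacks are invoked. A sketch of the same capability pattern in plain JS:

  function newPromiseCapability(C) {
    var capability = { promise: undefined, resolve: undefined, reject: undefined };
    capability.promise = new C(function(resolve, reject) {
      capability.resolve = resolve;
      capability.reject = reject;
    });
    if (typeof capability.resolve !== 'function' ||
        typeof capability.reject !== 'function') {
      throw new TypeError('Promise resolve or reject function is not callable');
    }
    return capability;
  }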
 
diff --git a/src/js/regexp.js b/src/js/regexp.js
index e80d019..cc8cb41 100644
--- a/src/js/regexp.js
+++ b/src/js/regexp.js
@@ -4,26 +4,37 @@
 
 (function(global, utils) {
 
+'use strict';
+
 %CheckIsBootstrapping();
 
 // -------------------------------------------------------------------
 // Imports
 
+var AddIndexedProperty;
 var ExpandReplacement;
+var GlobalArray = global.Array;
 var GlobalObject = global.Object;
 var GlobalRegExp = global.RegExp;
 var GlobalRegExpPrototype;
 var InternalArray = utils.InternalArray;
 var InternalPackedArray = utils.InternalPackedArray;
 var MakeTypeError;
+var MaxSimple;
+var MinSimple;
 var matchSymbol = utils.ImportNow("match_symbol");
 var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
 var splitSymbol = utils.ImportNow("split_symbol");
+var SpeciesConstructor;
 
 utils.Import(function(from) {
+  AddIndexedProperty = from.AddIndexedProperty;
   ExpandReplacement = from.ExpandReplacement;
   MakeTypeError = from.MakeTypeError;
+  MaxSimple = from.MaxSimple;
+  MinSimple = from.MinSimple;
+  SpeciesConstructor = from.SpeciesConstructor;
 });
 
 // -------------------------------------------------------------------
@@ -44,6 +55,7 @@
 
 // -------------------------------------------------------------------
 
+// ES#sec-isregexp IsRegExp ( argument )
 function IsRegExp(o) {
   if (!IS_RECEIVER(o)) return false;
   var is_regexp = o[matchSymbol];
@@ -52,7 +64,8 @@
 }
 
 
-// ES6 section 21.2.3.2.2
+// ES#sec-regexpinitialize
+// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
 function RegExpInitialize(object, pattern, flags) {
   pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
   flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
@@ -70,6 +83,8 @@
 }
 
 
+// ES#sec-regexp-pattern-flags
+// RegExp ( pattern, flags )
 function RegExpConstructor(pattern, flags) {
   var newtarget = new.target;
   var pattern_is_regexp = IsRegExp(pattern);
@@ -94,11 +109,12 @@
     if (IS_UNDEFINED(flags)) flags = input_pattern.flags;
   }
 
-  var object = %NewObject(GlobalRegExp, newtarget);
+  var object = %_NewObject(GlobalRegExp, newtarget);
   return RegExpInitialize(object, pattern, flags);
 }
 
 
+// ES#sec-regexp.prototype.compile RegExp.prototype.compile (pattern, flags)
 function RegExpCompileJS(pattern, flags) {
   if (!IS_REGEXP(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -163,6 +179,54 @@
 }
 
 
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+function RegExpSubclassExecJS(string) {
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        'RegExp.prototype.exec', this);
+  }
+
+  string = TO_STRING(string);
+  var lastIndex = this.lastIndex;
+
+  // Conversion is required by the ES2015 specification (RegExpBuiltinExec
+  // algorithm, step 4) even if the value is discarded for non-global RegExps.
+  var i = TO_LENGTH(lastIndex);
+
+  var global = TO_BOOLEAN(REGEXP_GLOBAL(this));
+  var sticky = TO_BOOLEAN(REGEXP_STICKY(this));
+  var updateLastIndex = global || sticky;
+  if (updateLastIndex) {
+    if (i > string.length) {
+      this.lastIndex = 0;
+      return null;
+    }
+  } else {
+    i = 0;
+  }
+
+  // matchIndices is either null or the RegExpLastMatchInfo array.
+  // TODO(littledan): Whether a RegExp is sticky is compiled into the RegExp
+  // itself, but ES2015 allows monkey-patching this property to differ from
+  // the internal flags. If it differs, recompile a different RegExp?
+  var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
+
+  if (IS_NULL(matchIndices)) {
+    this.lastIndex = 0;
+    return null;
+  }
+
+  // Successful match.
+  if (updateLastIndex) {
+    this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
+  }
+  RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
+}
+%FunctionRemovePrototype(RegExpSubclassExecJS);
+
+
+// Legacy implementation of RegExp.prototype.exec
 function RegExpExecJS(string) {
   if (!IS_REGEXP(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -202,10 +266,30 @@
 }
 
 
+// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+// Also takes an optional exec method in case our caller
+// has already fetched exec.
+function RegExpSubclassExec(regexp, string, exec) {
+  if (IS_UNDEFINED(exec)) {
+    exec = regexp.exec;
+  }
+  if (IS_CALLABLE(exec)) {
+    var result = %_Call(exec, regexp, string);
+    if (!IS_RECEIVER(result) && !IS_NULL(result)) {
+      throw MakeTypeError(kInvalidRegExpExecResult);
+    }
+    return result;
+  }
+  return %_Call(RegExpExecJS, regexp, string);
+}
+%SetForceInlineFlag(RegExpSubclassExec);
+
+
 // One-element cache for the simplified test regexp.
 var regexp_key;
 var regexp_val;
 
+// Legacy implementation of RegExp.prototype.test
 // Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
 // that test is defined in terms of String.prototype.exec. However, it probably
 // means the original value of String.prototype.exec, which is what everybody
@@ -259,6 +343,19 @@
   }
 }
 
+
+// ES#sec-regexp.prototype.test RegExp.prototype.test ( S )
+function RegExpSubclassTest(string) {
+  if (!IS_RECEIVER(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        'RegExp.prototype.test', this);
+  }
+  string = TO_STRING(string);
+  var match = RegExpSubclassExec(this, string);
+  return !IS_NULL(match);
+}
+%FunctionRemovePrototype(RegExpSubclassTest);
+
 function TrimRegExp(regexp) {
   if (regexp_key !== regexp) {
     regexp_key = regexp;
@@ -273,27 +370,14 @@
 
 
 function RegExpToString() {
-  if (!IS_REGEXP(this)) {
-    // RegExp.prototype.toString() returns '/(?:)/' as a compatibility fix;
-    // a UseCounter is incremented to track it.
-    // TODO(littledan): Remove this workaround or standardize it
-    if (this === GlobalRegExpPrototype) {
-      %IncrementUseCounter(kRegExpPrototypeToString);
-      return '/(?:)/';
-    }
-    if (!IS_RECEIVER(this)) {
-      throw MakeTypeError(
-          kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
-    }
-    return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
+  if (!IS_RECEIVER(this)) {
+    throw MakeTypeError(
+        kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
   }
-  var result = '/' + REGEXP_SOURCE(this) + '/';
-  if (REGEXP_GLOBAL(this)) result += 'g';
-  if (REGEXP_IGNORE_CASE(this)) result += 'i';
-  if (REGEXP_MULTILINE(this)) result += 'm';
-  if (REGEXP_UNICODE(this)) result += 'u';
-  if (REGEXP_STICKY(this)) result += 'y';
-  return result;
+  if (this === GlobalRegExpPrototype) {
+    %IncrementUseCounter(kRegExpPrototypeToString);
+  }
+  return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
 }
 
 
@@ -306,7 +390,8 @@
 }
 
 
-// ES6 21.2.5.11.
+// Legacy implementation of RegExp.prototype[Symbol.split] which
+// doesn't properly call the underlying exec and @@species methods
 function RegExpSplit(string, limit) {
   // TODO(yangguo): allow non-regexp receivers.
   if (!IS_REGEXP(this)) {
@@ -380,9 +465,85 @@
 }
 
 
-// ES6 21.2.5.6.
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+function RegExpSubclassSplit(string, limit) {
+  if (!IS_RECEIVER(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        "RegExp.prototype.@@split", this);
+  }
+  string = TO_STRING(string);
+  var constructor = SpeciesConstructor(this, GlobalRegExp);
+  var flags = TO_STRING(this.flags);
+
+  // TODO(adamk): this fast path is wrong with respect to this.global
+  // and this.sticky, but hopefully the spec will remove those gets
+  // and thus make the assumption of 'exec' having no side-effects
+  // more correct. Also, we don't ensure that 'exec' is actually
+  // a data property on RegExp.prototype.
+  var exec;
+  if (IS_REGEXP(this) && constructor === GlobalRegExp) {
+    exec = this.exec;
+    if (exec === RegExpSubclassExecJS) {
+      return %_Call(RegExpSplit, this, string, limit);
+    }
+  }
+
+  var unicode = %StringIndexOf(flags, 'u', 0) >= 0;
+  var sticky = %StringIndexOf(flags, 'y', 0) >= 0;
+  var newFlags = sticky ? flags : flags + "y";
+  var splitter = new constructor(this, newFlags);
+  var array = new GlobalArray();
+  var arrayIndex = 0;
+  var lim = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
+  var size = string.length;
+  var prevStringIndex = 0;
+  if (lim === 0) return array;
+  var result;
+  if (size === 0) {
+    result = RegExpSubclassExec(splitter, string);
+    if (IS_NULL(result)) AddIndexedProperty(array, 0, string);
+    return array;
+  }
+  var stringIndex = prevStringIndex;
+  while (stringIndex < size) {
+    splitter.lastIndex = stringIndex;
+    result = RegExpSubclassExec(splitter, string, exec);
+    // Ensure exec will be read again on the next loop through.
+    exec = UNDEFINED;
+    if (IS_NULL(result)) {
+      stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
+    } else {
+      var end = MinSimple(TO_LENGTH(splitter.lastIndex), size);
+      if (end === stringIndex) {
+        stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
+      } else {
+        AddIndexedProperty(
+            array, arrayIndex,
+            %_SubString(string, prevStringIndex, stringIndex));
+        arrayIndex++;
+        if (arrayIndex === lim) return array;
+        prevStringIndex = end;
+        var numberOfCaptures = MaxSimple(TO_LENGTH(result.length), 0);
+        for (var i = 1; i < numberOfCaptures; i++) {
+          AddIndexedProperty(array, arrayIndex, result[i]);
+          arrayIndex++;
+          if (arrayIndex === lim) return array;
+        }
+        stringIndex = prevStringIndex;
+      }
+    }
+  }
+  AddIndexedProperty(array, arrayIndex,
+                     %_SubString(string, prevStringIndex, size));
+  return array;
+}
+%FunctionRemovePrototype(RegExpSubclassSplit);
+
+
+// Legacy implementation of RegExp.prototype[Symbol.match] which
+// doesn't properly call the underlying exec method
 function RegExpMatch(string) {
-  // TODO(yangguo): allow non-regexp receivers.
   if (!IS_REGEXP(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         "RegExp.prototype.@@match", this);
@@ -396,7 +557,41 @@
 }
 
 
-// ES6 21.2.5.8.
+// ES#sec-regexp.prototype-@@match
+// RegExp.prototype [ @@match ] ( string )
+function RegExpSubclassMatch(string) {
+  if (!IS_RECEIVER(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        "RegExp.prototype.@@match", this);
+  }
+  string = TO_STRING(string);
+  var global = this.global;
+  if (!global) return RegExpSubclassExec(this, string);
+  var unicode = this.unicode;
+  this.lastIndex = 0;
+  var array = new InternalArray();
+  var n = 0;
+  var result;
+  while (true) {
+    result = RegExpSubclassExec(this, string);
+    if (IS_NULL(result)) {
+      if (n === 0) return null;
+      break;
+    }
+    var matchStr = TO_STRING(result[0]);
+    array[n] = matchStr;
+    if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
+    n++;
+  }
+  var resultArray = [];
+  %MoveArrayContents(array, resultArray);
+  return resultArray;
+}
+%FunctionRemovePrototype(RegExpSubclassMatch);
+
+
+// Legacy implementation of RegExp.prototype[Symbol.replace] which
+// doesn't properly call the underlying exec method.
 
 // TODO(lrn): This array will survive indefinitely if replace is never
 // called again. However, it will be empty, since the contents are cleared
@@ -458,7 +653,7 @@
       if (!%_IsSmi(elem)) {
         // elem must be an Array.
         // Use the apply argument as backing for global RegExp properties.
-        var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
+        var func_result = %reflect_apply(replace, UNDEFINED, elem);
         // Overwrite the i'th element in the results with the string we got
         // back from the callback function.
         res[i] = TO_STRING(func_result);
@@ -512,7 +707,7 @@
     parameters[j] = index;
     parameters[j + 1] = subject;
 
-    replacement = %Apply(replace, UNDEFINED, parameters, 0, j + 2);
+    replacement = %reflect_apply(replace, UNDEFINED, parameters);
   }
 
   result += replacement;  // The add method converts to string if necessary.
@@ -523,7 +718,6 @@
 
 
 function RegExpReplace(string, replace) {
-  // TODO(littledan): allow non-regexp receivers.
   if (!IS_REGEXP(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         "RegExp.prototype.@@replace", this);
@@ -565,9 +759,206 @@
 }
 
 
-// ES6 21.2.5.9.
+// ES#sec-getsubstitution
+// GetSubstitution(matched, str, position, captures, replacement)
+// Expand the $-expressions in the string and return a new string with
+// the result.
+// TODO(littledan): Call this function from String.prototype.replace instead
+// of the very similar ExpandReplacement in src/js/string.js
+function GetSubstitution(matched, string, position, captures, replacement) {
+  var matchLength = matched.length;
+  var stringLength = string.length;
+  var capturesLength = captures.length;
+  var tailPos = position + matchLength;
+  var result = "";
+  var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
+
+  var next = %StringIndexOf(replacement, '$', 0);
+  if (next < 0) {
+    result += replacement;
+    return result;
+  }
+
+  if (next > 0) result += %_SubString(replacement, 0, next);
+
+  while (true) {
+    expansion = '$';
+    pos = next + 1;
+    if (pos < replacement.length) {
+      peek = %_StringCharCodeAt(replacement, pos);
+      if (peek == 36) {         // $$
+        ++pos;
+        result += '$';
+      } else if (peek == 38) {  // $& - match
+        ++pos;
+        result += matched;
+      } else if (peek == 96) {  // $` - prefix
+        ++pos;
+        result += %_SubString(string, 0, position);
+      } else if (peek == 39) {  // $' - suffix
+        ++pos;
+        result += %_SubString(string, tailPos, stringLength);
+      } else if (peek >= 48 && peek <= 57) {
+        // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+        scaledIndex = (peek - 48);
+        advance = 1;
+        if (pos + 1 < replacement.length) {
+          next = %_StringCharCodeAt(replacement, pos + 1);
+          if (next >= 48 && next <= 57) {
+            newScaledIndex = scaledIndex * 10 + ((next - 48));
+            if (newScaledIndex < capturesLength) {
+              scaledIndex = newScaledIndex;
+              advance = 2;
+            }
+          }
+        }
+        if (scaledIndex != 0 && scaledIndex < capturesLength) {
+          var capture = captures[scaledIndex];
+          if (!IS_UNDEFINED(capture)) result += capture;
+          pos += advance;
+        } else {
+          result += '$';
+        }
+      } else {
+        result += '$';
+      }
+    } else {
+      result += '$';
+    }
+
+    // Go to the next $ in the replacement.
+    next = %StringIndexOf(replacement, '$', pos);
+
+    // Return if there are no more $ characters in the replacement. If we
+    // haven't reached the end, we need to append the suffix.
+    if (next < 0) {
+      if (pos < replacement.length) {
+        result += %_SubString(replacement, pos, replacement.length);
+      }
+      return result;
+    }
+
+    // Append substring between the previous and the next $ character.
+    if (next > pos) {
+      result += %_SubString(replacement, pos, next);
+    }
+  }
+  return result;
+}
+
+
+// ES#sec-advancestringindex
+// AdvanceStringIndex ( S, index, unicode )
+function AdvanceStringIndex(string, index, unicode) {
+  var increment = 1;
+  if (unicode) {
+    var first = %_StringCharCodeAt(string, index);
+    if (first >= 0xD800 && first <= 0xDBFF && string.length > index + 1) {
+      var second = %_StringCharCodeAt(string, index + 1);
+      if (second >= 0xDC00 && second <= 0xDFFF) {
+        increment = 2;
+      }
+    }
+  }
+  return increment;
+}
+
+
+function SetAdvancedStringIndex(regexp, string, unicode) {
+  var lastIndex = regexp.lastIndex;
+  regexp.lastIndex = lastIndex +
+                     AdvanceStringIndex(string, lastIndex, unicode);
+}
+
+
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
+function RegExpSubclassReplace(string, replace) {
+  if (!IS_RECEIVER(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        "RegExp.prototype.@@replace", this);
+  }
+  string = TO_STRING(string);
+  var length = string.length;
+  var functionalReplace = IS_CALLABLE(replace);
+  if (!functionalReplace) replace = TO_STRING(replace);
+  var global = TO_BOOLEAN(this.global);
+  if (global) {
+    var unicode = TO_BOOLEAN(this.unicode);
+    this.lastIndex = 0;
+  }
+
+  // TODO(adamk): this fast path is wrong with respect to this.global
+  // and this.sticky, but hopefully the spec will remove those gets
+  // and thus make the assumption of 'exec' having no side-effects
+  // more correct. Also, we don't ensure that 'exec' is actually
+  // a data property on RegExp.prototype, nor does the fast path
+  // correctly handle lastIndex setting.
+  var exec;
+  if (IS_REGEXP(this)) {
+    exec = this.exec;
+    if (exec === RegExpSubclassExecJS) {
+      return %_Call(RegExpReplace, this, string, replace);
+    }
+  }
+
+  var results = new InternalArray();
+  var result, replacement;
+  while (true) {
+    result = RegExpSubclassExec(this, string, exec);
+    // Ensure exec will be read again on the next loop through.
+    exec = UNDEFINED;
+    if (IS_NULL(result)) {
+      break;
+    } else {
+      results.push(result);
+      if (!global) break;
+      var matchStr = TO_STRING(result[0]);
+      if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
+    }
+  }
+  var accumulatedResult = "";
+  var nextSourcePosition = 0;
+  for (var i = 0; i < results.length; i++) {
+    result = results[i];
+    var capturesLength = MaxSimple(TO_LENGTH(result.length), 0);
+    var matched = TO_STRING(result[0]);
+    var matchedLength = matched.length;
+    var position = MaxSimple(MinSimple(TO_INTEGER(result.index), length), 0);
+    var captures = new InternalArray();
+    for (var n = 0; n < capturesLength; n++) {
+      var capture = result[n];
+      if (!IS_UNDEFINED(capture)) capture = TO_STRING(capture);
+      captures[n] = capture;
+    }
+    if (functionalReplace) {
+      var parameters = new InternalArray(capturesLength + 2);
+      for (var j = 0; j < capturesLength; j++) {
+        parameters[j] = captures[j];
+      }
+      parameters[j] = position;
+      parameters[j + 1] = string;
+      replacement = %reflect_apply(replace, UNDEFINED, parameters);
+    } else {
+      replacement = GetSubstitution(matched, string, position, captures,
+                                    replace);
+    }
+    if (position >= nextSourcePosition) {
+      accumulatedResult +=
+        %_SubString(string, nextSourcePosition, position) + replacement;
+      nextSourcePosition = position + matchedLength;
+    }
+  }
+  if (nextSourcePosition >= length) return accumulatedResult;
+  return accumulatedResult + %_SubString(string, nextSourcePosition, length);
+}
+%FunctionRemovePrototype(RegExpSubclassReplace);
+
+
+// Legacy implementation of RegExp.prototype[Symbol.search] which
+// doesn't properly use the overridden exec method
 function RegExpSearch(string) {
-  // TODO(yangguo): allow non-regexp receivers.
   if (!IS_REGEXP(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         "RegExp.prototype.@@search", this);
@@ -578,6 +969,24 @@
 }
 
 
+// ES#sec-regexp.prototype-@@search
+// RegExp.prototype [ @@search ] ( string )
+function RegExpSubclassSearch(string) {
+  if (!IS_RECEIVER(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        "RegExp.prototype.@@search", this);
+  }
+  string = TO_STRING(string);
+  var previousLastIndex = this.lastIndex;
+  this.lastIndex = 0;
+  var result = RegExpSubclassExec(this, string);
+  this.lastIndex = previousLastIndex;
+  if (IS_NULL(result)) return -1;
+  return result.index;
+}
+%FunctionRemovePrototype(RegExpSubclassSearch);
+
+
 // Getters for the static properties lastMatch, lastParen, leftContext, and
 // rightContext of the RegExp constructor.  The properties are computed based
 // on the captures array of the last successful match and the subject string
@@ -639,19 +1048,35 @@
 }
 
 
+// ES6 21.2.5.3.
+function RegExpGetFlags() {
+  if (!IS_RECEIVER(this)) {
+    throw MakeTypeError(
+        kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
+  }
+  var result = '';
+  if (this.global) result += 'g';
+  if (this.ignoreCase) result += 'i';
+  if (this.multiline) result += 'm';
+  if (this.unicode) result += 'u';
+  if (this.sticky) result += 'y';
+  return result;
+}
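
The flags getter concatenates the individual flag getters in the spec order g-i-m-u-y, so it also works for any receiver that exposes those properties. A small sketch, assuming ES6 RegExp.prototype.flags semantics:

    console.log(/ab/gim.flags);  // "gim"
    // Only this.global, this.ignoreCase, this.multiline, this.unicode and
    // this.sticky are read, so a plain object can act as the receiver.
    var get = Object.getOwnPropertyDescriptor(RegExp.prototype, "flags").get;
    console.log(get.call({ global: true, sticky: true }));  // "gy"
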
+
+
 // ES6 21.2.5.4.
 function RegExpGetGlobal() {
   if (!IS_REGEXP(this)) {
     // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
+      %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
       return UNDEFINED;
     }
     throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.global");
   }
-  return !!REGEXP_GLOBAL(this);
+  return TO_BOOLEAN(REGEXP_GLOBAL(this));
 }
-%FunctionSetName(RegExpGetGlobal, "RegExp.prototype.global");
-%SetNativeFlag(RegExpGetGlobal);
+%SetForceInlineFlag(RegExpGetGlobal);
 
 
 // ES6 21.2.5.5.
@@ -659,14 +1084,13 @@
   if (!IS_REGEXP(this)) {
     // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
+      %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
       return UNDEFINED;
     }
     throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
   }
-  return !!REGEXP_IGNORE_CASE(this);
+  return TO_BOOLEAN(REGEXP_IGNORE_CASE(this));
 }
-%FunctionSetName(RegExpGetIgnoreCase, "RegExp.prototype.ignoreCase");
-%SetNativeFlag(RegExpGetIgnoreCase);
 
 
 // ES6 21.2.5.7.
@@ -674,14 +1098,13 @@
   if (!IS_REGEXP(this)) {
     // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
+      %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
       return UNDEFINED;
     }
     throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.multiline");
   }
-  return !!REGEXP_MULTILINE(this);
+  return TO_BOOLEAN(REGEXP_MULTILINE(this));
 }
-%FunctionSetName(RegExpGetMultiline, "RegExp.prototype.multiline");
-%SetNativeFlag(RegExpGetMultiline);
 
 
 // ES6 21.2.5.10.
@@ -689,14 +1112,29 @@
   if (!IS_REGEXP(this)) {
     // TODO(littledan): Remove this RegExp compat workaround
     if (this === GlobalRegExpPrototype) {
-      return UNDEFINED;
+      %IncrementUseCounter(kRegExpPrototypeSourceGetter);
+      return "(?:)";
     }
     throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.source");
   }
   return REGEXP_SOURCE(this);
 }
-%FunctionSetName(RegExpGetSource, "RegExp.prototype.source");
-%SetNativeFlag(RegExpGetSource);
+
+
+// ES6 21.2.5.12.
+function RegExpGetSticky() {
+  if (!IS_REGEXP(this)) {
+    // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
+    // TODO(littledan): Remove this workaround or standardize it
+    if (this === GlobalRegExpPrototype) {
+      %IncrementUseCounter(kRegExpPrototypeStickyGetter);
+      return UNDEFINED;
+    }
+    throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
+  }
+  return TO_BOOLEAN(REGEXP_STICKY(this));
+}
+%SetForceInlineFlag(RegExpGetSticky);
 
 // -------------------------------------------------------------------
 
@@ -718,10 +1156,12 @@
   splitSymbol, RegExpSplit,
 ]);
 
+utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
 utils.InstallGetter(GlobalRegExp.prototype, 'global', RegExpGetGlobal);
 utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
 utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
 utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
+utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
 
 // The properties `input` and `$_` are aliases for each other.  When this
 // value is set the value it is set to is coerced to a string.
@@ -769,12 +1209,39 @@
 %ToFastProperties(GlobalRegExp);
 
 // -------------------------------------------------------------------
+// Internal
+
+var InternalRegExpMatchInfo = new InternalPackedArray(2, "", UNDEFINED, 0, 0);
+
+function InternalRegExpMatch(regexp, subject) {
+  var matchInfo = %_RegExpExec(regexp, subject, 0, InternalRegExpMatchInfo);
+  if (!IS_NULL(matchInfo)) {
+    RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, subject);
+  }
+  return null;
+}
+
+function InternalRegExpReplace(regexp, subject, replacement) {
+  return %StringReplaceGlobalRegExpWithString(
+      subject, regexp, replacement, InternalRegExpMatchInfo);
+}
+
+// -------------------------------------------------------------------
 // Exports
 
 utils.Export(function(to) {
+  to.InternalRegExpMatch = InternalRegExpMatch;
+  to.InternalRegExpReplace = InternalRegExpReplace;
+  to.IsRegExp = IsRegExp;
   to.RegExpExec = DoRegExpExec;
-  to.RegExpExecNoTests = RegExpExecNoTests;
+  to.RegExpInitialize = RegExpInitialize;
   to.RegExpLastMatchInfo = RegExpLastMatchInfo;
+  to.RegExpSubclassExecJS = RegExpSubclassExecJS;
+  to.RegExpSubclassMatch = RegExpSubclassMatch;
+  to.RegExpSubclassReplace = RegExpSubclassReplace;
+  to.RegExpSubclassSearch = RegExpSubclassSearch;
+  to.RegExpSubclassSplit = RegExpSubclassSplit;
+  to.RegExpSubclassTest = RegExpSubclassTest;
   to.RegExpTest = RegExpTest;
 });
 
diff --git a/src/js/runtime.js b/src/js/runtime.js
index 7a61094..8e4f283 100644
--- a/src/js/runtime.js
+++ b/src/js/runtime.js
@@ -42,14 +42,6 @@
    ---------------------------------
 */
 
-function ConcatIterableToArray(target, iterable) {
-   var index = target.length;
-   for (var element of iterable) {
-     AddIndexedProperty(target, index++, element);
-   }
-   return target;
-}
-
 
 // This function should be called rather than %AddElement in contexts where the
 // argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
@@ -137,8 +129,4 @@
   to.SpeciesConstructor = SpeciesConstructor;
 });
 
-%InstallToContext([
-  "concat_iterable_to_array", ConcatIterableToArray,
-]);
-
 })
diff --git a/src/js/string-iterator.js b/src/js/string-iterator.js
index 3c331dd..af9af31 100644
--- a/src/js/string-iterator.js
+++ b/src/js/string-iterator.js
@@ -32,6 +32,7 @@
 
 // 21.1.5.1 CreateStringIterator Abstract Operation
 function CreateStringIterator(string) {
+  CHECK_OBJECT_COERCIBLE(string, 'String.prototype[Symbol.iterator]');
   var s = TO_STRING(string);
   var iterator = new StringIterator;
   SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, s);
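
With the added coercibility check, obtaining the string iterator from undefined or null throws before the ToString conversion. A minimal sketch:

    console.log([..."ab"]);  // ["a", "b"]
    var iter = String.prototype[Symbol.iterator];
    try {
      iter.call(undefined);  // rejected by the coercibility check
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }
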
diff --git a/src/js/string.js b/src/js/string.js
index a401978..0eb394e 100644
--- a/src/js/string.js
+++ b/src/js/string.js
@@ -15,12 +15,13 @@
 var GlobalString = global.String;
 var InternalArray = utils.InternalArray;
 var InternalPackedArray = utils.InternalPackedArray;
+var IsRegExp;
 var MakeRangeError;
 var MakeTypeError;
 var MaxSimple;
 var MinSimple;
+var RegExpInitialize;
 var matchSymbol = utils.ImportNow("match_symbol");
-var RegExpExecNoTests;
 var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
 var splitSymbol = utils.ImportNow("split_symbol");
@@ -28,11 +29,12 @@
 utils.Import(function(from) {
   ArrayIndexOf = from.ArrayIndexOf;
   ArrayJoin = from.ArrayJoin;
+  IsRegExp = from.IsRegExp;
   MakeRangeError = from.MakeRangeError;
   MakeTypeError = from.MakeTypeError;
   MaxSimple = from.MaxSimple;
   MinSimple = from.MinSimple;
-  RegExpExecNoTests = from.RegExpExecNoTests;
+  RegExpInitialize = from.RegExpInitialize;
 });
 
 //-------------------------------------------------------------------
@@ -159,9 +161,10 @@
 
   var subject = TO_STRING(this);
 
-  // Non-regexp argument.
-  var regexp = new GlobalRegExp(pattern);
-  return RegExpExecNoTests(regexp, subject, 0);
+  // Equivalent to RegExpCreate (ES#sec-regexpcreate)
+  var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
+  RegExpInitialize(regexp, pattern);
+  return regexp[matchSymbol](subject);
 }
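
With this change a non-RegExp pattern is wrapped through the RegExpCreate steps and then dispatched via its @@match method rather than the old no-tests exec shortcut. A small usage sketch, assuming ES2015 String.prototype.match semantics:

    var result = "abcabc".match("b");      // like new RegExp("b")[Symbol.match]("abcabc")
    console.log(result[0], result.index);  // "b" 1
    console.log("abcabc".match(/b/g));     // ["b", "b"]
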
 
 
@@ -355,7 +358,10 @@
   }
 
   var subject = TO_STRING(this);
-  var regexp = new GlobalRegExp(pattern);
+
+  // Equivalent to RegExpCreate (ES#sec-regexpcreate)
+  var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
+  RegExpInitialize(regexp, pattern);
   return %_Call(regexp[searchSymbol], regexp, subject);
 }
 
@@ -558,18 +564,6 @@
 }
 
 
-// ECMA-262, section 15.5.3.2
-function StringFromCharCode(_) {  // length == 1
-  "use strict";
-  var s = "";
-  var n = arguments.length;
-  for (var i = 0; i < n; ++i) {
-    s += %_StringCharFromCode(arguments[i] & 0xffff);
-  }
-  return s;
-}
-
-
 // ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
 function HtmlEscape(str) {
   return %_Call(StringReplace, TO_STRING(str), /"/g, "&quot;");
@@ -701,7 +695,7 @@
 
   var s = TO_STRING(this);
 
-  if (IS_REGEXP(searchString)) {
+  if (IsRegExp(searchString)) {
     throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.startsWith");
   }
 
@@ -727,7 +721,7 @@
 
   var s = TO_STRING(this);
 
-  if (IS_REGEXP(searchString)) {
+  if (IsRegExp(searchString)) {
     throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.endsWith");
   }
 
@@ -754,7 +748,7 @@
 
   var string = TO_STRING(this);
 
-  if (IS_REGEXP(searchString)) {
+  if (IsRegExp(searchString)) {
     throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.includes");
   }
 
@@ -860,7 +854,6 @@
 
 // Set up the non-enumerable functions on the String object.
 utils.InstallFunctions(GlobalString, DONT_ENUM, [
-  "fromCharCode", StringFromCharCode,
   "fromCodePoint", StringFromCodePoint,
   "raw", StringRaw
 ]);
diff --git a/src/js/symbol.js b/src/js/symbol.js
index ae54369..7365655 100644
--- a/src/js/symbol.js
+++ b/src/js/symbol.js
@@ -84,9 +84,7 @@
   // "search", searchSymbol,
   // "split, splitSymbol,
   "toPrimitive", toPrimitiveSymbol,
-  // TODO(dslomov, caitp): Currently defined in harmony-tostring.js ---
-  // Move here when shipping
-  // "toStringTag", toStringTagSymbol,
+  "toStringTag", toStringTagSymbol,
   "unscopables", unscopablesSymbol,
 ]);
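
Shipping Symbol.toStringTag on the Symbol constructor lets objects customise the default Object.prototype.toString brand. A minimal sketch, assuming ES2015 semantics:

    var obj = { [Symbol.toStringTag]: "Custom" };
    console.log(Object.prototype.toString.call(obj));  // "[object Custom]"
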
 
diff --git a/src/js/typedarray.js b/src/js/typedarray.js
index 3d500a3..4fb174b 100644
--- a/src/js/typedarray.js
+++ b/src/js/typedarray.js
@@ -11,11 +11,15 @@
 // -------------------------------------------------------------------
 // Imports
 
-var ArrayFrom;
-var ArrayToString;
+var AddIndexedProperty;
+// array.js has to come before typedarray.js for this to work
+var ArrayToString = utils.ImportNow("ArrayToString");
 var ArrayValues;
+var GetIterator;
+var GetMethod;
 var GlobalArray = global.Array;
 var GlobalArrayBuffer = global.ArrayBuffer;
+var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
 var GlobalDataView = global.DataView;
 var GlobalObject = global.Object;
 var InternalArray = utils.InternalArray;
@@ -67,9 +71,10 @@
 TYPED_ARRAYS(DECLARE_GLOBALS)
 
 utils.Import(function(from) {
-  ArrayFrom = from.ArrayFrom;
-  ArrayToString = from.ArrayToString;
+  AddIndexedProperty = from.AddIndexedProperty;
   ArrayValues = from.ArrayValues;
+  GetIterator = from.GetIterator;
+  GetMethod = from.GetMethod;
   InnerArrayCopyWithin = from.InnerArrayCopyWithin;
   InnerArrayEvery = from.InnerArrayEvery;
   InnerArrayFill = from.InnerArrayFill;
@@ -118,7 +123,7 @@
   } else {
     var newTypedArray = new constructor(arg0, arg1, arg2);
   }
-  if (!%_IsTypedArray(newTypedArray)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(newTypedArray)) throw MakeTypeError(kNotTypedArray);
   // TODO(littledan): Check for being detached, here and elsewhere
   // All callers where the first argument is a Number have no additional
   // arguments.
@@ -195,8 +200,7 @@
   }
 }
 
-function NAMEConstructByArrayLike(obj, arrayLike) {
-  var length = arrayLike.length;
+function NAMEConstructByArrayLike(obj, arrayLike, length) {
   var l = ToPositiveInteger(length, kInvalidTypedArrayLength);
 
   if (l > %_MaxSmi()) {
@@ -236,7 +240,23 @@
   for (var value of newIterable) {
     list.push(value);
   }
-  NAMEConstructByArrayLike(obj, list);
+  NAMEConstructByArrayLike(obj, list, list.length);
+}
+
+// ES#sec-typedarray-typedarray TypedArray ( typedArray )
+function NAMEConstructByTypedArray(obj, typedArray) {
+  // TODO(littledan): Throw on detached typedArray
+  var srcData = %TypedArrayGetBuffer(typedArray);
+  var length = %_TypedArrayGetLength(typedArray);
+  var byteLength = %_ArrayBufferViewGetByteLength(typedArray);
+  var newByteLength = length * ELEMENT_SIZE;
+  NAMEConstructByArrayLike(obj, typedArray, length);
+  var bufferConstructor = SpeciesConstructor(srcData, GlobalArrayBuffer);
+  var prototype = bufferConstructor.prototype;
+  // TODO(littledan): Use the right prototype based on bufferConstructor's realm
+  if (IS_RECEIVER(prototype) && prototype !== GlobalArrayBufferPrototype) {
+    %InternalSetPrototype(%TypedArrayGetBuffer(obj), prototype);
+  }
 }
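
The new TypedArray(typedArray) path copies and converts the source elements into a freshly allocated buffer, and picks that buffer's prototype from the SpeciesConstructor of the source buffer. A small sketch of the observable copy semantics, assuming ES2015 behaviour:

    var src = new Uint8Array([1, 2, 3, 4]);
    var dst = new Float32Array(src);        // element-wise copy with conversion
    console.log(Array.from(dst));           // [1, 2, 3, 4]
    console.log(dst.buffer === src.buffer); // false: a new buffer is allocated
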
 
 function NAMEConstructor(arg1, arg2, arg3) {
@@ -246,14 +266,12 @@
     } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
                IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
       NAMEConstructByLength(this, arg1);
+    } else if (IS_TYPEDARRAY(arg1)) {
+      NAMEConstructByTypedArray(this, arg1);
     } else {
-      // TODO(littledan): If arg1 is a TypedArray, follow the constructor
-      // path in ES2015 22.2.4.3, and call SpeciesConstructor, in a
-      // path that seems to be an optimized version of what's below, but
-      // in an observably different way.
       var iteratorFn = arg1[iteratorSymbol];
       if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
-        NAMEConstructByArrayLike(this, arg1);
+        NAMEConstructByArrayLike(this, arg1, arg1.length);
       } else {
         NAMEConstructByIterable(this, arg1, iteratorFn);
       }
@@ -263,14 +281,6 @@
   }
 }
 
-// TODO(littledan): Remove this performance workaround BUG(chromium:579905)
-function NAME_GetLength() {
-  if (!(%_ClassOf(this) === 'NAME')) {
-    throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.length", this);
-  }
-  return %_TypedArrayGetLength(this);
-}
-
 function NAMESubArray(begin, end) {
   var beginInt = TO_INTEGER(begin);
   if (!IS_UNDEFINED(end)) {
@@ -323,7 +333,7 @@
 %SetForceInlineFlag(TypedArraySubArray);
 
 function TypedArrayGetBuffer() {
-  if (!%_IsTypedArray(this)) {
+  if (!IS_TYPEDARRAY(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         "get TypedArray.prototype.buffer", this);
   }
@@ -332,7 +342,7 @@
 %SetForceInlineFlag(TypedArrayGetBuffer);
 
 function TypedArrayGetByteLength() {
-  if (!%_IsTypedArray(this)) {
+  if (!IS_TYPEDARRAY(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         "get TypedArray.prototype.byteLength", this);
   }
@@ -341,7 +351,7 @@
 %SetForceInlineFlag(TypedArrayGetByteLength);
 
 function TypedArrayGetByteOffset() {
-  if (!%_IsTypedArray(this)) {
+  if (!IS_TYPEDARRAY(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         "get TypedArray.prototype.byteOffset", this);
   }
@@ -350,7 +360,7 @@
 %SetForceInlineFlag(TypedArrayGetByteOffset);
 
 function TypedArrayGetLength() {
-  if (!%_IsTypedArray(this)) {
+  if (!IS_TYPEDARRAY(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         "get TypedArray.prototype.length", this);
   }
@@ -465,7 +475,7 @@
 %FunctionSetLength(TypedArraySet, 1);
 
 function TypedArrayGetToStringTag() {
-  if (!%_IsTypedArray(this)) return;
+  if (!IS_TYPEDARRAY(this)) return;
   var name = %_ClassOf(this);
   if (IS_UNDEFINED(name)) return;
   return name;
@@ -473,7 +483,7 @@
 
 
 function TypedArrayCopyWithin(target, start, end) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -485,7 +495,7 @@
 
 // ES6 draft 05-05-15, section 22.2.3.7
 function TypedArrayEvery(f, receiver) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -496,7 +506,7 @@
 
 // ES6 draft 08-24-14, section 22.2.3.12
 function TypedArrayForEach(f, receiver) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -507,7 +517,7 @@
 
 // ES6 draft 04-05-14 section 22.2.3.8
 function TypedArrayFill(value, start, end) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -518,7 +528,7 @@
 
 // ES6 draft 07-15-13, section 22.2.3.9
 function TypedArrayFilter(f, thisArg) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
@@ -536,7 +546,7 @@
 
 // ES6 draft 07-15-13, section 22.2.3.10
 function TypedArrayFind(predicate, thisArg) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -547,7 +557,7 @@
 
 // ES6 draft 07-15-13, section 22.2.3.11
 function TypedArrayFindIndex(predicate, thisArg) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -558,7 +568,7 @@
 
 // ES6 draft 05-18-15, section 22.2.3.21
 function TypedArrayReverse() {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -586,7 +596,7 @@
 
 // ES6 draft 05-18-15, section 22.2.3.25
 function TypedArraySort(comparefn) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -600,7 +610,7 @@
 
 // ES6 section 22.2.3.13
 function TypedArrayIndexOf(element, index) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
   return InnerArrayIndexOf(this, element, index, length);
@@ -610,7 +620,7 @@
 
 // ES6 section 22.2.3.16
 function TypedArrayLastIndexOf(element, index) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -622,7 +632,7 @@
 
 // ES6 draft 07-15-13, section 22.2.3.18
 function TypedArrayMap(f, thisArg) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
   var result = TypedArraySpeciesCreate(this, length);
@@ -638,7 +648,7 @@
 
 // ES6 draft 05-05-15, section 22.2.3.24
 function TypedArraySome(f, receiver) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -649,7 +659,7 @@
 
 // ES6 section 22.2.3.27
 function TypedArrayToLocaleString() {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -657,15 +667,9 @@
 }
 
 
-// ES6 section 22.2.3.28
-function TypedArrayToString() {
-  return %_Call(ArrayToString, this);
-}
-
-
 // ES6 section 22.2.3.14
 function TypedArrayJoin(separator) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -675,7 +679,7 @@
 
 // ES6 draft 07-15-13, section 22.2.3.19
 function TypedArrayReduce(callback, current) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
   return InnerArrayReduce(callback, current, this, length,
@@ -686,7 +690,7 @@
 
 // ES6 draft 07-15-13, section 22.2.3.19
 function TypedArrayReduceRight(callback, current) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
   return InnerArrayReduceRight(callback, current, this, length,
@@ -696,7 +700,7 @@
 
 
 function TypedArraySlice(start, end) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
   var len = %_TypedArrayGetLength(this);
 
   var relativeStart = TO_INTEGER(start);
@@ -740,7 +744,7 @@
 
 // ES2016 draft, section 22.2.3.14
 function TypedArrayIncludes(searchElement, fromIndex) {
-  if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+  if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
 
   var length = %_TypedArrayGetLength(this);
 
@@ -760,14 +764,50 @@
 }
 
 
+// ES#sec-iterabletoarraylike Runtime Semantics: IterableToArrayLike( items )
+function IterableToArrayLike(items) {
+  var iterable = GetMethod(items, iteratorSymbol);
+  if (!IS_UNDEFINED(iterable)) {
+    var internal_array = new InternalArray();
+    var i = 0;
+    for (var value of
+         { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
+      internal_array[i] = value;
+      i++;
+    }
+    var array = [];
+    %MoveArrayContents(internal_array, array);
+    return array;
+  }
+  return TO_OBJECT(items);
+}
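
IterableToArrayLike prefers the iteration protocol and only falls back to ToObject for non-iterable, array-like inputs. A hedged JavaScript sketch of the same abstract operation:

    function iterableToArrayLike(items) {
      var method = items[Symbol.iterator];
      if (method !== undefined && method !== null) {
        var result = [];
        for (var value of items) result.push(value);  // drain the iterator
        return result;
      }
      return Object(items);  // array-like fallback
    }
    console.log(iterableToArrayLike(new Set([1, 2, 3])));            // [1, 2, 3]
    console.log(iterableToArrayLike({ length: 1, 0: "x" }).length);  // 1
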
+
+
+// ES#sec-%typedarray%.from
+// %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
 function TypedArrayFrom(source, mapfn, thisArg) {
-  // TODO(littledan): Investigate if there is a receiver which could be
-  // faster to accumulate on than Array, e.g., a TypedVector.
-  // TODO(littledan): Rewrite this code to ensure that things happen
-  // in the right order, e.g., the constructor needs to be called before
-  // the mapping function on array-likes.
-  var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
-  return TypedArrayCreate(this, array);
+  if (!%IsConstructor(this)) throw MakeTypeError(kNotConstructor, this);
+  var mapping;
+  if (!IS_UNDEFINED(mapfn)) {
+    if (!IS_CALLABLE(mapfn)) throw MakeTypeError(kCalledNonCallable, this);
+    mapping = true;
+  } else {
+    mapping = false;
+  }
+  var arrayLike = IterableToArrayLike(source);
+  var length = TO_LENGTH(arrayLike.length);
+  var targetObject = TypedArrayCreate(this, length);
+  var value, mappedValue;
+  for (var i = 0; i < length; i++) {
+    value = arrayLike[i];
+    if (mapping) {
+      mappedValue = %_Call(mapfn, thisArg, value, i);
+    } else {
+      mappedValue = value;
+    }
+    targetObject[i] = mappedValue;
+  }
+  return targetObject;
 }
 %FunctionSetLength(TypedArrayFrom, 1);
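
The rewritten %TypedArray%.from drains the source via IterableToArrayLike, creates the target before mapping, and then applies the optional mapping function element by element. A small usage sketch, assuming ES2015 %TypedArray%.from semantics:

    console.log(Uint8Array.from([1, 2, 3], function(x) { return x * 2; }));  // Uint8Array [2, 4, 6]
    console.log(Float64Array.from(new Set([1.5, 2.5])));                     // Float64Array [1.5, 2.5]
    // A non-constructor receiver is rejected up front:
    // Uint8Array.from.call({}, [1]);  // TypeError
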
 
@@ -785,7 +825,7 @@
 %FunctionSetPrototype(TypedArray, new GlobalObject());
 %AddNamedProperty(TypedArray.prototype,
                   "constructor", TypedArray, DONT_ENUM);
-utils.InstallFunctions(TypedArray, DONT_ENUM | DONT_DELETE | READ_ONLY, [
+utils.InstallFunctions(TypedArray, DONT_ENUM, [
   "from", TypedArrayFrom,
   "of", TypedArrayOf
 ]);
@@ -819,10 +859,12 @@
   "slice", TypedArraySlice,
   "some", TypedArraySome,
   "sort", TypedArraySort,
-  "toString", TypedArrayToString,
   "toLocaleString", TypedArrayToLocaleString
 ]);
 
+%AddNamedProperty(TypedArray.prototype, "toString", ArrayToString,
+                  DONT_ENUM);
+
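
Installing Array.prototype.toString directly gives typed arrays the generic join-based stringification. A one-line sketch:

    console.log(new Uint8Array([1, 2, 3]).toString());  // "1,2,3"
    console.log(String(new Float32Array([0.5])));       // "0.5"
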
 
 macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
   %SetCode(GlobalNAME, NAMEConstructor);
@@ -838,9 +880,6 @@
   %AddNamedProperty(GlobalNAME.prototype,
                     "BYTES_PER_ELEMENT", ELEMENT_SIZE,
                     READ_ONLY | DONT_ENUM | DONT_DELETE);
-  // TODO(littledan): Remove this performance workaround BUG(chromium:579905)
-  utils.InstallGetter(GlobalNAME.prototype, "length", NAME_GetLength,
-                      DONT_ENUM | DONT_DELETE);
 endmacro
 
 TYPED_ARRAYS(SETUP_TYPED_ARRAY)
diff --git a/src/js/uri.js b/src/js/uri.js
index 712d7e6..dca83c9 100644
--- a/src/js/uri.js
+++ b/src/js/uri.js
@@ -15,7 +15,6 @@
 // Imports
 
 var GlobalObject = global.Object;
-var GlobalArray = global.Array;
 var InternalArray = utils.InternalArray;
 var MakeURIError;
 
@@ -76,7 +75,7 @@
   var x = (cc >> 12) & 0xF;
   var y = (cc >> 6) & 63;
   var z = cc & 63;
-  var octets = new GlobalArray(3);
+  var octets = new InternalArray(3);
   if (cc <= 0x007F) {
     octets[0] = cc;
   } else if (cc <= 0x07FF) {
@@ -96,7 +95,7 @@
   var x = cc1 & 3;
   var y = (cc2 >> 6) & 0xF;
   var z = cc2 & 63;
-  var octets = new GlobalArray(4);
+  var octets = new InternalArray(4);
   octets[0] = (u >> 2) + 240;
   octets[1] = (((u & 3) << 4) | w) + 128;
   octets[2] = ((x << 4) | y) + 128;
@@ -248,7 +247,7 @@
         var n = 0;
         while (((cc << ++n) & 0x80) != 0) { }
         if (n == 1 || n > 4) throw MakeURIError();
-        var octets = new GlobalArray(n);
+        var octets = new InternalArray(n);
         octets[0] = cc;
         if (k + 3 * (n - 1) >= uriLength) throw MakeURIError();
         for (var i = 1; i < n; i++) {
diff --git a/src/js/v8natives.js b/src/js/v8natives.js
index 5e1a825..5185c62 100644
--- a/src/js/v8natives.js
+++ b/src/js/v8natives.js
@@ -134,14 +134,6 @@
 }
 
 
-// ES6 7.3.11
-function ObjectHasOwnProperty(value) {
-  var name = TO_NAME(value);
-  var object = TO_OBJECT(this);
-  return %HasOwnProperty(object, name);
-}
-
-
 // ES6 19.1.3.3 Object.prototype.isPrototypeOf(V)
 function ObjectIsPrototypeOf(V) {
   if (!IS_RECEIVER(V)) return false;
@@ -581,11 +573,9 @@
         if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
           var currentIsWritable = current.isWritable();
           if (currentIsWritable != desc.isWritable()) {
-            if (!currentIsWritable || IS_STRONG(obj)) {
+            if (!currentIsWritable) {
               if (should_throw) {
-                throw currentIsWritable
-                    ? MakeTypeError(kStrongRedefineDisallowed, obj, p)
-                    : MakeTypeError(kRedefineDisallowed, p);
+                throw MakeTypeError(kRedefineDisallowed, p);
               } else {
                 return false;
               }
@@ -850,7 +840,6 @@
   "toString", ObjectToString,
   "toLocaleString", ObjectToLocaleString,
   "valueOf", ObjectValueOf,
-  "hasOwnProperty", ObjectHasOwnProperty,
   "isPrototypeOf", ObjectIsPrototypeOf,
   "propertyIsEnumerable", ObjectPropertyIsEnumerable,
   "__defineGetter__", ObjectDefineGetter,
@@ -1106,9 +1095,10 @@
   to.IsFinite = GlobalIsFinite;
   to.IsNaN = GlobalIsNaN;
   to.NumberIsNaN = NumberIsNaN;
+  to.NumberIsInteger = NumberIsInteger;
   to.ObjectDefineProperties = ObjectDefineProperties;
   to.ObjectDefineProperty = ObjectDefineProperty;
-  to.ObjectHasOwnProperty = ObjectHasOwnProperty;
+  to.ObjectHasOwnProperty = GlobalObject.prototype.hasOwnProperty;
 });
 
 %InstallToContext([
diff --git a/src/json-parser.h b/src/json-parser.h
index efd3c04..1b9829f 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -37,6 +37,7 @@
         source_length_(source->length()),
         isolate_(source->map()->GetHeap()->isolate()),
         factory_(isolate_->factory()),
+        zone_(isolate_->allocator()),
         object_constructor_(isolate_->native_context()->object_function(),
                             isolate_),
         position_(-1) {
@@ -536,7 +537,7 @@
     fast_elements->set(i, *elements[i]);
   }
   Handle<Object> json_array = factory()->NewJSArrayWithElements(
-      fast_elements, FAST_ELEMENTS, Strength::WEAK, pretenure_);
+      fast_elements, FAST_ELEMENTS, pretenure_);
   return scope.CloseAndEscape(json_array);
 }
 
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index d97ca2b..b40a782 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -245,7 +245,7 @@
                     LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
   Handle<Object> fun;
   ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
-  if (!fun->IsJSFunction()) return object;
+  if (!fun->IsCallable()) return object;
 
   // Call toJSON function.
   if (key->IsSmi()) key = factory()->NumberToString(key);
@@ -501,8 +501,7 @@
     if (i > 0) builder_.AppendCharacter(',');
     Handle<Object> element;
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate_, element,
-        Object::GetElement(isolate_, object, i),
+        isolate_, element, JSReceiver::GetElement(isolate_, object, i),
         EXCEPTION);
     if (element->IsUndefined()) {
       builder_.AppendCString("null");
@@ -580,8 +579,8 @@
         DCHECK(key->IsNumber());
         key_handle = factory()->NumberToString(Handle<Object>(key, isolate_));
         if (key->IsSmi()) {
-          maybe_property = Object::GetElement(
-              isolate_, object, Smi::cast(key)->value());
+          maybe_property =
+              JSReceiver::GetElement(isolate_, object, Smi::cast(key)->value());
         } else {
           maybe_property = Object::GetPropertyOrElement(object, key_handle);
         }
diff --git a/src/key-accumulator.cc b/src/keys.cc
similarity index 65%
rename from src/key-accumulator.cc
rename to src/keys.cc
index c2c4996..f8b606c 100644
--- a/src/key-accumulator.cc
+++ b/src/keys.cc
@@ -2,26 +2,24 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/key-accumulator.h"
+#include "src/keys.h"
 
 #include "src/elements.h"
 #include "src/factory.h"
 #include "src/isolate-inl.h"
 #include "src/objects-inl.h"
 #include "src/property-descriptor.h"
-
+#include "src/prototype.h"
 
 namespace v8 {
 namespace internal {
 
-
 KeyAccumulator::~KeyAccumulator() {
   for (size_t i = 0; i < elements_.size(); i++) {
     delete elements_[i];
   }
 }
 
-
 Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
   if (length_ == 0) {
     return isolate_->factory()->empty_fixed_array();
@@ -46,6 +44,7 @@
   for (size_t level = 0; level < max_level; level++) {
     int num_string_properties = level_lengths_[level * 2];
     int num_symbol_properties = level_lengths_[level * 2 + 1];
+    int num_elements = 0;
     if (num_string_properties < 0) {
       // If the |num_string_properties| is negative, the current level contains
       // properties from a proxy, hence we skip the integer keys in |elements_|
@@ -54,7 +53,7 @@
     } else if (level < elements_.size()) {
       // Add the element indices for this prototype level.
       std::vector<uint32_t>* elements = elements_[level];
-      int num_elements = static_cast<int>(elements->size());
+      num_elements = static_cast<int>(elements->size());
       for (int i = 0; i < num_elements; i++) {
         Handle<Object> key;
         if (convert == KEEP_NUMBERS) {
@@ -80,13 +79,19 @@
       insertion_index++;
       symbol_properties_index++;
     }
+    if (FLAG_trace_for_in_enumerate) {
+      PrintF("| strings=%d symbols=%d elements=%i ", num_string_properties,
+             num_symbol_properties, num_elements);
+    }
+  }
+  if (FLAG_trace_for_in_enumerate) {
+    PrintF("|| prototypes=%zu ||\n", max_level);
   }
 
   DCHECK_EQ(insertion_index, length_);
   return result;
 }
 
-
 namespace {
 
 bool AccumulatorHasKey(std::vector<uint32_t>* sub_elements, uint32_t key) {
@@ -99,7 +104,6 @@
   return AddKey(handle(key, isolate_), convert);
 }
 
-
 bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
   if (key->IsSymbol()) {
     if (filter_ & SKIP_SYMBOLS) return false;
@@ -136,10 +140,8 @@
   return AddStringKey(key, convert);
 }
 
-
 bool KeyAccumulator::AddKey(uint32_t key) { return AddIntegerKey(key); }
 
-
 bool KeyAccumulator::AddIntegerKey(uint32_t key) {
   // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
   // We mark proxy-levels with a negative length
@@ -154,7 +156,6 @@
   return true;
 }
 
-
 bool KeyAccumulator::AddStringKey(Handle<Object> key,
                                   AddKeyConversion convert) {
   if (string_properties_.is_null()) {
@@ -176,7 +177,6 @@
   }
 }
 
-
 bool KeyAccumulator::AddSymbolKey(Handle<Object> key) {
   if (symbol_properties_.is_null()) {
     symbol_properties_ = OrderedHashSet::Allocate(isolate_, 16);
@@ -192,7 +192,6 @@
   }
 }
 
-
 void KeyAccumulator::AddKeys(Handle<FixedArray> array,
                              AddKeyConversion convert) {
   int add_length = array->length();
@@ -203,7 +202,6 @@
   }
 }
 
-
 void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
                              AddKeyConversion convert) {
   DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
@@ -211,7 +209,6 @@
   accessor->AddElementsToKeyAccumulator(array_like, this, convert);
 }
 
-
 void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
   // Proxies define a complete list of keys with no distinction of
   // elements and properties, which breaks the normal assumption for the
@@ -223,7 +220,6 @@
   level_string_length_ = -level_string_length_;
 }
 
-
 MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
                                         Handle<FixedArray> keys,
                                         PropertyFilter filter) {
@@ -253,13 +249,14 @@
   return keys;
 }
 
-
 // Returns "nothing" in case of exception, "true" on success.
 Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
                                              Handle<FixedArray> keys) {
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
-      Nothing<bool>());
+  if (filter_proxy_keys_) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
+        Nothing<bool>());
+  }
   // Proxies define a complete list of keys with no distinction of
   // elements and properties, which breaks the normal assumption for the
   // KeyAccumulator.
@@ -277,7 +274,6 @@
   return Just(true);
 }
 
-
 void KeyAccumulator::AddElementKeysFromInterceptor(
     Handle<JSObject> array_like) {
   AddKeys(array_like, CONVERT_TO_ARRAY_INDEX);
@@ -286,7 +282,6 @@
   SortCurrentElementsListRemoveDuplicates();
 }
 
-
 void KeyAccumulator::SortCurrentElementsListRemoveDuplicates() {
   // Sort and remove duplicates from the current elements level and adjust.
   // the lengths accordingly.
@@ -300,14 +295,12 @@
   length_ -= static_cast<int>(nof_removed_keys);
 }
 
-
 void KeyAccumulator::SortCurrentElementsList() {
   if (elements_.empty()) return;
   auto element_keys = elements_.back();
   std::sort(element_keys->begin(), element_keys->end());
 }
 
-
 void KeyAccumulator::NextPrototype() {
   // Store the protoLength on the first call of this method.
   if (!elements_.empty()) {
@@ -319,6 +312,154 @@
   level_symbol_length_ = 0;
 }
 
+namespace {
+
+void TrySettingEmptyEnumCache(JSReceiver* object) {
+  Map* map = object->map();
+  DCHECK_EQ(kInvalidEnumCacheSentinel, map->EnumLength());
+  if (!map->OnlyHasSimpleProperties()) return;
+  if (map->IsJSProxyMap()) return;
+  if (map->NumberOfOwnDescriptors() > 0) {
+    int number_of_enumerable_own_properties =
+        map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+    if (number_of_enumerable_own_properties > 0) return;
+  }
+  DCHECK(object->IsJSObject());
+  map->SetEnumLength(0);
+}
+
+bool CheckAndInitalizeSimpleEnumCache(JSReceiver* object) {
+  if (object->map()->EnumLength() == kInvalidEnumCacheSentinel) {
+    TrySettingEmptyEnumCache(object);
+  }
+  if (object->map()->EnumLength() != 0) return false;
+  DCHECK(object->IsJSObject());
+  return !JSObject::cast(object)->HasEnumerableElements();
+}
+}  // namespace
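
These helpers let for-in take a cached fast path when the receiver's prototypes contribute neither enumerable properties nor elements. A hedged JavaScript illustration of the object shapes involved:

    // Prototypes with no enumerable keys or elements keep the walk trivial.
    var plain = { a: 1, b: 2 };
    for (var k in plain) console.log(k);    // "a", "b"
    // An enumerable property on a prototype forces the full accumulation.
    var child = Object.create({ c: 3 });
    child.a = 1;
    for (var k2 in child) console.log(k2);  // "a", then "c"
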
+
+void FastKeyAccumulator::Prepare() {
+  DisallowHeapAllocation no_gc;
+  // Directly go for the fast path for OWN_ONLY keys.
+  if (type_ == OWN_ONLY) return;
+  // Fully walk the prototype chain and find the last prototype with keys.
+  is_receiver_simple_enum_ = false;
+  has_empty_prototype_ = true;
+  JSReceiver* first_non_empty_prototype;
+  for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
+       iter.Advance()) {
+    JSReceiver* current = iter.GetCurrent<JSReceiver>();
+    if (CheckAndInitalizeSimpleEnumCache(current)) continue;
+    has_empty_prototype_ = false;
+    first_non_empty_prototype = current;
+    // TODO(cbruni): use the first non-empty prototype.
+    USE(first_non_empty_prototype);
+    return;
+  }
+  DCHECK(has_empty_prototype_);
+  is_receiver_simple_enum_ =
+      receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
+      !JSObject::cast(*receiver_)->HasEnumerableElements();
+}
+
+namespace {
+
+template <bool fast_properties>
+Handle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
+                                          Handle<JSObject> object,
+                                          GetKeysConversion convert) {
+  Handle<FixedArray> keys;
+  ElementsAccessor* accessor = object->GetElementsAccessor();
+  if (fast_properties) {
+    keys = JSObject::GetFastEnumPropertyKeys(isolate, object);
+  } else {
+    // TODO(cbruni): preallocate big enough array to also hold elements.
+    keys = JSObject::GetEnumPropertyKeys(object);
+  }
+  Handle<FixedArray> result =
+      accessor->PrependElementIndices(object, keys, convert, ONLY_ENUMERABLE);
+
+  if (FLAG_trace_for_in_enumerate) {
+    PrintF("| strings=%d symbols=0 elements=%u || prototypes>=1 ||\n",
+           keys->length(), result->length() - keys->length());
+  }
+  return result;
+}
+
+MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache(
+    Isolate* isolate, Handle<JSObject> object) {
+  // Uninitialized enum cache
+  Map* map = object->map();
+  if (object->elements() != isolate->heap()->empty_fixed_array() ||
+      object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
+    // Assume that there are elements.
+    return MaybeHandle<FixedArray>();
+  }
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+  if (number_of_own_descriptors == 0) {
+    map->SetEnumLength(0);
+    return isolate->factory()->empty_fixed_array();
+  }
+  // We have no elements but possibly enumerable property keys, hence we can
+  // directly initialize the enum cache.
+  return JSObject::GetFastEnumPropertyKeys(isolate, object);
+}
+
+bool OnlyHasSimpleProperties(Map* map) {
+  return map->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER;
+}
+
+}  // namespace
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(GetKeysConversion convert) {
+  Handle<FixedArray> keys;
+  if (GetKeysFast(convert).ToHandle(&keys)) {
+    return keys;
+  }
+  return GetKeysSlow(convert);
+}
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
+    GetKeysConversion convert) {
+  bool own_only = has_empty_prototype_ || type_ == OWN_ONLY;
+  Map* map = receiver_->map();
+  if (!own_only || !OnlyHasSimpleProperties(map)) {
+    return MaybeHandle<FixedArray>();
+  }
+
+  // From this point on we are certain to only collect own keys.
+  DCHECK(receiver_->IsJSObject());
+  Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
+
+  // Do not try to use the enum-cache for dict-mode objects.
+  if (map->is_dictionary_map()) {
+    return GetOwnKeysWithElements<false>(isolate_, object, convert);
+  }
+  int enum_length = receiver_->map()->EnumLength();
+  if (enum_length == kInvalidEnumCacheSentinel) {
+    Handle<FixedArray> keys;
+    // Try initializing the enum cache and return own properties.
+    if (GetOwnKeysWithUninitializedEnumCache(isolate_, object)
+            .ToHandle(&keys)) {
+      if (FLAG_trace_for_in_enumerate) {
+        PrintF("| strings=%d symbols=0 elements=0 || prototypes>=1 ||\n",
+               keys->length());
+      }
+      is_receiver_simple_enum_ =
+          object->map()->EnumLength() != kInvalidEnumCacheSentinel;
+      return keys;
+    }
+  }
+  // The properties-only case failed because there were probably elements on the
+  // receiver.
+  return GetOwnKeysWithElements<true>(isolate_, object, convert);
+}
+
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
+    GetKeysConversion convert) {
+  return JSReceiver::GetKeys(receiver_, type_, ENUMERABLE_STRINGS, KEEP_NUMBERS,
+                             filter_proxy_keys_);
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/key-accumulator.h b/src/keys.h
similarity index 74%
rename from src/key-accumulator.h
rename to src/keys.h
index 9daee10..1fd3fc0 100644
--- a/src/key-accumulator.h
+++ b/src/keys.h
@@ -52,6 +52,7 @@
   Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
   int length() { return length_; }
   Isolate* isolate() { return isolate_; }
+  void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
 
  private:
   bool AddIntegerKey(uint32_t key);
@@ -62,6 +63,7 @@
   Isolate* isolate_;
   KeyCollectionType type_;
   PropertyFilter filter_;
+  bool filter_proxy_keys_ = true;
   // |elements_| contains the sorted element keys (indices) per level.
   std::vector<std::vector<uint32_t>*> elements_;
   // |protoLengths_| contains the total number of keys (elements + properties)
@@ -86,9 +88,42 @@
   DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
 };
 
+// The FastKeyAccumulator handles the cases where there are no elements on the
+// prototype chain and forwards the complex/slow cases to the normal
+// KeyAccumulator.
+class FastKeyAccumulator {
+ public:
+  FastKeyAccumulator(Isolate* isolate, Handle<JSReceiver> receiver,
+                     KeyCollectionType type, PropertyFilter filter)
+      : isolate_(isolate), receiver_(receiver), type_(type), filter_(filter) {
+    Prepare();
+    // TODO(cbruni): pass filter_ directly to the KeyAccumulator.
+    USE(filter_);
+  }
+
+  bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
+  bool has_empty_prototype() { return has_empty_prototype_; }
+  void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
+
+  MaybeHandle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+
+ private:
+  void Prepare();
+  MaybeHandle<FixedArray> GetKeysFast(GetKeysConversion convert);
+  MaybeHandle<FixedArray> GetKeysSlow(GetKeysConversion convert);
+
+  Isolate* isolate_;
+  Handle<JSReceiver> receiver_;
+  KeyCollectionType type_;
+  PropertyFilter filter_;
+  bool is_receiver_simple_enum_ = false;
+  bool has_empty_prototype_ = false;
+  bool filter_proxy_keys_ = true;
+
+  DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
+};
 
 }  // namespace internal
 }  // namespace v8
 
-
 #endif  // V8_KEY_ACCUMULATOR_H_
diff --git a/src/libplatform/default-platform.cc b/src/libplatform/default-platform.cc
index 6902504..71ee6be 100644
--- a/src/libplatform/default-platform.cc
+++ b/src/libplatform/default-platform.cc
@@ -29,9 +29,7 @@
   return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate);
 }
 
-
-const int DefaultPlatform::kMaxThreadPoolSize = 4;
-
+const int DefaultPlatform::kMaxThreadPoolSize = 8;
 
 DefaultPlatform::DefaultPlatform()
     : initialized_(false), thread_pool_size_(0) {}
@@ -66,7 +64,7 @@
   base::LockGuard<base::Mutex> guard(&lock_);
   DCHECK(thread_pool_size >= 0);
   if (thread_pool_size < 1) {
-    thread_pool_size = base::SysInfo::NumberOfProcessors();
+    thread_pool_size = base::SysInfo::NumberOfProcessors() - 1;
   }
   thread_pool_size_ =
       std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
@@ -172,8 +170,9 @@
 
 uint64_t DefaultPlatform::AddTraceEvent(
     char phase, const uint8_t* category_enabled_flag, const char* name,
-    uint64_t id, uint64_t bind_id, int num_args, const char** arg_names,
-    const uint8_t* arg_types, const uint64_t* arg_values, unsigned int flags) {
+    const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+    const char** arg_names, const uint8_t* arg_types,
+    const uint64_t* arg_values, unsigned int flags) {
   return 0;
 }
 
@@ -194,6 +193,7 @@
   return dummy;
 }
 
+
 size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
   return static_cast<size_t>(thread_pool_size_);
 }
diff --git a/src/libplatform/default-platform.h b/src/libplatform/default-platform.h
index 2c428ee..ea39abc 100644
--- a/src/libplatform/default-platform.h
+++ b/src/libplatform/default-platform.h
@@ -47,9 +47,10 @@
   const char* GetCategoryGroupName(
       const uint8_t* category_enabled_flag) override;
   uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
-                         const char* name, uint64_t id, uint64_t bind_id,
-                         int32_t num_args, const char** arg_names,
-                         const uint8_t* arg_types, const uint64_t* arg_values,
+                         const char* name, const char* scope, uint64_t id,
+                         uint64_t bind_id, int32_t num_args,
+                         const char** arg_names, const uint8_t* arg_types,
+                         const uint64_t* arg_values,
                          unsigned int flags) override;
   void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                 const char* name, uint64_t handle) override;
diff --git a/src/log-utils.h b/src/log-utils.h
index 7621668..3e70a96 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -30,7 +30,7 @@
   static bool InitLogAtStart() {
     return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
            FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp ||
-           FLAG_ll_prof || FLAG_perf_basic_prof ||
+           FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_prof ||
            FLAG_log_internal_timer_events || FLAG_prof_cpp;
   }
 
diff --git a/src/log.cc b/src/log.cc
index cbdd9dd..93111a2 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -13,9 +13,12 @@
 #include "src/code-stubs.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
 #include "src/log-inl.h"
 #include "src/log-utils.h"
 #include "src/macro-assembler.h"
+#include "src/perf-jit.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/runtime-profiler.h"
 #include "src/string-stream.h"
@@ -45,11 +48,13 @@
     }                                                     \
   } while (false);
 
-static const char* ComputeMarker(SharedFunctionInfo* shared, Code* code) {
+static const char* ComputeMarker(SharedFunctionInfo* shared,
+                                 AbstractCode* code) {
   switch (code->kind()) {
-    case Code::FUNCTION:
+    case AbstractCode::FUNCTION:
+    case AbstractCode::INTERPRETED_FUNCTION:
       return shared->optimization_disabled() ? "" : "~";
-    case Code::OPTIMIZED_FUNCTION:
+    case AbstractCode::OPTIMIZED_FUNCTION:
       return "*";
     default:
       return "";
@@ -159,42 +164,35 @@
 
 CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
 
-
 void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                      Code* code,
-                                      const char* comment) {
+                                      AbstractCode* code, const char* comment) {
   name_buffer_->Init(tag);
   name_buffer_->AppendBytes(comment);
   LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
 }
 
-
 void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                      Code* code,
-                                      Name* name) {
+                                      AbstractCode* code, Name* name) {
   name_buffer_->Init(tag);
   name_buffer_->AppendName(name);
   LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
 }
 
-
 void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                      Code* code,
+                                      AbstractCode* code,
                                       SharedFunctionInfo* shared,
-                                      CompilationInfo* info,
-                                      Name* name) {
+                                      CompilationInfo* info, Name* name) {
   name_buffer_->Init(tag);
   name_buffer_->AppendBytes(ComputeMarker(shared, code));
   name_buffer_->AppendName(name);
   LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
 }
 
-
 void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                      Code* code,
+                                      AbstractCode* code,
                                       SharedFunctionInfo* shared,
-                                      CompilationInfo* info,
-                                      Name* source, int line, int column) {
+                                      CompilationInfo* info, Name* source,
+                                      int line, int column) {
   name_buffer_->Init(tag);
   name_buffer_->AppendBytes(ComputeMarker(shared, code));
   name_buffer_->AppendString(shared->DebugName());
@@ -211,17 +209,15 @@
   LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
 }
 
-
 void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                      Code* code,
-                                      int args_count) {
+                                      AbstractCode* code, int args_count) {
   name_buffer_->Init(tag);
   name_buffer_->AppendInt(args_count);
   LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
 }
 
-
-void CodeEventLogger::RegExpCodeCreateEvent(Code* code, String* source) {
+void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
+                                            String* source) {
   name_buffer_->Init(Logger::REG_EXP_TAG);
   name_buffer_->AppendString(source);
   LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
@@ -232,17 +228,15 @@
 class PerfBasicLogger : public CodeEventLogger {
  public:
   PerfBasicLogger();
-  virtual ~PerfBasicLogger();
+  ~PerfBasicLogger() override;
 
-  virtual void CodeMoveEvent(Address from, Address to) { }
-  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
-  virtual void CodeDeleteEvent(Address from) { }
+  void CodeMoveEvent(AbstractCode* from, Address to) override {}
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override {}
 
  private:
-  virtual void LogRecordedBuffer(Code* code,
-                                 SharedFunctionInfo* shared,
-                                 const char* name,
-                                 int length);
+  void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+                         const char* name, int length) override;
 
   // Extension added to V8 log file name to get the low-level log name.
   static const char kFilenameFormatString[];
@@ -281,16 +275,12 @@
   perf_output_handle_ = NULL;
 }
 
-
-void PerfBasicLogger::LogRecordedBuffer(Code* code,
-                                       SharedFunctionInfo*,
-                                       const char* name,
-                                       int length) {
-  DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
-
+void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+                                        const char* name, int length) {
   if (FLAG_perf_basic_prof_only_functions &&
-      (code->kind() != Code::FUNCTION &&
-       code->kind() != Code::OPTIMIZED_FUNCTION)) {
+      (code->kind() != AbstractCode::FUNCTION &&
+       code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
+       code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
     return;
   }
 
@@ -306,19 +296,17 @@
 class LowLevelLogger : public CodeEventLogger {
  public:
   explicit LowLevelLogger(const char* file_name);
-  virtual ~LowLevelLogger();
+  ~LowLevelLogger() override;
 
-  virtual void CodeMoveEvent(Address from, Address to);
-  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
-  virtual void CodeDeleteEvent(Address from);
-  virtual void SnapshotPositionEvent(Address addr, int pos);
-  virtual void CodeMovingGCEvent();
+  void CodeMoveEvent(AbstractCode* from, Address to) override;
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override {}
+  void SnapshotPositionEvent(HeapObject* obj, int pos);
+  void CodeMovingGCEvent() override;
 
  private:
-  virtual void LogRecordedBuffer(Code* code,
-                                 SharedFunctionInfo* shared,
-                                 const char* name,
-                                 int length);
+  void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+                         const char* name, int length) override;
 
   // Low-level profiling event structures.
   struct CodeCreateStruct {
@@ -338,21 +326,6 @@
   };
 
 
-  struct CodeDeleteStruct {
-    static const char kTag = 'D';
-
-    Address address;
-  };
-
-
-  struct SnapshotPositionStruct {
-    static const char kTag = 'P';
-
-    Address address;
-    int32_t position;
-  };
-
-
   static const char kCodeMovingGCTag = 'G';
 
 
@@ -416,21 +389,19 @@
   const char arch[] = "x87";
 #elif V8_TARGET_ARCH_ARM64
   const char arch[] = "arm64";
+#elif V8_TARGET_ARCH_S390
+  const char arch[] = "s390";
 #else
   const char arch[] = "unknown";
 #endif
   LogWriteBytes(arch, sizeof(arch));
 }
 
-
-void LowLevelLogger::LogRecordedBuffer(Code* code,
-                                       SharedFunctionInfo*,
-                                       const char* name,
-                                       int length) {
+void LowLevelLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+                                       const char* name, int length) {
   CodeCreateStruct event;
   event.name_size = length;
   event.code_address = code->instruction_start();
-  DCHECK(event.code_address == code->address() + Code::kHeaderSize);
   event.code_size = code->instruction_size();
   LogWriteStruct(event);
   LogWriteBytes(name, length);
@@ -439,26 +410,11 @@
       code->instruction_size());
 }
 
-
-void LowLevelLogger::CodeMoveEvent(Address from, Address to) {
+void LowLevelLogger::CodeMoveEvent(AbstractCode* from, Address to) {
   CodeMoveStruct event;
-  event.from_address = from + Code::kHeaderSize;
-  event.to_address = to + Code::kHeaderSize;
-  LogWriteStruct(event);
-}
-
-
-void LowLevelLogger::CodeDeleteEvent(Address from) {
-  CodeDeleteStruct event;
-  event.address = from + Code::kHeaderSize;
-  LogWriteStruct(event);
-}
-
-
-void LowLevelLogger::SnapshotPositionEvent(Address addr, int pos) {
-  SnapshotPositionStruct event;
-  event.address = addr + Code::kHeaderSize;
-  event.position = pos;
+  event.from_address = from->instruction_start();
+  size_t header_size = from->instruction_start() - from->address();
+  event.to_address = to + header_size;
   LogWriteStruct(event);
 }
 
@@ -484,23 +440,19 @@
  public:
   explicit JitLogger(JitCodeEventHandler code_event_handler);
 
-  virtual void CodeMoveEvent(Address from, Address to);
-  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
-  virtual void CodeDeleteEvent(Address from);
-  virtual void AddCodeLinePosInfoEvent(
-      void* jit_handler_data,
-      int pc_offset,
-      int position,
-      JitCodeEvent::PositionType position_type);
+  void CodeMoveEvent(AbstractCode* from, Address to) override;
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override {}
+  void AddCodeLinePosInfoEvent(void* jit_handler_data, int pc_offset,
+                               int position,
+                               JitCodeEvent::PositionType position_type);
 
   void* StartCodePosInfoEvent();
-  void EndCodePosInfoEvent(Code* code, void* jit_handler_data);
+  void EndCodePosInfoEvent(AbstractCode* code, void* jit_handler_data);
 
  private:
-  virtual void LogRecordedBuffer(Code* code,
-                                 SharedFunctionInfo* shared,
-                                 const char* name,
-                                 int length);
+  void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+                         const char* name, int length) override;
 
   JitCodeEventHandler code_event_handler_;
   base::Mutex logger_mutex_;
@@ -511,10 +463,8 @@
     : code_event_handler_(code_event_handler) {
 }
 
-
-void JitLogger::LogRecordedBuffer(Code* code,
-                                  SharedFunctionInfo* shared,
-                                  const char* name,
+void JitLogger::LogRecordedBuffer(AbstractCode* code,
+                                  SharedFunctionInfo* shared, const char* name,
                                   int length) {
   JitCodeEvent event;
   memset(&event, 0, sizeof(event));
@@ -531,35 +481,19 @@
   code_event_handler_(&event);
 }
 
-
-void JitLogger::CodeMoveEvent(Address from, Address to) {
+void JitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
   base::LockGuard<base::Mutex> guard(&logger_mutex_);
-  Code* from_code = Code::cast(HeapObject::FromAddress(from));
 
   JitCodeEvent event;
   event.type = JitCodeEvent::CODE_MOVED;
-  event.code_start = from_code->instruction_start();
-  event.code_len = from_code->instruction_size();
+  event.code_start = from->instruction_start();
+  event.code_len = from->instruction_size();
 
   // Calculate the header size.
-  const size_t header_size =
-      from_code->instruction_start() - reinterpret_cast<byte*>(from_code);
+  const size_t header_size = from->instruction_start() - from->address();
 
   // Calculate the new start address of the instructions.
-  event.new_code_start =
-      reinterpret_cast<byte*>(HeapObject::FromAddress(to)) + header_size;
-
-  code_event_handler_(&event);
-}
-
-
-void JitLogger::CodeDeleteEvent(Address from) {
-  Code* from_code = Code::cast(HeapObject::FromAddress(from));
-
-  JitCodeEvent event;
-  event.type = JitCodeEvent::CODE_REMOVED;
-  event.code_start = from_code->instruction_start();
-  event.code_len = from_code->instruction_size();
+  event.new_code_start = to + header_size;
 
   code_event_handler_(&event);
 }
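
Both CodeMoveEvent rewrites above stop assuming Code::kHeaderSize and instead derive the header size from the object itself (instruction_start() minus address()), which is what lets one path serve both Code and BytecodeArray behind AbstractCode. A minimal standalone sketch of that arithmetic, using a hypothetical simplified code object rather than V8's real classes:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for an AbstractCode object: the instructions live at
// a fixed offset past the object's own (header) address.
struct FakeCode {
  uintptr_t base;          // object (header) address
  uintptr_t header_bytes;  // bytes of header before the first instruction
  uintptr_t address() const { return base; }
  uintptr_t instruction_start() const { return base + header_bytes; }
};

int main() {
  FakeCode from{0x1000, 0x40};
  uintptr_t to = 0x2000;  // address the object was moved to

  // Same arithmetic as the hunks above: recover the header size from the old
  // object, then offset the new base by it to locate the moved instructions.
  uintptr_t header_size = from.instruction_start() - from.address();
  uintptr_t new_code_start = to + header_size;

  assert(header_size == 0x40);
  assert(new_code_start == 0x2040);
  return 0;
}
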
@@ -590,8 +524,8 @@
   return event.user_data;
 }
 
-
-void JitLogger::EndCodePosInfoEvent(Code* code, void* jit_handler_data) {
+void JitLogger::EndCodePosInfoEvent(AbstractCode* code,
+                                    void* jit_handler_data) {
   JitCodeEvent event;
   memset(&event, 0, sizeof(event));
   event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
@@ -778,19 +712,18 @@
 //
 
 Logger::Logger(Isolate* isolate)
-  : isolate_(isolate),
-    ticker_(NULL),
-    profiler_(NULL),
-    log_events_(NULL),
-    is_logging_(false),
-    log_(new Log(this)),
-    perf_basic_logger_(NULL),
-    ll_logger_(NULL),
-    jit_logger_(NULL),
-    listeners_(5),
-    is_initialized_(false) {
-}
-
+    : isolate_(isolate),
+      ticker_(NULL),
+      profiler_(NULL),
+      log_events_(NULL),
+      is_logging_(false),
+      log_(new Log(this)),
+      perf_basic_logger_(NULL),
+      perf_jit_logger_(NULL),
+      ll_logger_(NULL),
+      jit_logger_(NULL),
+      listeners_(5),
+      is_initialized_(false) {}
 
 Logger::~Logger() {
   delete log_;
@@ -934,7 +867,6 @@
 
 void Logger::EnterExternal(Isolate* isolate) {
   LOG(isolate, TimerEvent(START, TimerEventExternal::name()));
-  TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
   DCHECK(isolate->current_vm_state() == JS);
   isolate->set_current_vm_state(EXTERNAL);
 }
@@ -942,7 +874,6 @@
 
 void Logger::LeaveExternal(Isolate* isolate) {
   LOG(isolate, TimerEvent(END, TimerEventExternal::name()));
-  TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
   DCHECK(isolate->current_vm_state() == EXTERNAL);
   isolate->set_current_vm_state(JS);
 }
@@ -971,7 +902,7 @@
   //      (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
 
   Handle<Object> source =
-      Object::GetProperty(isolate, regexp, "source").ToHandleChecked();
+      JSReceiver::GetProperty(isolate, regexp, "source").ToHandleChecked();
   if (!source->IsString()) {
     msg->Append("no source");
     return;
@@ -990,19 +921,19 @@
 
   // global flag
   Handle<Object> global =
-      Object::GetProperty(isolate, regexp, "global").ToHandleChecked();
+      JSReceiver::GetProperty(isolate, regexp, "global").ToHandleChecked();
   if (global->IsTrue()) {
     msg->Append('g');
   }
   // ignorecase flag
   Handle<Object> ignorecase =
-      Object::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
+      JSReceiver::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
   if (ignorecase->IsTrue()) {
     msg->Append('i');
   }
   // multiline flag
   Handle<Object> multiline =
-      Object::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
+      JSReceiver::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
   if (multiline->IsTrue()) {
     msg->Append('m');
   }
@@ -1136,10 +1067,9 @@
   CallbackEventInternal("set ", name, entry_point);
 }
 
-
 static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
                                    Logger::LogEventsAndTags tag,
-                                   Code* code) {
+                                   AbstractCode* code) {
   DCHECK(msg);
   msg->Append("%s,%s,%d,",
               kLogEventsNames[Logger::CODE_CREATION_EVENT],
@@ -1149,9 +1079,7 @@
   msg->Append(",%d,", code->ExecutableSize());
 }
 
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
-                             Code* code,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                              const char* comment) {
   PROFILER_LOG(CodeCreateEvent(tag, code, comment));
 
@@ -1165,9 +1093,7 @@
   msg.WriteToLogFile();
 }
 
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
-                             Code* code,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                              Name* name) {
   PROFILER_LOG(CodeCreateEvent(tag, code, name));
 
@@ -1187,11 +1113,8 @@
   msg.WriteToLogFile();
 }
 
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
-                             Code* code,
-                             SharedFunctionInfo* shared,
-                             CompilationInfo* info,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                             SharedFunctionInfo* shared, CompilationInfo* info,
                              Name* name) {
   PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, name));
 
@@ -1199,7 +1122,10 @@
   CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
 
   if (!FLAG_log_code || !log_->IsEnabled()) return;
-  if (code == isolate_->builtins()->builtin(Builtins::kCompileLazy)) return;
+  if (code == AbstractCode::cast(
+                  isolate_->builtins()->builtin(Builtins::kCompileLazy))) {
+    return;
+  }
 
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
@@ -1220,10 +1146,8 @@
 // Although, it is possible to extract source and line from
 // the SharedFunctionInfo object, we left it to caller
 // to leave logging functions free from heap allocations.
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
-                             Code* code,
-                             SharedFunctionInfo* shared,
-                             CompilationInfo* info,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                             SharedFunctionInfo* shared, CompilationInfo* info,
                              Name* source, int line, int column) {
   PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column));
 
@@ -1250,9 +1174,7 @@
   msg.WriteToLogFile();
 }
 
-
-void Logger::CodeCreateEvent(LogEventsAndTags tag,
-                             Code* code,
+void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                              int args_count) {
   PROFILER_LOG(CodeCreateEvent(tag, code, args_count));
 
@@ -1266,8 +1188,7 @@
   msg.WriteToLogFile();
 }
 
-
-void Logger::CodeDisableOptEvent(Code* code,
+void Logger::CodeDisableOptEvent(AbstractCode* code,
                                  SharedFunctionInfo* shared) {
   PROFILER_LOG(CodeDisableOptEvent(code, shared));
 
@@ -1294,8 +1215,7 @@
   base::OS::SignalCodeMovingGC();
 }
 
-
-void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
+void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
   PROFILER_LOG(RegExpCodeCreateEvent(code, source));
 
   if (!is_logging_code_events()) return;
@@ -1310,33 +1230,16 @@
   msg.WriteToLogFile();
 }
 
-
-void Logger::CodeMoveEvent(Address from, Address to) {
+void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
   PROFILER_LOG(CodeMoveEvent(from, to));
 
   if (!is_logging_code_events()) return;
   CALL_LISTENERS(CodeMoveEvent(from, to));
-  MoveEventInternal(CODE_MOVE_EVENT, from, to);
+  MoveEventInternal(CODE_MOVE_EVENT, from->address(), to);
 }
 
-
-void Logger::CodeDeleteEvent(Address from) {
-  PROFILER_LOG(CodeDeleteEvent(from));
-
-  if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeDeleteEvent(from));
-
-  if (!FLAG_log_code || !log_->IsEnabled()) return;
-  Log::MessageBuilder msg(log_);
-  msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]);
-  msg.AppendAddress(from);
-  msg.WriteToLogFile();
-}
-
-
 void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
-                                     int pc_offset,
-                                     int position) {
+                                             int pc_offset, int position) {
   JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
                                   pc_offset,
                                   position,
@@ -1360,8 +1263,7 @@
   }
 }
 
-
-void Logger::CodeEndLinePosInfoRecordEvent(Code* code,
+void Logger::CodeEndLinePosInfoRecordEvent(AbstractCode* code,
                                            void* jit_handler_data) {
   JIT_LOG(EndCodePosInfoEvent(code, jit_handler_data));
 }
@@ -1376,18 +1278,6 @@
 }
 
 
-void Logger::SnapshotPositionEvent(Address addr, int pos) {
-  if (!log_->IsEnabled()) return;
-  LL_LOG(SnapshotPositionEvent(addr, pos));
-  if (!FLAG_log_snapshot_positions) return;
-  Log::MessageBuilder msg(log_);
-  msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
-  msg.AppendAddress(addr);
-  msg.Append(",%d", pos);
-  msg.WriteToLogFile();
-}
-
-
 void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
   if (!is_logging_code_events()) return;
   MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
@@ -1535,9 +1425,9 @@
 class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
  public:
   EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
-                                     Handle<Code>* code_objects,
+                                     Handle<AbstractCode>* code_objects,
                                      int* count)
-      : sfis_(sfis), code_objects_(code_objects), count_(count) { }
+      : sfis_(sfis), code_objects_(code_objects), count_(count) {}
 
   virtual void EnterContext(Context* context) {}
   virtual void LeaveContext(Context* context) {}
@@ -1551,22 +1441,22 @@
       sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
     }
     if (code_objects_ != NULL) {
-      DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
-      code_objects_[*count_] = Handle<Code>(function->code());
+      DCHECK(function->abstract_code()->kind() ==
+             AbstractCode::OPTIMIZED_FUNCTION);
+      code_objects_[*count_] = Handle<AbstractCode>(function->abstract_code());
     }
     *count_ = *count_ + 1;
   }
 
  private:
   Handle<SharedFunctionInfo>* sfis_;
-  Handle<Code>* code_objects_;
+  Handle<AbstractCode>* code_objects_;
   int* count_;
 };
 
-
 static int EnumerateCompiledFunctions(Heap* heap,
                                       Handle<SharedFunctionInfo>* sfis,
-                                      Handle<Code>* code_objects) {
+                                      Handle<AbstractCode>* code_objects) {
   HeapIterator iterator(heap);
   DisallowHeapAllocation no_gc;
   int compiled_funcs_count = 0;
@@ -1583,7 +1473,8 @@
         sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
       }
       if (code_objects != NULL) {
-        code_objects[compiled_funcs_count] = Handle<Code>(sfi->code());
+        code_objects[compiled_funcs_count] =
+            Handle<AbstractCode>(sfi->abstract_code());
       }
       ++compiled_funcs_count;
     }
@@ -1600,60 +1491,71 @@
 
 
 void Logger::LogCodeObject(Object* object) {
-  Code* code_object = Code::cast(object);
+  AbstractCode* code_object = AbstractCode::cast(object);
   LogEventsAndTags tag = Logger::STUB_TAG;
   const char* description = "Unknown code from the snapshot";
   switch (code_object->kind()) {
-    case Code::FUNCTION:
-    case Code::OPTIMIZED_FUNCTION:
+    case AbstractCode::FUNCTION:
+    case AbstractCode::INTERPRETED_FUNCTION:
+    case AbstractCode::OPTIMIZED_FUNCTION:
       return;  // We log this later using LogCompiledFunctions.
-    case Code::BINARY_OP_IC:
-    case Code::COMPARE_IC:  // fall through
-    case Code::COMPARE_NIL_IC:   // fall through
-    case Code::TO_BOOLEAN_IC:  // fall through
-    case Code::STUB:
-      description = CodeStub::MajorName(CodeStub::GetMajorKey(code_object));
+    case AbstractCode::BYTECODE_HANDLER:
+      return;  // We log it later by walking the dispatch table.
+    case AbstractCode::BINARY_OP_IC:    // fall through
+    case AbstractCode::COMPARE_IC:      // fall through
+    case AbstractCode::TO_BOOLEAN_IC:   // fall through
+
+    case AbstractCode::STUB:
+      description =
+          CodeStub::MajorName(CodeStub::GetMajorKey(code_object->GetCode()));
       if (description == NULL)
         description = "A stub from the snapshot";
       tag = Logger::STUB_TAG;
       break;
-    case Code::REGEXP:
+    case AbstractCode::REGEXP:
       description = "Regular expression code";
       tag = Logger::REG_EXP_TAG;
       break;
-    case Code::BUILTIN:
-      description = isolate_->builtins()->name(code_object->builtin_index());
+    case AbstractCode::BUILTIN:
+      description =
+          isolate_->builtins()->name(code_object->GetCode()->builtin_index());
       tag = Logger::BUILTIN_TAG;
       break;
-    case Code::HANDLER:
+    case AbstractCode::HANDLER:
       description = "An IC handler from the snapshot";
       tag = Logger::HANDLER_TAG;
       break;
-    case Code::KEYED_LOAD_IC:
+    case AbstractCode::KEYED_LOAD_IC:
       description = "A keyed load IC from the snapshot";
       tag = Logger::KEYED_LOAD_IC_TAG;
       break;
-    case Code::LOAD_IC:
+    case AbstractCode::LOAD_IC:
       description = "A load IC from the snapshot";
       tag = Logger::LOAD_IC_TAG;
       break;
-    case Code::CALL_IC:
+    case AbstractCode::CALL_IC:
       description = "A call IC from the snapshot";
       tag = Logger::CALL_IC_TAG;
       break;
-    case Code::STORE_IC:
+    case AbstractCode::STORE_IC:
       description = "A store IC from the snapshot";
       tag = Logger::STORE_IC_TAG;
       break;
-    case Code::KEYED_STORE_IC:
+    case AbstractCode::KEYED_STORE_IC:
       description = "A keyed store IC from the snapshot";
       tag = Logger::KEYED_STORE_IC_TAG;
       break;
-    case Code::WASM_FUNCTION:
-      description = "A wasm function";
+    case AbstractCode::WASM_FUNCTION:
+      description = "A Wasm function";
       tag = Logger::STUB_TAG;
       break;
-    case Code::NUMBER_OF_KINDS:
+    case AbstractCode::JS_TO_WASM_FUNCTION:
+      description = "A JavaScript to Wasm adapter";
+      tag = Logger::STUB_TAG;
+      break;
+    case AbstractCode::WASM_TO_JS_FUNCTION:
+      description = "A Wasm to JavaScript adapter";
+      tag = Logger::STUB_TAG;
       break;
   }
   PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
@@ -1668,12 +1570,34 @@
   DisallowHeapAllocation no_gc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsCode()) LogCodeObject(obj);
+    if (obj->IsBytecodeArray()) LogCodeObject(obj);
   }
 }
 
+void Logger::LogBytecodeHandlers() {
+  if (!FLAG_ignition) return;
+
+  interpreter::Interpreter* interpreter = isolate_->interpreter();
+  const int last_index = static_cast<int>(interpreter::Bytecode::kLast);
+  for (auto operand_scale = interpreter::OperandScale::kSingle;
+       operand_scale <= interpreter::OperandScale::kMaxValid;
+       operand_scale =
+           interpreter::Bytecodes::NextOperandScale(operand_scale)) {
+    for (int index = 0; index <= last_index; ++index) {
+      interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
+      if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+        Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
+        std::string bytecode_name =
+            interpreter::Bytecodes::ToString(bytecode, operand_scale);
+        CodeCreateEvent(Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(code),
+                        bytecode_name.c_str());
+      }
+    }
+  }
+}
 
 void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
-                                 Handle<Code> code) {
+                                 Handle<AbstractCode> code) {
   Handle<String> func_name(shared->DebugName());
   if (shared->script()->IsScript()) {
     Handle<Script> script(Script::cast(shared->script()));
@@ -1730,7 +1654,7 @@
   HandleScope scope(isolate_);
   const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
-  ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
+  ScopedVector<Handle<AbstractCode> > code_objects(compiled_funcs_count);
   EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
 
   // During iteration, there can be heap allocation due to
@@ -1827,11 +1751,6 @@
   if (is_initialized_) return true;
   is_initialized_ = true;
 
-  // --ll-prof implies --log-code and --log-snapshot-positions.
-  if (FLAG_ll_prof) {
-    FLAG_log_snapshot_positions = true;
-  }
-
   std::ostringstream log_file_name;
   PrepareLogFileName(log_file_name, isolate, FLAG_logfile);
   log_->Initialize(log_file_name.str().c_str());
@@ -1842,6 +1761,11 @@
     addCodeEventListener(perf_basic_logger_);
   }
 
+  if (FLAG_perf_prof) {
+    perf_jit_logger_ = new PerfJitLogger();
+    addCodeEventListener(perf_jit_logger_);
+  }
+
   if (FLAG_ll_prof) {
     ll_logger_ = new LowLevelLogger(log_file_name.str().c_str());
     addCodeEventListener(ll_logger_);
@@ -1910,6 +1834,12 @@
     perf_basic_logger_ = NULL;
   }
 
+  if (perf_jit_logger_) {
+    removeCodeEventListener(perf_jit_logger_);
+    delete perf_jit_logger_;
+    perf_jit_logger_ = NULL;
+  }
+
   if (ll_logger_) {
     removeCodeEventListener(ll_logger_);
     delete ll_logger_;
diff --git a/src/log.h b/src/log.h
index 1a454da..fdc5047 100644
--- a/src/log.h
+++ b/src/log.h
@@ -82,63 +82,61 @@
       logger->Call;                                 \
   } while (false)
 
-
-#define LOG_EVENTS_AND_TAGS_LIST(V)                                     \
-  V(CODE_CREATION_EVENT,            "code-creation")                    \
-  V(CODE_DISABLE_OPT_EVENT,         "code-disable-optimization")        \
-  V(CODE_MOVE_EVENT,                "code-move")                        \
-  V(CODE_DELETE_EVENT,              "code-delete")                      \
-  V(CODE_MOVING_GC,                 "code-moving-gc")                   \
-  V(SHARED_FUNC_MOVE_EVENT,         "sfi-move")                         \
-  V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos")                     \
-  V(SNAPSHOT_CODE_NAME_EVENT,       "snapshot-code-name")               \
-  V(TICK_EVENT,                     "tick")                             \
-  V(REPEAT_META_EVENT,              "repeat")                           \
-  V(BUILTIN_TAG,                    "Builtin")                          \
-  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak")                   \
-  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")           \
-  V(CALL_INITIALIZE_TAG,            "CallInitialize")                   \
-  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic")                  \
-  V(CALL_MISS_TAG,                  "CallMiss")                         \
-  V(CALL_NORMAL_TAG,                "CallNormal")                       \
-  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic")               \
-  V(LOAD_INITIALIZE_TAG,            "LoadInitialize")                   \
-  V(LOAD_PREMONOMORPHIC_TAG,        "LoadPreMonomorphic")               \
-  V(LOAD_MEGAMORPHIC_TAG,           "LoadMegamorphic")                  \
-  V(STORE_INITIALIZE_TAG,           "StoreInitialize")                  \
-  V(STORE_PREMONOMORPHIC_TAG,       "StorePreMonomorphic")              \
-  V(STORE_GENERIC_TAG,              "StoreGeneric")                     \
-  V(STORE_MEGAMORPHIC_TAG,          "StoreMegamorphic")                 \
-  V(KEYED_CALL_DEBUG_BREAK_TAG,     "KeyedCallDebugBreak")              \
-  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG,                               \
-    "KeyedCallDebugPrepareStepIn")                                      \
-  V(KEYED_CALL_INITIALIZE_TAG,      "KeyedCallInitialize")              \
-  V(KEYED_CALL_MEGAMORPHIC_TAG,     "KeyedCallMegamorphic")             \
-  V(KEYED_CALL_MISS_TAG,            "KeyedCallMiss")                    \
-  V(KEYED_CALL_NORMAL_TAG,          "KeyedCallNormal")                  \
-  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")          \
-  V(CALLBACK_TAG,                   "Callback")                         \
-  V(EVAL_TAG,                       "Eval")                             \
-  V(FUNCTION_TAG,                   "Function")                         \
-  V(HANDLER_TAG,                    "Handler")                          \
-  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC")                      \
-  V(KEYED_LOAD_POLYMORPHIC_IC_TAG,  "KeyedLoadPolymorphicIC")           \
-  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC")       \
-  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC")                     \
-  V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC")          \
-  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")     \
-  V(LAZY_COMPILE_TAG,               "LazyCompile")                      \
-  V(CALL_IC_TAG,                    "CallIC")                           \
-  V(LOAD_IC_TAG,                    "LoadIC")                           \
-  V(LOAD_POLYMORPHIC_IC_TAG,        "LoadPolymorphicIC")                \
-  V(REG_EXP_TAG,                    "RegExp")                           \
-  V(SCRIPT_TAG,                     "Script")                           \
-  V(STORE_IC_TAG,                   "StoreIC")                          \
-  V(STORE_POLYMORPHIC_IC_TAG,       "StorePolymorphicIC")               \
-  V(STUB_TAG,                       "Stub")                             \
-  V(NATIVE_FUNCTION_TAG,            "Function")                         \
-  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile")                      \
-  V(NATIVE_SCRIPT_TAG,              "Script")
+#define LOG_EVENTS_AND_TAGS_LIST(V)                                      \
+  V(CODE_CREATION_EVENT, "code-creation")                                \
+  V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization")                 \
+  V(CODE_MOVE_EVENT, "code-move")                                        \
+  V(CODE_DELETE_EVENT, "code-delete")                                    \
+  V(CODE_MOVING_GC, "code-moving-gc")                                    \
+  V(SHARED_FUNC_MOVE_EVENT, "sfi-move")                                  \
+  V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name")                      \
+  V(TICK_EVENT, "tick")                                                  \
+  V(REPEAT_META_EVENT, "repeat")                                         \
+  V(BUILTIN_TAG, "Builtin")                                              \
+  V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak")                              \
+  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")            \
+  V(CALL_INITIALIZE_TAG, "CallInitialize")                               \
+  V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic")                             \
+  V(CALL_MISS_TAG, "CallMiss")                                           \
+  V(CALL_NORMAL_TAG, "CallNormal")                                       \
+  V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic")                      \
+  V(LOAD_INITIALIZE_TAG, "LoadInitialize")                               \
+  V(LOAD_PREMONOMORPHIC_TAG, "LoadPreMonomorphic")                       \
+  V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic")                             \
+  V(STORE_INITIALIZE_TAG, "StoreInitialize")                             \
+  V(STORE_PREMONOMORPHIC_TAG, "StorePreMonomorphic")                     \
+  V(STORE_GENERIC_TAG, "StoreGeneric")                                   \
+  V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic")                           \
+  V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak")                   \
+  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, "KeyedCallDebugPrepareStepIn") \
+  V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize")                    \
+  V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic")                  \
+  V(KEYED_CALL_MISS_TAG, "KeyedCallMiss")                                \
+  V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal")                            \
+  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")           \
+  V(CALLBACK_TAG, "Callback")                                            \
+  V(EVAL_TAG, "Eval")                                                    \
+  V(FUNCTION_TAG, "Function")                                            \
+  V(HANDLER_TAG, "Handler")                                              \
+  V(BYTECODE_HANDLER_TAG, "BytecodeHandler")                             \
+  V(KEYED_LOAD_IC_TAG, "KeyedLoadIC")                                    \
+  V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC")             \
+  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC")        \
+  V(KEYED_STORE_IC_TAG, "KeyedStoreIC")                                  \
+  V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC")           \
+  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")      \
+  V(LAZY_COMPILE_TAG, "LazyCompile")                                     \
+  V(CALL_IC_TAG, "CallIC")                                               \
+  V(LOAD_IC_TAG, "LoadIC")                                               \
+  V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC")                        \
+  V(REG_EXP_TAG, "RegExp")                                               \
+  V(SCRIPT_TAG, "Script")                                                \
+  V(STORE_IC_TAG, "StoreIC")                                             \
+  V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC")                      \
+  V(STUB_TAG, "Stub")                                                    \
+  V(NATIVE_FUNCTION_TAG, "Function")                                     \
+  V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile")                              \
+  V(NATIVE_SCRIPT_TAG, "Script")
 // Note that 'NATIVE_' cases for functions and scripts are mapped onto
 // original tags when writing to the log.
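
The reformatted LOG_EVENTS_AND_TAGS_LIST is an X-macro: each V(NAME, "string") entry is expanded once into an enum value and once into a parallel name table (the kLogEventsNames lookups in log.cc above rely on this). A small self-contained illustration of the pattern, using a toy list rather than the real one:

#include <cstdio>

// Toy X-macro list in the same shape as LOG_EVENTS_AND_TAGS_LIST.
#define TOY_EVENTS_LIST(V)                \
  V(CODE_CREATION_EVENT, "code-creation") \
  V(CODE_MOVE_EVENT, "code-move")         \
  V(TICK_EVENT, "tick")

// First expansion: an enum of the tags.
#define DECLARE_ENUM(name, str) name,
enum ToyEvent { TOY_EVENTS_LIST(DECLARE_ENUM) NUMBER_OF_TOY_EVENTS };
#undef DECLARE_ENUM

// Second expansion: the parallel table of log strings, indexed by the enum.
#define DECLARE_NAME(name, str) str,
static const char* const kToyEventNames[] = {TOY_EVENTS_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  std::printf("%s\n", kToyEventNames[CODE_MOVE_EVENT]);  // prints "code-move"
  return 0;
}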
 
@@ -146,6 +144,7 @@
 class JitLogger;
 class PerfBasicLogger;
 class LowLevelLogger;
+class PerfJitLogger;
 class Sampler;
 
 class Logger {
@@ -224,30 +223,24 @@
   void GetterCallbackEvent(Name* name, Address entry_point);
   void SetterCallbackEvent(Name* name, Address entry_point);
   // Emits a code create event.
-  void CodeCreateEvent(LogEventsAndTags tag,
-                       Code* code, const char* source);
-  void CodeCreateEvent(LogEventsAndTags tag,
-                       Code* code, Name* name);
-  void CodeCreateEvent(LogEventsAndTags tag,
-                       Code* code,
-                       SharedFunctionInfo* shared,
-                       CompilationInfo* info,
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       const char* source);
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name);
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, CompilationInfo* info,
                        Name* name);
-  void CodeCreateEvent(LogEventsAndTags tag,
-                       Code* code,
-                       SharedFunctionInfo* shared,
-                       CompilationInfo* info,
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, CompilationInfo* info,
                        Name* source, int line, int column);
-  void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+  void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+                       int args_count);
   // Emits a code deoptimization event.
-  void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
+  void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
   void CodeMovingGCEvent();
   // Emits a code create event for a RegExp.
-  void RegExpCodeCreateEvent(Code* code, String* source);
+  void RegExpCodeCreateEvent(AbstractCode* code, String* source);
   // Emits a code move event.
-  void CodeMoveEvent(Address from, Address to);
-  // Emits a code delete event.
-  void CodeDeleteEvent(Address from);
+  void CodeMoveEvent(AbstractCode* from, Address to);
   // Emits a code line info add event with Postion type.
   void CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
                                        int pc_offset,
@@ -260,12 +253,12 @@
   void CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder);
   // Emits a code line info finish record event.
   // It's the callee's responsibility to dispose the parameter jit_handler_data.
-  void CodeEndLinePosInfoRecordEvent(Code* code, void* jit_handler_data);
+  void CodeEndLinePosInfoRecordEvent(AbstractCode* code,
+                                     void* jit_handler_data);
 
   void SharedFunctionInfoMoveEvent(Address from, Address to);
 
   void CodeNameEvent(Address addr, int pos, const char* code_name);
-  void SnapshotPositionEvent(Address addr, int pos);
 
   // ==== Events logged by --log-gc. ====
   // Heap sampling events: start, end, and individual types.
@@ -316,13 +309,15 @@
   void StopProfiler();
 
   void LogExistingFunction(Handle<SharedFunctionInfo> shared,
-                           Handle<Code> code);
+                           Handle<AbstractCode> code);
   // Logs all compiled functions found in the heap.
   void LogCompiledFunctions();
   // Logs all accessor callbacks found in the heap.
   void LogAccessorCallbacks();
   // Used for logging stubs found in the snapshot.
   void LogCodeObjects();
+  // Used for logging bytecode handlers found in the snapshot.
+  void LogBytecodeHandlers();
 
   // Converts tag to a corresponding NATIVE_... if the script is native.
   INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
@@ -397,6 +392,7 @@
   bool is_logging_;
   Log* log_;
   PerfBasicLogger* perf_basic_logger_;
+  PerfJitLogger* perf_jit_logger_;
   LowLevelLogger* ll_logger_;
   JitLogger* jit_logger_;
   List<CodeEventListener*> listeners_;
@@ -413,6 +409,7 @@
 #define TIMER_EVENTS_LIST(V)    \
   V(RecompileSynchronous, true) \
   V(RecompileConcurrent, true)  \
+  V(CompileIgnition, true)      \
   V(CompileFullCode, true)      \
   V(OptimizeCode, true)         \
   V(CompileCode, true)          \
@@ -446,83 +443,86 @@
   Isolate* isolate_;
 };
 
+class PositionsRecorder BASE_EMBEDDED {
+ public:
+  PositionsRecorder() { jit_handler_data_ = NULL; }
+
+  void AttachJITHandlerData(void* user_data) { jit_handler_data_ = user_data; }
+
+  void* DetachJITHandlerData() {
+    void* old_data = jit_handler_data_;
+    jit_handler_data_ = NULL;
+    return old_data;
+  }
+
+ protected:
+  // Currently jit_handler_data_ is used to store JITHandler-specific data
+  // over the lifetime of a PositionsRecorder
+  void* jit_handler_data_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
+};
 
 class CodeEventListener {
  public:
   virtual ~CodeEventListener() {}
 
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
+  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                                const char* comment) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
+  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                                Name* name) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
+  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                                SharedFunctionInfo* shared,
-                               CompilationInfo* info,
-                               Name* name) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
+                               CompilationInfo* info, Name* name) = 0;
+  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                                SharedFunctionInfo* shared,
-                               CompilationInfo* info,
-                               Name* source,
-                               int line, int column) = 0;
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
+                               CompilationInfo* info, Name* source, int line,
+                               int column) = 0;
+  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                                int args_count) = 0;
   virtual void CallbackEvent(Name* name, Address entry_point) = 0;
   virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
   virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
-  virtual void RegExpCodeCreateEvent(Code* code, String* source) = 0;
-  virtual void CodeMoveEvent(Address from, Address to) = 0;
-  virtual void CodeDeleteEvent(Address from) = 0;
+  virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
+  virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
   virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
   virtual void CodeMovingGCEvent() = 0;
-  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) = 0;
+  virtual void CodeDisableOptEvent(AbstractCode* code,
+                                   SharedFunctionInfo* shared) = 0;
 };
 
 
 class CodeEventLogger : public CodeEventListener {
  public:
   CodeEventLogger();
-  virtual ~CodeEventLogger();
+  ~CodeEventLogger() override;
 
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
-                               const char* comment);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
-                               Name* name);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
-                               int args_count);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
-                               SharedFunctionInfo* shared,
-                               CompilationInfo* info,
-                               Name* name);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
-                               SharedFunctionInfo* shared,
-                               CompilationInfo* info,
-                               Name* source,
-                               int line, int column);
-  virtual void RegExpCodeCreateEvent(Code* code, String* source);
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       const char* comment) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       Name* name) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       int args_count) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, CompilationInfo* info,
+                       Name* name) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, CompilationInfo* info,
+                       Name* source, int line, int column) override;
+  void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
 
-  virtual void CallbackEvent(Name* name, Address entry_point) { }
-  virtual void GetterCallbackEvent(Name* name, Address entry_point) { }
-  virtual void SetterCallbackEvent(Name* name, Address entry_point) { }
-  virtual void SharedFunctionInfoMoveEvent(Address from, Address to) { }
-  virtual void CodeMovingGCEvent() { }
+  void CallbackEvent(Name* name, Address entry_point) override {}
+  void GetterCallbackEvent(Name* name, Address entry_point) override {}
+  void SetterCallbackEvent(Name* name, Address entry_point) override {}
+  void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+  void CodeMovingGCEvent() override {}
 
  private:
   class NameBuffer;
 
-  virtual void LogRecordedBuffer(Code* code,
-                                 SharedFunctionInfo* shared,
-                                 const char* name,
-                                 int length) = 0;
+  virtual void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+                                 const char* name, int length) = 0;
 
   NameBuffer* name_buffer_;
 };
diff --git a/src/lookup.cc b/src/lookup.cc
index bad5a20..3df8752 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -45,6 +45,25 @@
   return LookupIterator(receiver, name, configuration);
 }
 
+template <bool is_element>
+void LookupIterator::Start() {
+  DisallowHeapAllocation no_gc;
+
+  has_property_ = false;
+  state_ = NOT_FOUND;
+  holder_ = initial_holder_;
+
+  JSReceiver* holder = *holder_;
+  Map* map = holder->map();
+
+  state_ = LookupInHolder<is_element>(map, holder);
+  if (IsFound()) return;
+
+  NextInternal<is_element>(map, holder);
+}
+
+template void LookupIterator::Start<true>();
+template void LookupIterator::Start<false>();
 
 void LookupIterator::Next() {
   DCHECK_NE(JSPROXY, state_);
@@ -55,38 +74,47 @@
   JSReceiver* holder = *holder_;
   Map* map = holder->map();
 
-  // Perform lookup on current holder.
-  state_ = LookupInHolder(map, holder);
-  if (IsFound()) return;
+  if (map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+    state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
+                         : LookupInSpecialHolder<false>(map, holder);
+    if (IsFound()) return;
+  }
 
-  // Continue lookup if lookup on current holder failed.
+  IsElement() ? NextInternal<true>(map, holder)
+              : NextInternal<false>(map, holder);
+}
+
+template <bool is_element>
+void LookupIterator::NextInternal(Map* map, JSReceiver* holder) {
   do {
     JSReceiver* maybe_holder = NextHolder(map);
     if (maybe_holder == nullptr) {
       if (interceptor_state_ == InterceptorState::kSkipNonMasking) {
-        RestartLookupForNonMaskingInterceptors();
+        RestartLookupForNonMaskingInterceptors<is_element>();
         return;
       }
-      break;
+      state_ = NOT_FOUND;
+      if (holder != *holder_) holder_ = handle(holder, isolate_);
+      return;
     }
     holder = maybe_holder;
     map = holder->map();
-    state_ = LookupInHolder(map, holder);
+    state_ = LookupInHolder<is_element>(map, holder);
   } while (!IsFound());
 
-  if (holder != *holder_) holder_ = handle(holder, isolate_);
+  holder_ = handle(holder, isolate_);
 }
 
-
+template <bool is_element>
 void LookupIterator::RestartInternal(InterceptorState interceptor_state) {
-  state_ = NOT_FOUND;
   interceptor_state_ = interceptor_state;
   property_details_ = PropertyDetails::Empty();
-  holder_ = initial_holder_;
   number_ = DescriptorArray::kNotFound;
-  Next();
+  Start<is_element>();
 }
 
+template void LookupIterator::RestartInternal<true>(InterceptorState);
+template void LookupIterator::RestartInternal<false>(InterceptorState);
 
 // static
 Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
@@ -116,29 +144,17 @@
   return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
 }
 
-
-Handle<JSObject> LookupIterator::GetStoreTarget() const {
-  if (receiver_->IsJSGlobalProxy()) {
-    Object* prototype = JSGlobalProxy::cast(*receiver_)->map()->prototype();
-    if (!prototype->IsNull()) {
-      return handle(JSGlobalObject::cast(prototype), isolate_);
-    }
-  }
-  return Handle<JSObject>::cast(receiver_);
-}
-
-
 bool LookupIterator::HasAccess() const {
   DCHECK_EQ(ACCESS_CHECK, state_);
   return isolate_->MayAccess(handle(isolate_->context()),
                              GetHolder<JSObject>());
 }
 
-
+template <bool is_element>
 void LookupIterator::ReloadPropertyInformation() {
   state_ = BEFORE_PROPERTY;
   interceptor_state_ = InterceptorState::kUninitialized;
-  state_ = LookupInHolder(holder_->map(), *holder_);
+  state_ = LookupInHolder<is_element>(holder_->map(), *holder_);
   DCHECK(IsFound() || !holder_->HasFastProperties());
 }
 
@@ -156,14 +172,11 @@
   return false;
 }
 
-void LookupIterator::UpdateProtector() {
-  if (!FLAG_harmony_species) return;
-
-  if (IsElement()) return;
+void LookupIterator::InternalUpdateProtector() {
   if (isolate_->bootstrapper()->IsActive()) return;
   if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
 
-  if (*name_ == *isolate_->factory()->constructor_string()) {
+  if (*name_ == heap()->constructor_string()) {
     // Setting the constructor property could change an instance's @@species
     if (holder_->IsJSArray()) {
       isolate_->CountUsage(
@@ -178,7 +191,7 @@
         isolate_->InvalidateArraySpeciesProtector();
       }
     }
-  } else if (*name_ == *isolate_->factory()->species_symbol()) {
+  } else if (*name_ == heap()->species_symbol()) {
     // Setting the Symbol.species property of any Array constructor invalidates
     // the species protector
     if (HolderIsInContextIndex(Context::ARRAY_FUNCTION_INDEX)) {
@@ -228,7 +241,7 @@
   }
 
   JSObject::MigrateToMap(holder, new_map);
-  ReloadPropertyInformation();
+  ReloadPropertyInformation<false>();
 }
 
 
@@ -243,19 +256,23 @@
     Handle<FixedArrayBase> elements(holder->elements());
     holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
                                                attributes);
-  } else if (!holder->HasFastProperties()) {
-    PropertyDetails details(attributes, v8::internal::DATA, 0,
-                            PropertyCellType::kMutable);
-    JSObject::SetNormalizedProperty(holder, name(), value, details);
+    ReloadPropertyInformation<true>();
   } else {
-    Handle<Map> old_map(holder->map(), isolate_);
-    Handle<Map> new_map = Map::ReconfigureExistingProperty(
-        old_map, descriptor_number(), i::kData, attributes);
-    new_map = Map::PrepareForDataProperty(new_map, descriptor_number(), value);
-    JSObject::MigrateToMap(holder, new_map);
+    if (!holder->HasFastProperties()) {
+      PropertyDetails details(attributes, v8::internal::DATA, 0,
+                              PropertyCellType::kMutable);
+      JSObject::SetNormalizedProperty(holder, name(), value, details);
+    } else {
+      Handle<Map> old_map(holder->map(), isolate_);
+      Handle<Map> new_map = Map::ReconfigureExistingProperty(
+          old_map, descriptor_number(), i::kData, attributes);
+      new_map =
+          Map::PrepareForDataProperty(new_map, descriptor_number(), value);
+      JSObject::MigrateToMap(holder, new_map);
+    }
+    ReloadPropertyInformation<false>();
   }
 
-  ReloadPropertyInformation();
   WriteDataValue(value);
 
 #if VERIFY_HEAP
@@ -323,7 +340,7 @@
     property_details_ = transition->GetLastDescriptorDetails();
     state_ = DATA;
   } else {
-    ReloadPropertyInformation();
+    ReloadPropertyInformation<false>();
   }
 }
 
@@ -342,7 +359,7 @@
     if (holder->HasFastProperties()) {
       JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
                                     "DeletingProperty");
-      ReloadPropertyInformation();
+      ReloadPropertyInformation<false>();
     }
     // TODO(verwaest): Get rid of the name_ argument.
     JSReceiver::DeleteNormalizedProperty(holder, name_, number_);
@@ -364,14 +381,31 @@
   Handle<JSObject> receiver = GetStoreTarget();
 
   if (!IsElement() && !receiver->map()->is_dictionary_map()) {
-    holder_ = receiver;
     Handle<Map> old_map(receiver->map(), isolate_);
+
+    if (!holder_.is_identical_to(receiver)) {
+      holder_ = receiver;
+      state_ = NOT_FOUND;
+    } else if (state_ == INTERCEPTOR) {
+      LookupInRegularHolder<false>(*old_map, *holder_);
+    }
+    int descriptor =
+        IsFound() ? static_cast<int>(number_) : DescriptorArray::kNotFound;
+
     Handle<Map> new_map = Map::TransitionToAccessorProperty(
-        old_map, name_, component, accessor, attributes);
+        old_map, name_, descriptor, component, accessor, attributes);
+    bool simple_transition = new_map->GetBackPointer() == receiver->map();
     JSObject::MigrateToMap(receiver, new_map);
 
-    ReloadPropertyInformation();
+    if (simple_transition) {
+      int number = new_map->LastAdded();
+      number_ = static_cast<uint32_t>(number);
+      property_details_ = new_map->GetLastDescriptorDetails();
+      state_ = ACCESSOR;
+      return;
+    }
 
+    ReloadPropertyInformation<false>();
     if (!new_map->is_dictionary_map()) return;
   }
 
@@ -430,6 +464,8 @@
     } else {
       receiver->set_elements(*dictionary);
     }
+
+    ReloadPropertyInformation<true>();
   } else {
     PropertyNormalizationMode mode = receiver->map()->is_prototype_map()
                                          ? KEEP_INOBJECT_PROPERTIES
@@ -440,9 +476,9 @@
 
     JSObject::SetNormalizedProperty(receiver, name_, pair, details);
     JSObject::ReoptimizeIfPrototype(receiver);
-  }
 
-  ReloadPropertyInformation();
+    ReloadPropertyInformation<false>();
+  }
 }
 
 
@@ -451,13 +487,13 @@
   // Optimization that only works if configuration_ is not mutable.
   if (!check_prototype_chain()) return true;
   DisallowHeapAllocation no_gc;
+  if (*receiver_ == *holder_) return true;
   if (!receiver_->IsJSReceiver()) return false;
   JSReceiver* current = JSReceiver::cast(*receiver_);
   JSReceiver* object = *holder_;
-  if (current == object) return true;
   if (!current->map()->has_hidden_prototype()) return false;
   // JSProxy do not occur as hidden prototypes.
-  if (current->IsJSProxy()) return false;
+  if (object->IsJSProxy()) return false;
   PrototypeIterator iter(isolate(), current,
                          PrototypeIterator::START_AT_PROTOTYPE,
                          PrototypeIterator::END_AT_NON_HIDDEN);
@@ -581,15 +617,9 @@
   }
 }
 
-
-bool LookupIterator::HasInterceptor(Map* map) const {
-  if (IsElement()) return map->has_indexed_interceptor();
-  return map->has_named_interceptor();
-}
-
-
+template <bool is_element>
 bool LookupIterator::SkipInterceptor(JSObject* holder) {
-  auto info = GetInterceptor(holder);
+  auto info = GetInterceptor<is_element>(holder);
   // TODO(dcarney): check for symbol/can_intercept_symbols here as well.
   if (info->non_masking()) {
     switch (interceptor_state_) {
@@ -605,10 +635,9 @@
   return interceptor_state_ == InterceptorState::kProcessNonMasking;
 }
 
-
 JSReceiver* LookupIterator::NextHolder(Map* map) {
   DisallowHeapAllocation no_gc;
-  if (!map->prototype()->IsJSReceiver()) return NULL;
+  if (map->prototype() == heap()->null_value()) return NULL;
 
   DCHECK(!map->IsJSGlobalProxyMap() || map->has_hidden_prototype());
 
@@ -635,45 +664,37 @@
              : NOT_FOUND;
 }
 
-LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
-                                                     JSReceiver* const holder) {
+namespace {
+
+template <bool is_element>
+bool HasInterceptor(Map* map) {
+  return is_element ? map->has_indexed_interceptor()
+                    : map->has_named_interceptor();
+}
+
+}  // namespace
+
+template <bool is_element>
+LookupIterator::State LookupIterator::LookupInSpecialHolder(
+    Map* const map, JSReceiver* const holder) {
   STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
-  DisallowHeapAllocation no_gc;
-  if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
-    return LookupNonMaskingInterceptorInHolder(map, holder);
-  }
   switch (state_) {
     case NOT_FOUND:
       if (map->IsJSProxyMap()) {
-        if (IsElement() || !name_->IsPrivate()) return JSPROXY;
+        if (is_element || !name_->IsPrivate()) return JSPROXY;
       }
       if (map->is_access_check_needed()) {
-        if (IsElement() || !name_->IsPrivate()) return ACCESS_CHECK;
+        if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
       }
     // Fall through.
     case ACCESS_CHECK:
-      if (check_interceptor() && HasInterceptor(map) &&
-          !SkipInterceptor(JSObject::cast(holder))) {
-        if (IsElement() || !name_->IsPrivate()) return INTERCEPTOR;
+      if (check_interceptor() && HasInterceptor<is_element>(map) &&
+          !SkipInterceptor<is_element>(JSObject::cast(holder))) {
+        if (is_element || !name_->IsPrivate()) return INTERCEPTOR;
       }
     // Fall through.
     case INTERCEPTOR:
-      if (IsElement()) {
-        JSObject* js_object = JSObject::cast(holder);
-        ElementsAccessor* accessor = js_object->GetElementsAccessor();
-        FixedArrayBase* backing_store = js_object->elements();
-        number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
-        if (number_ == kMaxUInt32) {
-          return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
-        }
-        property_details_ = accessor->GetDetails(js_object, number_);
-      } else if (!map->is_dictionary_map()) {
-        DescriptorArray* descriptors = map->instance_descriptors();
-        int number = descriptors->SearchWithCache(isolate_, *name_, map);
-        if (number == DescriptorArray::kNotFound) return NotFound(holder);
-        number_ = static_cast<uint32_t>(number);
-        property_details_ = descriptors->GetDetails(number_);
-      } else if (map->IsJSGlobalObjectMap()) {
+      if (!is_element && map->IsJSGlobalObjectMap()) {
         GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
         int number = dict->FindEntry(name_);
         if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
@@ -682,20 +703,15 @@
         PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
         if (cell->value()->IsTheHole()) return NOT_FOUND;
         property_details_ = cell->property_details();
-      } else {
-        NameDictionary* dict = holder->property_dictionary();
-        int number = dict->FindEntry(name_);
-        if (number == NameDictionary::kNotFound) return NotFound(holder);
-        number_ = static_cast<uint32_t>(number);
-        property_details_ = dict->DetailsAt(number_);
+        has_property_ = true;
+        switch (property_details_.kind()) {
+          case v8::internal::kData:
+            return DATA;
+          case v8::internal::kAccessor:
+            return ACCESSOR;
+        }
       }
-      has_property_ = true;
-      switch (property_details_.kind()) {
-        case v8::internal::kData:
-          return DATA;
-        case v8::internal::kAccessor:
-          return ACCESSOR;
-      }
+      return LookupInRegularHolder<is_element>(map, holder);
     case ACCESSOR:
     case DATA:
       return NOT_FOUND;
@@ -705,22 +721,47 @@
       UNREACHABLE();
   }
   UNREACHABLE();
-  return state_;
+  return NOT_FOUND;
 }
 
-
-LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
+template <bool is_element>
+LookupIterator::State LookupIterator::LookupInRegularHolder(
     Map* const map, JSReceiver* const holder) {
-  switch (state_) {
-    case NOT_FOUND:
-      if (check_interceptor() && HasInterceptor(map) &&
-          !SkipInterceptor(JSObject::cast(holder))) {
-        return INTERCEPTOR;
-      }
-    // Fall through.
-    default:
-      return NOT_FOUND;
+  DisallowHeapAllocation no_gc;
+  if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
+    return NOT_FOUND;
   }
+
+  if (is_element) {
+    JSObject* js_object = JSObject::cast(holder);
+    ElementsAccessor* accessor = js_object->GetElementsAccessor();
+    FixedArrayBase* backing_store = js_object->elements();
+    number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
+    if (number_ == kMaxUInt32) {
+      return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
+    }
+    property_details_ = accessor->GetDetails(js_object, number_);
+  } else if (!map->is_dictionary_map()) {
+    DescriptorArray* descriptors = map->instance_descriptors();
+    int number = descriptors->SearchWithCache(isolate_, *name_, map);
+    if (number == DescriptorArray::kNotFound) return NotFound(holder);
+    number_ = static_cast<uint32_t>(number);
+    property_details_ = descriptors->GetDetails(number_);
+  } else {
+    NameDictionary* dict = holder->property_dictionary();
+    int number = dict->FindEntry(name_);
+    if (number == NameDictionary::kNotFound) return NotFound(holder);
+    number_ = static_cast<uint32_t>(number);
+    property_details_ = dict->DetailsAt(number_);
+  }
+  has_property_ = true;
+  switch (property_details_.kind()) {
+    case v8::internal::kData:
+      return DATA;
+    case v8::internal::kAccessor:
+      return ACCESSOR;
+  }
+
   UNREACHABLE();
   return state_;
 }
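
The lookup.cc changes above replace repeated runtime IsElement() checks inside the lookup loop with a template<bool is_element> parameter, chosen once at the entry point (IsElement() ? NextInternal<true>(...) : NextInternal<false>(...)) and explicitly instantiated for both paths. A minimal sketch of that dispatch pattern with hypothetical types, not V8's real LookupIterator:

#include <cstdio>

// Hypothetical miniature of the pattern: one templated implementation,
// instantiated for both the element and the named-property path, selected
// once by a runtime flag at the public entry point.
class ToyLookup {
 public:
  explicit ToyLookup(bool is_element) : is_element_(is_element) {}

  void Start() {
    // Branch once here; everything downstream is compiled per path.
    is_element_ ? StartInternal<true>() : StartInternal<false>();
  }

 private:
  template <bool is_element>
  void StartInternal() {
    if (is_element) {
      std::printf("element path: probe the elements backing store\n");
    } else {
      std::printf("named path: probe descriptors / dictionaries\n");
    }
  }

  bool is_element_;
};

int main() {
  ToyLookup(true).Start();   // element path
  ToyLookup(false).Start();  // named path
  return 0;
}
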
diff --git a/src/lookup.h b/src/lookup.h
index 0c298d9..8005f32 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -47,81 +47,71 @@
   LookupIterator(Handle<Object> receiver, Handle<Name> name,
                  Configuration configuration = DEFAULT)
       : configuration_(ComputeConfiguration(configuration, name)),
-        state_(NOT_FOUND),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(name->GetIsolate()),
         name_(isolate_->factory()->InternalizeName(name)),
+        receiver_(receiver),
+        initial_holder_(GetRoot(isolate_, receiver)),
         // kMaxUInt32 isn't a valid index.
         index_(kMaxUInt32),
-        receiver_(receiver),
-        holder_(GetRoot(isolate_, receiver)),
-        initial_holder_(holder_),
         number_(DescriptorArray::kNotFound) {
 #ifdef DEBUG
     uint32_t index;  // Assert that the name is not an array index.
     DCHECK(!name->AsArrayIndex(&index));
 #endif  // DEBUG
-    Next();
+    Start<false>();
   }
 
   LookupIterator(Handle<Object> receiver, Handle<Name> name,
                  Handle<JSReceiver> holder,
                  Configuration configuration = DEFAULT)
       : configuration_(ComputeConfiguration(configuration, name)),
-        state_(NOT_FOUND),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(name->GetIsolate()),
         name_(isolate_->factory()->InternalizeName(name)),
+        receiver_(receiver),
+        initial_holder_(holder),
         // kMaxUInt32 isn't a valid index.
         index_(kMaxUInt32),
-        receiver_(receiver),
-        holder_(holder),
-        initial_holder_(holder_),
         number_(DescriptorArray::kNotFound) {
 #ifdef DEBUG
     uint32_t index;  // Assert that the name is not an array index.
     DCHECK(!name->AsArrayIndex(&index));
 #endif  // DEBUG
-    Next();
+    Start<false>();
   }
 
   LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
                  Configuration configuration = DEFAULT)
       : configuration_(configuration),
-        state_(NOT_FOUND),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(isolate),
-        name_(),
-        index_(index),
         receiver_(receiver),
-        holder_(GetRoot(isolate, receiver, index)),
-        initial_holder_(holder_),
+        initial_holder_(GetRoot(isolate, receiver, index)),
+        index_(index),
         number_(DescriptorArray::kNotFound) {
     // kMaxUInt32 isn't a valid index.
     DCHECK_NE(kMaxUInt32, index_);
-    Next();
+    Start<true>();
   }
 
   LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
                  Handle<JSReceiver> holder,
                  Configuration configuration = DEFAULT)
       : configuration_(configuration),
-        state_(NOT_FOUND),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(isolate),
-        name_(),
-        index_(index),
         receiver_(receiver),
-        holder_(holder),
-        initial_holder_(holder_),
+        initial_holder_(holder),
+        index_(index),
         number_(DescriptorArray::kNotFound) {
     // kMaxUInt32 isn't a valid index.
     DCHECK_NE(kMaxUInt32, index_);
-    Next();
+    Start<true>();
   }
 
   static LookupIterator PropertyOrElement(
@@ -154,7 +144,10 @@
       Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
       bool* success, Configuration configuration = DEFAULT);
 
-  void Restart() { RestartInternal(InterceptorState::kUninitialized); }
+  void Restart() {
+    InterceptorState state = InterceptorState::kUninitialized;
+    IsElement() ? RestartInternal<true>(state) : RestartInternal<false>(state);
+  }
 
   Isolate* isolate() const { return isolate_; }
   State state() const { return state_; }
@@ -184,7 +177,17 @@
   Heap* heap() const { return isolate_->heap(); }
   Factory* factory() const { return isolate_->factory(); }
   Handle<Object> GetReceiver() const { return receiver_; }
-  Handle<JSObject> GetStoreTarget() const;
+
+  Handle<JSObject> GetStoreTarget() const {
+    if (receiver_->IsJSGlobalProxy()) {
+      Map* map = JSGlobalProxy::cast(*receiver_)->map();
+      if (map->has_hidden_prototype()) {
+        return handle(JSGlobalObject::cast(map->prototype()), isolate_);
+      }
+    }
+    return Handle<JSObject>::cast(receiver_);
+  }
+
   bool is_dictionary_holder() const { return !holder_->HasFastProperties(); }
   Handle<Map> transition_map() const {
     DCHECK_EQ(TRANSITION, state_);
@@ -252,13 +255,24 @@
   Handle<Object> GetAccessors() const;
   inline Handle<InterceptorInfo> GetInterceptor() const {
     DCHECK_EQ(INTERCEPTOR, state_);
-    return handle(GetInterceptor(JSObject::cast(*holder_)), isolate_);
+    InterceptorInfo* result =
+        IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
+                    : GetInterceptor<false>(JSObject::cast(*holder_));
+    return handle(result, isolate_);
   }
   Handle<Object> GetDataValue() const;
   void WriteDataValue(Handle<Object> value);
-  void UpdateProtector();
+  inline void UpdateProtector() {
+    if (FLAG_harmony_species && !IsElement() &&
+        (*name_ == heap()->constructor_string() ||
+         *name_ == heap()->species_symbol())) {
+      InternalUpdateProtector();
+    }
+  }
 
  private:
+  void InternalUpdateProtector();
+
   enum class InterceptorState {
     kUninitialized,
     kSkipNonMasking,
@@ -268,19 +282,37 @@
   Handle<Map> GetReceiverMap() const;
 
   MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
-  inline State LookupInHolder(Map* map, JSReceiver* holder);
-  void RestartLookupForNonMaskingInterceptors() {
-    RestartInternal(InterceptorState::kProcessNonMasking);
+
+  template <bool is_element>
+  void Start();
+  template <bool is_element>
+  void NextInternal(Map* map, JSReceiver* holder);
+  template <bool is_element>
+  inline State LookupInHolder(Map* map, JSReceiver* holder) {
+    return map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE
+               ? LookupInSpecialHolder<is_element>(map, holder)
+               : LookupInRegularHolder<is_element>(map, holder);
   }
+  template <bool is_element>
+  State LookupInRegularHolder(Map* map, JSReceiver* holder);
+  template <bool is_element>
+  State LookupInSpecialHolder(Map* map, JSReceiver* holder);
+  template <bool is_element>
+  void RestartLookupForNonMaskingInterceptors() {
+    RestartInternal<is_element>(InterceptorState::kProcessNonMasking);
+  }
+  template <bool is_element>
   void RestartInternal(InterceptorState interceptor_state);
-  State LookupNonMaskingInterceptorInHolder(Map* map, JSReceiver* holder);
   Handle<Object> FetchValue() const;
+  template <bool is_element>
   void ReloadPropertyInformation();
-  inline bool SkipInterceptor(JSObject* holder);
-  bool HasInterceptor(Map* map) const;
+
+  template <bool is_element>
+  bool SkipInterceptor(JSObject* holder);
+  template <bool is_element>
   inline InterceptorInfo* GetInterceptor(JSObject* holder) const {
-    if (IsElement()) return holder->GetIndexedInterceptor();
-    return holder->GetNamedInterceptor();
+    return is_element ? holder->GetIndexedInterceptor()
+                      : holder->GetNamedInterceptor();
   }
 
   bool check_hidden() const { return (configuration_ & kHidden) != 0; }
@@ -332,11 +364,11 @@
   PropertyDetails property_details_;
   Isolate* const isolate_;
   Handle<Name> name_;
-  uint32_t index_;
   Handle<Object> transition_;
   const Handle<Object> receiver_;
   Handle<JSReceiver> holder_;
   const Handle<JSReceiver> initial_holder_;
+  const uint32_t index_;
   uint32_t number_;
 };
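
The LookupIterator change above replaces runtime IsElement() checks in the lookup hot path with a compile-time template parameter. A minimal standalone sketch of that pattern, with illustrative names only (this is not V8 code):

#include <cstdint>
#include <iostream>

struct Holder {
  uint32_t element_count = 3;
  uint32_t named_count = 7;
};

// The element/name decision becomes a template parameter, so each
// instantiation compiles to straight-line code with no runtime branch.
template <bool is_element>
uint32_t LookupCount(const Holder& h) {
  return is_element ? h.element_count : h.named_count;
}

// A single runtime check at the entry point dispatches to the right
// specialization, mirroring how Restart()/Start<>() dispatch above.
uint32_t Lookup(const Holder& h, bool is_element) {
  return is_element ? LookupCount<true>(h) : LookupCount<false>(h);
}

int main() {
  Holder h;
  std::cout << Lookup(h, true) << " " << Lookup(h, false) << "\n";  // 3 7
  return 0;
}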
 
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index fd2aa7c..6338b2c 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -68,6 +68,11 @@
 #include "src/mips64/assembler-mips64-inl.h"
 #include "src/mips64/constants-mips64.h"
 #include "src/mips64/macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390.h"
+#include "src/s390/assembler-s390-inl.h"
+#include "src/s390/constants-s390.h"
+#include "src/s390/macro-assembler-s390.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/assembler-x87.h"
 #include "src/x87/assembler-x87-inl.h"
diff --git a/src/messages.cc b/src/messages.cc
index 072ac1d..67ab36f 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -236,9 +236,20 @@
   Handle<Object> function_name(fun_->shared()->name(), isolate_);
   if (function_name->IsName()) {
     Handle<Name> name = Handle<Name>::cast(function_name);
+    // ES2015 gives getters and setters name prefixes which must
+    // be stripped to find the property name.
+    if (name->IsString() && FLAG_harmony_function_name) {
+      Handle<String> name_string = Handle<String>::cast(name);
+      if (name_string->IsUtf8EqualTo(CStrVector("get "), true) ||
+          name_string->IsUtf8EqualTo(CStrVector("set "), true)) {
+        name = isolate_->factory()->NewProperSubString(name_string, 4,
+                                                       name_string->length());
+      }
+    }
     if (CheckMethodName(isolate_, obj, name, fun_,
-                        LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR))
+                        LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
       return name;
+    }
   }
 
   HandleScope outer_scope(isolate_);
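
The messages.cc hunk above strips the ES2015 "get "/"set " name prefixes before comparing method names. A minimal standalone sketch of the same string handling, using plain std::string rather than V8's Handle/Factory API:

#include <iostream>
#include <string>

// Accessor functions are named "get foo" / "set foo" under ES2015 function
// naming, so the 4-character prefix is dropped before the name comparison.
std::string StripAccessorPrefix(const std::string& name) {
  if (name.rfind("get ", 0) == 0 || name.rfind("set ", 0) == 0) {
    return name.substr(4);
  }
  return name;
}

int main() {
  std::cout << StripAccessorPrefix("get foo") << "\n";  // foo
  std::cout << StripAccessorPrefix("bar") << "\n";      // bar
  return 0;
}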
diff --git a/src/messages.h b/src/messages.h
index c71e11b..4aa0b73 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -94,11 +94,13 @@
   T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements")         \
   T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.")    \
   T(CalledNonCallable, "% is not a function")                                  \
-  T(CalledNonCallableInstanceOf, "right-hand side is not a function")          \
+  T(CalledNonCallableInstanceOf,                                               \
+    "Right-hand side of 'instanceof' is not callable")                         \
   T(CalledOnNonObject, "% called on non-object")                               \
   T(CalledOnNullOrUndefined, "% called on null or undefined")                  \
   T(CallSiteExpectsFunction,                                                   \
     "CallSite expects function as second argument, got %")                     \
+  T(CallSiteMethod, "CallSite method % expects CallSite as receiver")          \
   T(CannotConvertToPrimitive, "Cannot convert object to primitive value")      \
   T(CannotPreventExt, "Cannot prevent extensions")                             \
   T(CannotFreezeArrayBufferView,                                               \
@@ -135,6 +137,8 @@
     "Function has non-object prototype '%' in instanceof check")               \
   T(InvalidArgument, "invalid_argument")                                       \
   T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %")   \
+  T(InvalidRegExpExecResult,                                                   \
+    "RegExp exec method returned something other than an Object or null")      \
   T(InvalidSimdOperation, "% is not a valid type for this SIMD operation.")    \
   T(IteratorResultNotAnObject, "Iterator result % is not an object")           \
   T(IteratorValueNotAnObject, "Iterator value % is not an entry object")       \
@@ -147,6 +151,8 @@
   T(NoAccess, "no access")                                                     \
   T(NonCoercible, "Cannot match against 'undefined' or 'null'.")               \
   T(NonExtensibleProto, "% is not extensible")                                 \
+  T(NonObjectInInstanceOfCheck,                                                \
+    "Right-hand side of 'instanceof' is not an object")                        \
   T(NonObjectPropertyLoad, "Cannot read property '%' of %")                    \
   T(NonObjectPropertyStore, "Cannot set property '%' of %")                    \
   T(NoSetterInCallback, "Cannot set property % of % which has only a getter")  \
@@ -187,6 +193,7 @@
   T(PromiseCyclic, "Chaining cycle detected for promise %")                    \
   T(PromiseExecutorAlreadyInvoked,                                             \
     "Promise executor has already been invoked with non-undefined arguments")  \
+  T(PromiseNonCallable, "Promise resolve or reject function is not callable")  \
   T(PropertyDescObject, "Property description must be an object: %")           \
   T(PropertyNotFunction,                                                       \
     "'%' returned for property '%' of object '%' is not a function")           \
@@ -208,9 +215,6 @@
   T(ProxyDeletePropertyNonConfigurable,                                        \
     "'deleteProperty' on proxy: trap returned truish for property '%' which "  \
     "is non-configurable in the proxy target")                                 \
-  T(ProxyEnumerateNonObject, "'enumerate' on proxy: trap returned non-object") \
-  T(ProxyEnumerateNonString,                                                   \
-    "'enumerate' on proxy: trap result includes non-string")                   \
   T(ProxyGetNonConfigurableData,                                               \
     "'get' on proxy: property '%' is a read-only and "                         \
     "non-configurable data property on the proxy target but the proxy "        \
@@ -305,18 +309,6 @@
   T(StrictReadOnlyProperty,                                                    \
     "Cannot assign to read only property '%' of % '%'")                        \
   T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'")         \
-  T(StrongArity,                                                               \
-    "In strong mode, calling a function with too few arguments is deprecated") \
-  T(StrongDeleteProperty,                                                      \
-    "Deleting property '%' of strong object '%' is deprecated")                \
-  T(StrongExtendNull, "In strong mode, classes extending null are deprecated") \
-  T(StrongImplicitConversion,                                                  \
-    "In strong mode, implicit conversions are deprecated")                     \
-  T(StrongRedefineDisallowed,                                                  \
-    "On strong object %, redefining writable, non-configurable property '%' "  \
-    "to be non-writable is deprecated")                                        \
-  T(StrongSetProto,                                                            \
-    "On strong object %, redefining the internal prototype is deprecated")     \
   T(SymbolIteratorInvalid,                                                     \
     "Result of the Symbol.iterator method is not an object")                   \
   T(SymbolKeyFor, "% is not a symbol")                                         \
@@ -333,9 +325,6 @@
   /* ReferenceError */                                                         \
   T(NonMethod, "'super' is referenced from non-method")                        \
   T(NotDefined, "% is not defined")                                            \
-  T(StrongSuperCallMissing,                                                    \
-    "In strong mode, invoking the super constructor in a subclass is "         \
-    "required")                                                                \
   T(UnsupportedSuper, "Unsupported reference to 'super'")                      \
   /* RangeError */                                                             \
   T(DateRange, "Provided date is not in valid range.")                         \
@@ -347,6 +336,7 @@
   T(InvalidArrayBufferLength, "Invalid array buffer length")                   \
   T(ArrayBufferAllocationFailed, "Array buffer allocation failed")             \
   T(InvalidArrayLength, "Invalid array length")                                \
+  T(InvalidAtomicAccessIndex, "Invalid atomic access index")                   \
   T(InvalidCodePoint, "Invalid code point %")                                  \
   T(InvalidCountValue, "Invalid count value")                                  \
   T(InvalidCurrencyCode, "Invalid currency code: %")                           \
@@ -390,7 +380,6 @@
     "% loop variable declaration may not have an initializer.")                \
   T(ForInOfLoopMultiBindings,                                                  \
     "Invalid left-hand side in % loop: Must have a single binding.")           \
-  T(IllegalAccess, "Illegal access")                                           \
   T(IllegalBreak, "Illegal break statement")                                   \
   T(IllegalContinue, "Illegal continue statement")                             \
   T(IllegalLanguageModeDirective,                                              \
@@ -407,11 +396,15 @@
   T(InvalidLhsInPrefixOp,                                                      \
     "Invalid left-hand side expression in prefix operation")                   \
   T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'")    \
+  T(InvalidOrUnexpectedToken, "Invalid or unexpected token")                   \
   T(JsonParseUnexpectedEOS, "Unexpected end of JSON input")                    \
   T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %")      \
   T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
   T(JsonParseUnexpectedTokenString, "Unexpected string in JSON at position %") \
   T(LabelRedeclaration, "Label '%' has already been declared")                 \
+  T(LabelledFunctionDeclaration,                                               \
+    "Labelled function declaration not allowed as the body of a control flow " \
+    "structure")                                                               \
   T(MalformedArrowFunParamList, "Malformed arrow function parameter list")     \
   T(MalformedRegExp, "Invalid regular expression: /%/: %")                     \
   T(MalformedRegExpFlags, "Invalid regular expression flags")                  \
@@ -432,7 +425,11 @@
     "Setter function argument must not be a rest parameter")                   \
   T(ParamDupe, "Duplicate parameter name not allowed in this context")         \
   T(ParenthesisInArgString, "Function arg string contains parenthesis")        \
+  T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments")   \
   T(SingleFunctionLiteral, "Single function literal required")                 \
+  T(SloppyFunction,                                                            \
+    "In non-strict mode code, functions can only be declared at top level, "   \
+    "inside a block, or as the body of an if statement.")                      \
   T(SloppyLexical,                                                             \
     "Block-scoped declarations (let, const, function, class) not yet "         \
     "supported outside strict mode")                                           \
@@ -442,53 +439,9 @@
   T(StrictEvalArguments, "Unexpected eval or arguments in strict mode")        \
   T(StrictFunction,                                                            \
     "In strict mode code, functions can only be declared at top level or "     \
-    "immediately within another function.")                                    \
+    "inside a block.")                                                         \
   T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.")      \
   T(StrictWith, "Strict mode code may not include a with statement")           \
-  T(StrongArguments,                                                           \
-    "In strong mode, 'arguments' is deprecated, use '...args' instead")        \
-  T(StrongConstructorDirective,                                                \
-    "\"use strong\" directive is disallowed in class constructor body")        \
-  T(StrongConstructorReturnMisplaced,                                          \
-    "In strong mode, returning from a constructor before its super "           \
-    "constructor invocation or all assignments to 'this' is deprecated")       \
-  T(StrongConstructorReturnValue,                                              \
-    "In strong mode, returning a value from a constructor is deprecated")      \
-  T(StrongConstructorSuper,                                                    \
-    "In strong mode, 'super' can only be used to invoke the super "            \
-    "constructor, and cannot be nested inside another statement or "           \
-    "expression")                                                              \
-  T(StrongConstructorThis,                                                     \
-    "In strong mode, 'this' can only be used to initialize properties, and "   \
-    "cannot be nested inside another statement or expression")                 \
-  T(StrongDelete,                                                              \
-    "In strong mode, 'delete' is deprecated, use maps or sets instead")        \
-  T(StrongDirectEval, "In strong mode, direct calls to eval are deprecated")   \
-  T(StrongEllision,                                                            \
-    "In strong mode, arrays with holes are deprecated, use maps instead")      \
-  T(StrongEmpty,                                                               \
-    "In strong mode, empty sub-statements are deprecated, make them explicit " \
-    "with '{}' instead")                                                       \
-  T(StrongEqual,                                                               \
-    "In strong mode, '==' and '!=' are deprecated, use '===' and '!==' "       \
-    "instead")                                                                 \
-  T(StrongForIn,                                                               \
-    "In strong mode, 'for'-'in' loops are deprecated, use 'for'-'of' instead") \
-  T(StrongPropertyAccess,                                                      \
-    "In strong mode, accessing missing property '%' of % is deprecated")       \
-  T(StrongSuperCallDuplicate,                                                  \
-    "In strong mode, invoking the super constructor multiple times is "        \
-    "deprecated")                                                              \
-  T(StrongSuperCallMisplaced,                                                  \
-    "In strong mode, the super constructor must be invoked before any "        \
-    "assignment to 'this'")                                                    \
-  T(StrongSwitchFallthrough,                                                   \
-    "In strong mode, switch fall-through is deprecated, terminate each case "  \
-    "with 'break', 'continue', 'return' or 'throw'")                           \
-  T(StrongUndefined,                                                           \
-    "In strong mode, binding or assigning to 'undefined' is deprecated")       \
-  T(StrongVar,                                                                 \
-    "In strong mode, 'var' is deprecated, use 'let' or 'const' instead")       \
   T(TemplateOctalLiteral,                                                      \
     "Octal literals are not allowed in template strings.")                     \
   T(ThisFormalParameter, "'this' is not a valid formal parameter name")        \
@@ -518,7 +471,10 @@
   T(UnterminatedTemplate, "Unterminated template literal")                     \
   T(UnterminatedTemplateExpr, "Missing } in template expression")              \
   T(FoundNonCallableHasInstance, "Found non-callable @@hasInstance")           \
-  T(NonObjectInInstanceOfCheck, "Expecting an object in instanceof check")     \
+  T(InvalidHexEscapeSequence, "Invalid hexadecimal escape sequence")           \
+  T(InvalidUnicodeEscapeSequence, "Invalid Unicode escape sequence")           \
+  T(UndefinedUnicodeCodePoint, "Undefined Unicode code-point")                 \
+  T(YieldInParameter, "Yield expression not allowed in formal parameter")      \
   /* EvalError */                                                              \
   T(CodeGenFromStrings, "%")                                                   \
   /* URIError */                                                               \
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 5e27f45..517d4ad 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -102,6 +102,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) ||
@@ -152,6 +156,18 @@
   }
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+                                   icache_flush_mode);
+}
 
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
@@ -160,19 +176,30 @@
 
 void Assembler::set_target_internal_reference_encoded_at(Address pc,
                                                          Address target) {
-  // Encoded internal references are lui/ori load of 32-bit abolute address.
-  Instr instr_lui = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
-  Instr instr_ori = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
-  DCHECK(Assembler::IsLui(instr_lui));
-  DCHECK(Assembler::IsOri(instr_ori));
-  instr_lui &= ~kImm16Mask;
-  instr_ori &= ~kImm16Mask;
+  Instr instr1 = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
+  Instr instr2 = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
+  DCHECK(Assembler::IsLui(instr1));
+  DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
+  instr1 &= ~kImm16Mask;
+  instr2 &= ~kImm16Mask;
   int32_t imm = reinterpret_cast<int32_t>(target);
   DCHECK((imm & 3) == 0);
-  Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
-                          instr_lui | ((imm >> kLuiShift) & kImm16Mask));
-  Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
-                          instr_ori | (imm & kImm16Mask));
+  if (Assembler::IsJicOrJialc(instr2)) {
+    // Encoded internal references are lui/jic load of 32-bit absolute address.
+    uint32_t lui_offset_u, jic_offset_u;
+    Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+
+    Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
+                            instr1 | lui_offset_u);
+    Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
+                            instr2 | jic_offset_u);
+  } else {
+    // Encoded internal references are lui/ori load of 32-bit absolute address.
+    Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
+                            instr1 | ((imm >> kLuiShift) & kImm16Mask));
+    Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
+                            instr2 | (imm & kImm16Mask));
+  }
 
   // Currently used only by deserializer, and all code will be flushed
   // after complete deserialization, no need to flush on each reference.
@@ -230,14 +257,19 @@
   if (rmode_ == INTERNAL_REFERENCE) {
     return Memory::Address_at(pc_);
   } else {
-    // Encoded internal references are lui/ori load of 32-bit abolute address.
+    // Encoded internal references are lui/ori or lui/jic load of 32-bit
+    // absolute address.
     DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
-    Instr instr_lui = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
-    Instr instr_ori = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
-    DCHECK(Assembler::IsLui(instr_lui));
-    DCHECK(Assembler::IsOri(instr_ori));
-    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
-    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+    Instr instr1 = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
+    Instr instr2 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+    DCHECK(Assembler::IsLui(instr1));
+    DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
+    if (Assembler::IsJicOrJialc(instr2)) {
+      return reinterpret_cast<Address>(
+          Assembler::CreateTargetAddress(instr1, instr2));
+    }
+    int32_t imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+    imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
     return reinterpret_cast<Address>(imm);
   }
 }
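
update_wasm_memory_reference above rebases an embedded pointer from the old wasm memory block to the new one by preserving its offset, with bounds checks on both sides. A standalone sketch of that arithmetic (illustrative only, not V8's implementation):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

using Address = uintptr_t;

Address Rebase(Address ref, Address old_base, size_t old_size,
               Address new_base, size_t new_size) {
  assert(old_base <= ref && ref < old_base + old_size);  // old reference in range
  Address updated = new_base + (ref - old_base);         // keep the same offset
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}

int main() {
  // Old buffer at 0x1000 (64 bytes), new buffer at 0x8000 (128 bytes):
  // the 0x10 offset is preserved, so 0x1010 maps to 0x8010.
  std::cout << std::hex << Rebase(0x1010, 0x1000, 64, 0x8000, 128) << "\n";
  return 0;
}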
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index e50a239..bfa2328 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -534,6 +534,11 @@
   return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
 }
 
+bool Assembler::IsJicOrJialc(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  uint32_t rs = GetRsField(instr);
+  return (opcode == POP66 || opcode == POP76) && rs == 0;
+}
 
 bool Assembler::IsJump(Instr instr) {
   uint32_t opcode   = GetOpcodeField(instr);
@@ -546,7 +551,6 @@
       ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
 }
 
-
 bool Assembler::IsJ(Instr instr) {
   uint32_t opcode = GetOpcodeField(instr);
   // Checks if the instruction is a jump.
@@ -697,6 +701,47 @@
   }
 }
 
+uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) {
+  DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic));
+  int16_t jic_offset = GetImmediate16(instr_jic);
+  int16_t lui_offset = GetImmediate16(instr_lui);
+
+  if (jic_offset < 0) {
+    lui_offset += kImm16Mask;
+  }
+  uint32_t lui_offset_u = (static_cast<uint32_t>(lui_offset)) << kLuiShift;
+  uint32_t jic_offset_u = static_cast<uint32_t>(jic_offset) & kImm16Mask;
+
+  return lui_offset_u | jic_offset_u;
+}
+
+// Use just the lui and jic instructions. The lower part of the target address
+// goes into the jic offset field. Since jic sign-extends that offset and then
+// adds it to the register, lui must first load the difference between the
+// upper part of the target address and the upper part of the sign-extended
+// offset (0xffff or 0x0000) into the register that jic reads.
+void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
+                                    int16_t& jic_offset) {
+  lui_offset = (address & kHiMask) >> kLuiShift;
+  jic_offset = address & kLoMask;
+
+  if (jic_offset < 0) {
+    lui_offset -= kImm16Mask;
+  }
+}
+
+void Assembler::UnpackTargetAddressUnsigned(uint32_t address,
+                                            uint32_t& lui_offset,
+                                            uint32_t& jic_offset) {
+  int16_t lui_offset16 = (address & kHiMask) >> kLuiShift;
+  int16_t jic_offset16 = address & kLoMask;
+
+  if (jic_offset16 < 0) {
+    lui_offset16 -= kImm16Mask;
+  }
+  lui_offset = static_cast<uint32_t>(lui_offset16) & kImm16Mask;
+  jic_offset = static_cast<uint32_t>(jic_offset16) & kImm16Mask;
+}
 
 int Assembler::target_at(int pos, bool is_internal) {
   Instr instr = instr_at(pos);
@@ -724,11 +769,16 @@
   if (IsBranch(instr)) {
     return AddBranchOffset(pos, instr);
   } else {
-    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
-    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
-    DCHECK(IsOri(instr_ori));
-    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
-    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+    Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
+    Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
+    DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
+    int32_t imm;
+    if (IsJicOrJialc(instr2)) {
+      imm = CreateTargetAddress(instr1, instr2);
+    } else {
+      imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+      imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
+    }
 
     if (imm == kEndOfJumpChain) {
       // EndOfChain sentinel is returned directly, not relative to pc or pos.
@@ -781,19 +831,26 @@
     instr = SetBranchOffset(pos, target_pos, instr);
     instr_at_put(pos, instr);
   } else {
-    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
-    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
-    DCHECK(IsOri(instr_ori));
+    Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
+    Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
+    DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
     uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
     DCHECK((imm & 3) == 0);
+    DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
+    instr1 &= ~kImm16Mask;
+    instr2 &= ~kImm16Mask;
 
-    instr_lui &= ~kImm16Mask;
-    instr_ori &= ~kImm16Mask;
-
-    instr_at_put(pos + 0 * Assembler::kInstrSize,
-                 instr_lui | ((imm & kHiMask) >> kLuiShift));
-    instr_at_put(pos + 1 * Assembler::kInstrSize,
-                 instr_ori | (imm & kImm16Mask));
+    if (IsJicOrJialc(instr2)) {
+      uint32_t lui_offset_u, jic_offset_u;
+      UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+      instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
+      instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
+    } else {
+      instr_at_put(pos + 0 * Assembler::kInstrSize,
+                   instr1 | ((imm & kHiMask) >> kLuiShift));
+      instr_at_put(pos + 1 * Assembler::kInstrSize,
+                   instr2 | (imm & kImm16Mask));
+    }
   }
 }
 
@@ -1330,7 +1387,6 @@
 
 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!rs.is(zero_reg));
   if (rs.code() >= rt.code()) {
     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   } else {
@@ -1341,7 +1397,6 @@
 
 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!rs.is(zero_reg));
   if (rs.code() >= rt.code()) {
     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   } else {
@@ -1704,10 +1759,10 @@
 
 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
-  DCHECK(sa < 5 && sa > 0);
+  DCHECK(sa <= 3);
   DCHECK(IsMipsArchVariant(kMips32r6));
-  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
-                (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
+                rd.code() << kRdShift | sa << kSaShift | LSA;
   emit(instr);
 }
 
@@ -2085,7 +2140,6 @@
 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
   // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
   // load to two 32-bit loads.
-  DCHECK(!src.rm().is(at));
   if (IsFp32Mode()) {  // fp32 mode.
     if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
       GenInstrImmediate(LWC1, src.rm(), fd,
@@ -2790,24 +2844,36 @@
   } else {
     DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
     if (IsLui(instr)) {
-      Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
-      Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
-      DCHECK(IsOri(instr_ori));
-      int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
-      imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+      Instr instr1 = instr_at(pc + 0 * Assembler::kInstrSize);
+      Instr instr2 = instr_at(pc + 1 * Assembler::kInstrSize);
+      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
+      int32_t imm;
+      if (IsJicOrJialc(instr2)) {
+        imm = CreateTargetAddress(instr1, instr2);
+      } else {
+        imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+        imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
+      }
+
       if (imm == kEndOfJumpChain) {
         return 0;  // Number of instructions patched.
       }
       imm += pc_delta;
       DCHECK((imm & 3) == 0);
+      instr1 &= ~kImm16Mask;
+      instr2 &= ~kImm16Mask;
 
-      instr_lui &= ~kImm16Mask;
-      instr_ori &= ~kImm16Mask;
-
-      instr_at_put(pc + 0 * Assembler::kInstrSize,
-                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
-      instr_at_put(pc + 1 * Assembler::kInstrSize,
-                   instr_ori | (imm & kImm16Mask));
+      if (IsJicOrJialc(instr2)) {
+        uint32_t lui_offset_u, jic_offset_u;
+        Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+        instr_at_put(pc + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
+        instr_at_put(pc + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
+      } else {
+        instr_at_put(pc + 0 * Assembler::kInstrSize,
+                     instr1 | ((imm >> kLuiShift) & kImm16Mask));
+        instr_at_put(pc + 1 * Assembler::kInstrSize,
+                     instr2 | (imm & kImm16Mask));
+      }
       return 2;  // Number of instructions patched.
     } else {
       UNREACHABLE();
@@ -2900,7 +2966,7 @@
   // We do not try to reuse pool constants.
   RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::COMMENT &&
-      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
+      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
     // Adjust code for new modes.
     DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsComment(rmode)
@@ -2964,19 +3030,40 @@
       }
 
       int pool_start = pc_offset();
-      for (int i = 0; i < unbound_labels_count_; i++) {
-        uint32_t imm32;
-        imm32 = jump_address(&after_pool);
-        { BlockGrowBufferScope block_buf_growth(this);
-          // Buffer growth (and relocation) must be blocked for internal
-          // references until associated instructions are emitted and available
-          // to be patched.
-          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-          lui(at, (imm32 & kHiMask) >> kLuiShift);
-          ori(at, at, (imm32 & kImm16Mask));
+      if (IsMipsArchVariant(kMips32r6)) {
+        for (int i = 0; i < unbound_labels_count_; i++) {
+          uint32_t imm32;
+          imm32 = jump_address(&after_pool);
+          uint32_t lui_offset, jic_offset;
+          UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+          {
+            BlockGrowBufferScope block_buf_growth(this);
+            // Buffer growth (and relocation) must be blocked for internal
+            // references until associated instructions are emitted and
+            // available to be patched.
+            RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+            lui(at, lui_offset);
+            jic(at, jic_offset);
+          }
+          CheckBuffer();
         }
-        jr(at);
-        nop();
+      } else {
+        for (int i = 0; i < unbound_labels_count_; i++) {
+          uint32_t imm32;
+          imm32 = jump_address(&after_pool);
+          {
+            BlockGrowBufferScope block_buf_growth(this);
+            // Buffer growth (and relocation) must be blocked for internal
+            // references until associated instructions are emitted and
+            // available to be patched.
+            RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+            lui(at, (imm32 & kHiMask) >> kLuiShift);
+            ori(at, at, (imm32 & kImm16Mask));
+          }
+          CheckBuffer();
+          jr(at);
+          nop();
+        }
       }
       bind(&after_pool);
       trampoline_ = Trampoline(pool_start, unbound_labels_count_);
@@ -3000,10 +3087,10 @@
   Instr instr1 = instr_at(pc);
   Instr instr2 = instr_at(pc + kInstrSize);
   // Interpret 2 instructions generated by li: lui/ori
-  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+  if (IsLui(instr1) && IsOri(instr2)) {
     // Assemble the 32 bit value.
-    return reinterpret_cast<Address>(
-        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
+    return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
+                                     GetImmediate16(instr2));
   }
 
   // We should never get here, force a bad address if we do.
@@ -3024,6 +3111,8 @@
 // On Mips, a target address is stored in a lui/ori instruction pair, each
 // of which load 16 bits of the 32-bit address to a register.
 // Patching the address must replace both instr, and flush the i-cache.
+// On r6, target address is stored in a lui/jic pair, and both instr have to be
+// patched.
 //
 // There is an optimization below, which emits a nop when the address
 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
@@ -3039,15 +3128,27 @@
 #ifdef DEBUG
   // Check we have the result from a li macro-instruction, using instr pair.
   Instr instr1 = instr_at(pc);
-  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
+  CHECK(IsLui(instr1) && (IsOri(instr2) || IsJicOrJialc(instr2)));
 #endif
 
-  // Must use 2 instructions to insure patchable code => just use lui and ori.
-  // lui rt, upper-16.
-  // ori rt rt, lower-16.
-  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-  *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+  if (IsJicOrJialc(instr2)) {
+    // Must use 2 instructions to ensure patchable code => use lui and jic
+    uint32_t lui_offset, jic_offset;
+    Assembler::UnpackTargetAddressUnsigned(itarget, lui_offset, jic_offset);
 
+    *p &= ~kImm16Mask;
+    *(p + 1) &= ~kImm16Mask;
+
+    *p |= lui_offset;
+    *(p + 1) |= jic_offset;
+
+  } else {
+    // Must use 2 instructions to ensure patchable code => just use lui and ori.
+    // lui rt, upper-16.
+    // ori rt rt, lower-16.
+    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+    *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+  }
 
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
     Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
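
CreateTargetAddress/UnpackTargetAddressUnsigned above split a 32-bit address across a lui/jic pair; because jic sign-extends its 16-bit offset before adding it, the lui half must be adjusted by one whenever bit 15 of the low half is set. A standalone sketch of that packing with assumed helper names (this is not V8's implementation):

#include <cassert>
#include <cstdint>
#include <iostream>

// Split a 32-bit target into lui/jic immediates. jic will sign-extend its
// offset, so if the low half looks negative the lui half is bumped by one.
void Split(uint32_t target, uint16_t& lui_imm, uint16_t& jic_imm) {
  jic_imm = static_cast<uint16_t>(target & 0xFFFF);
  lui_imm = static_cast<uint16_t>(target >> 16);
  if (jic_imm & 0x8000) lui_imm = static_cast<uint16_t>(lui_imm + 1);
}

// Reassemble: lui places its immediate in the upper half, jic adds the
// sign-extended offset, which cancels the adjustment made in Split().
uint32_t Join(uint16_t lui_imm, uint16_t jic_imm) {
  int32_t offset = static_cast<int16_t>(jic_imm);
  return (static_cast<uint32_t>(lui_imm) << 16) + static_cast<uint32_t>(offset);
}

int main() {
  const uint32_t targets[] = {0x12345678u, 0x1234F678u, 0xFFFF8000u};
  for (uint32_t target : targets) {
    uint16_t hi, lo;
    Split(target, hi, lo);
    assert(Join(hi, lo) == target);
    std::cout << std::hex << target << " -> lui 0x" << hi << ", jic 0x" << lo
              << "\n";
  }
  return 0;
}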
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index b708ef7..886ac6c 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -528,7 +528,11 @@
 
   // Distance between the instruction referring to the address of the call
   // target and the return address.
+#ifdef _MIPS_ARCH_MIPS32R6
+  static const int kCallTargetAddressOffset = 3 * kInstrSize;
+#else
   static const int kCallTargetAddressOffset = 4 * kInstrSize;
+#endif
 
   // Distance between start of patched debug break slot and the emitted address
   // to jump to.
@@ -538,7 +542,11 @@
   // register.
   static const int kPcLoadDelta = 4;
 
+#ifdef _MIPS_ARCH_MIPS32R6
+  static const int kDebugBreakSlotInstructions = 3;
+#else
   static const int kDebugBreakSlotInstructions = 4;
+#endif
   static const int kDebugBreakSlotLength =
       kDebugBreakSlotInstructions * kInstrSize;
 
@@ -750,9 +758,6 @@
   void rotr(Register rd, Register rt, uint16_t sa);
   void rotrv(Register rd, Register rt, Register rs);
 
-  // Address computing instructions with shift.
-  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
-
   // ------------Memory-instructions-------------
 
   void lb(Register rd, const MemOperand& rs);
@@ -1048,7 +1053,9 @@
   void dp(uintptr_t data) { dd(data); }
   void dd(Label* label);
 
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
 
   // Postpone the generation of the trampoline pool for the specified number of
   // instructions.
@@ -1082,6 +1089,7 @@
   static bool IsBnezc(Instr instr);
   static bool IsBeqc(Instr instr);
   static bool IsBnec(Instr instr);
+  static bool IsJicOrJialc(Instr instr);
 
   static bool IsJump(Instr instr);
   static bool IsJ(Instr instr);
@@ -1121,12 +1129,20 @@
   static int32_t GetBranchOffset(Instr instr);
   static bool IsLw(Instr instr);
   static int16_t GetLwOffset(Instr instr);
+  static int16_t GetJicOrJialcOffset(Instr instr);
+  static int16_t GetLuiOffset(Instr instr);
   static Instr SetLwOffset(Instr instr, int16_t offset);
 
   static bool IsSw(Instr instr);
   static Instr SetSwOffset(Instr instr, int16_t offset);
   static bool IsAddImmediate(Instr instr);
   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+  static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
+  static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
+                                  int16_t& jic_offset);
+  static void UnpackTargetAddressUnsigned(uint32_t address,
+                                          uint32_t& lui_offset,
+                                          uint32_t& jic_offset);
 
   static bool IsAndImmediate(Instr instr);
   static bool IsEmittedConstant(Instr instr);
@@ -1143,6 +1159,9 @@
   bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
 
  protected:
+  // Load Scaled Address instruction.
+  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
+
   // Relocation for a type-recording IC has the AST id added to it.  This
   // member variable is a way to pass the information from the call site to
   // the relocation info.
@@ -1213,6 +1232,8 @@
 
   inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
 
+  inline void CheckBuffer();
+
  private:
   inline static void set_target_internal_reference_encoded_at(Address pc,
                                                               Address target);
@@ -1259,7 +1280,6 @@
   enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
 
   // Code emission.
-  inline void CheckBuffer();
   void GrowBuffer();
   inline void emit(Instr x,
                    CompactBranchType is_compact_branch = CompactBranchType::NO);
@@ -1406,7 +1426,11 @@
   // branch instruction generation, where we use jump instructions rather
   // than regular branch instructions.
   bool trampoline_emitted_;
+#ifdef _MIPS_ARCH_MIPS32R6
+  static const int kTrampolineSlotsSize = 2 * kInstrSize;
+#else
   static const int kTrampolineSlotsSize = 4 * kInstrSize;
+#endif
   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
   static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
   static const int kInvalidSlotPos = -1;
@@ -1427,8 +1451,8 @@
   friend class CodePatcher;
   friend class BlockTrampolinePoolScope;
 
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
   friend class EnsureSpace;
 };
 
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 09f4d59..9693a52 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -149,17 +149,15 @@
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
   // -----------------------------------
-  Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
   Heap::RootListIndex const root_index =
       (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
                                      : Heap::kMinusInfinityValueRootIndex;
-  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
 
   // Load the accumulator with the default return value (either -Infinity or
   // +Infinity), with the tagged value in a1 and the double value in f0.
   __ LoadRoot(a1, root_index);
   __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
-  __ mov(a3, a0);
+  __ Addu(a3, a0, Operand(1));
 
   Label done_loop, loop;
   __ bind(&loop);
@@ -211,21 +209,24 @@
     __ SmiToDoubleFPURegister(a2, f2, t0);
     __ bind(&done_convert);
 
-    // Perform the actual comparison with the accumulator value on the left hand
-    // side (f0) and the next parameter value on the right hand side (f2).
-    Label compare_equal, compare_nan, compare_swap;
-    __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
-    __ BranchF(&compare_swap, nullptr, cc, f0, f2);
-    __ Branch(&loop);
-
-    // Left and right hand side are equal, check for -0 vs. +0.
-    __ bind(&compare_equal);
-    __ FmoveHigh(t0, reg);
-    __ Branch(&loop, ne, t0, Operand(0x80000000));
-
-    // Result is on the right hand side.
-    __ bind(&compare_swap);
-    __ mov_d(f0, f2);
+    // Perform the actual comparison using the Min/Max macro instructions, with
+    // the accumulator value on the left-hand side (f0) and the next parameter
+    // value on the right-hand side (f2).
+    // We need to work out which HeapNumber (or smi) the result came from.
+    Label compare_nan, set_value;
+    __ BranchF(nullptr, &compare_nan, eq, f0, f2);
+    __ Move(t0, t1, f0);
+    if (kind == MathMaxMinKind::kMin) {
+      __ MinNaNCheck_d(f0, f0, f2);
+    } else {
+      DCHECK(kind == MathMaxMinKind::kMax);
+      __ MaxNaNCheck_d(f0, f0, f2);
+    }
+    __ Move(at, t8, f0);
+    __ Branch(&set_value, ne, t0, Operand(at));
+    __ Branch(&set_value, ne, t1, Operand(t8));
+    __ jmp(&loop);
+    __ bind(&set_value);
     __ mov(a1, a2);
     __ jmp(&loop);
 
@@ -238,8 +239,8 @@
 
   __ bind(&done_loop);
   __ Lsa(sp, sp, a3, kPointerSizeLog2);
-  __ mov(v0, a1);
-  __ DropAndRet(1);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a1);  // In delay slot.
 }
 
 // static
@@ -530,6 +531,7 @@
   //  -- a1     : constructor function
   //  -- a2     : allocation site or undefined
   //  -- a3     : new target
+  //  -- cp     : context
   //  -- ra     : return address
   //  -- sp[...]: constructor arguments
   // -----------------------------------
@@ -543,7 +545,7 @@
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(a2, t0);
     __ SmiTag(a0);
-    __ Push(a2, a0);
+    __ Push(cp, a2, a0);
 
     if (create_implicit_receiver) {
       // Allocate the new receiver object.
@@ -618,7 +620,7 @@
     }
 
     // Restore context from the frame.
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -750,9 +752,6 @@
   // -----------------------------------
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Clear the context before we push it when entering the JS frame.
-  __ mov(cp, zero_reg);
-
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -847,9 +846,7 @@
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-
-  __ Push(ra, fp, cp, a1);
-  __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(a1);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
@@ -1205,8 +1202,7 @@
   __ MultiPop(saved_regs);
 
   // Perform prologue operations usually performed by the young code stub.
-  __ Push(ra, fp, cp, a1);
-  __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(a1);
 
   // Jump to point after the code-age stub.
   __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
@@ -1435,23 +1431,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  __ LoadRoot(at, Heap::kStackLimitRootIndex);
-  __ Branch(&ok, hs, sp, Operand(at));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&ok);
-  __ Ret();
-}
-
-
 // static
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
@@ -1498,6 +1477,27 @@
   __ TailCallRuntime(Runtime::kThrowNotDateError);
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : argc
+  //  -- sp[0] : first argument (left-hand side)
+  //  -- sp[4] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ lw(InstanceOfDescriptor::LeftRegister(),
+          MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
+    __ lw(InstanceOfDescriptor::RightRegister(),
+          MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ DropAndRet(2);
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1961,18 +1961,20 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ li(at, Operand(debug_is_active));
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ li(at, Operand(is_tail_call_elimination_enabled));
   __ lb(scratch1, MemOperand(at));
-  __ Branch(&done, ne, scratch1, Operand(zero_reg));
+  __ Branch(&done, eq, scratch1, Operand(zero_reg));
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ lw(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ lw(scratch3,
+          MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Branch(&no_interpreter_frame, ne, scratch3,
               Operand(Smi::FromInt(StackFrame::STUB)));
     __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1980,72 +1982,37 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ lw(scratch3,
+        MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&no_arguments_adaptor, ne, scratch3,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
-  __ lw(scratch1,
+  __ lw(caller_args_count_reg,
         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
   __ Branch(&formal_parameter_count_loaded);
 
   __ bind(&no_arguments_adaptor);
   // Load caller's formal parameter count
-  __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(scratch1,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
   __ lw(scratch1,
         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(scratch1,
+  __ lw(caller_args_count_reg,
         FieldMemOperand(scratch1,
                         SharedFunctionInfo::kFormalParameterCountOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
 
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the end of destination area where we will put the arguments
-  // after we drop current frame. We add kPointerSize to count the receiver
-  // argument which is not included into formal parameters count.
-  Register dst_reg = scratch2;
-  __ Lsa(dst_reg, fp, scratch1, kPointerSizeLog2);
-  __ Addu(dst_reg, dst_reg,
-          Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
-  Register src_reg = scratch1;
-  __ Lsa(src_reg, sp, args_reg, kPointerSizeLog2);
-  // Count receiver argument as well (not included in args_reg).
-  __ Addu(src_reg, src_reg, Operand(kPointerSize));
-
-  if (FLAG_debug_code) {
-    __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
-  }
-
-  // Restore caller's frame pointer and return address now as they will be
-  // overwritten by the copying loop.
-  __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-  __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-
-  // Both src_reg and dst_reg are pointing to the word after the one to copy,
-  // so they must be pre-decremented in the loop.
-  Register tmp_reg = scratch3;
-  Label loop, entry;
-  __ Branch(&entry);
-  __ bind(&loop);
-  __ Subu(src_reg, src_reg, Operand(kPointerSize));
-  __ Subu(dst_reg, dst_reg, Operand(kPointerSize));
-  __ lw(tmp_reg, MemOperand(src_reg));
-  __ sw(tmp_reg, MemOperand(dst_reg));
-  __ bind(&entry);
-  __ Branch(&loop, ne, sp, Operand(src_reg));
-
-  // Leave current frame.
-  __ mov(sp, dst_reg);
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
   __ bind(&done);
 }
 }  // namespace
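
The hand-written argument-copy loop removed above (now folded into MacroAssembler::PrepareForTailCall) walks from the high end downwards because the source and destination stack regions can overlap, with the destination at higher addresses. A minimal standalone analogue of that overlap-safe copy (plain C++, illustrative only):

#include <cstddef>
#include <iostream>

// Copy count words from src to dst starting at the high end, so when dst
// overlaps the tail of src (dst above src) no source word is overwritten
// before it has been read -- the same reason the removed loop pre-decrements.
void CopyBackwards(const int* src, int* dst, size_t count) {
  for (size_t i = count; i > 0; --i) {
    dst[i - 1] = src[i - 1];
  }
}

int main() {
  int frame[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  CopyBackwards(frame, frame + 2, 6);         // shift six words up by two slots
  for (int v : frame) std::cout << v << " ";  // 0 1 0 1 2 3 4 5
  std::cout << "\n";
  return 0;
}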
@@ -2556,27 +2523,6 @@
 
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
-
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ lw(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-    __ lw(t2, FieldMemOperand(t1, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t3, t2, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
-                                 kSmiTagSize)));
-    __ Branch(&no_strong_error, eq, t3, Operand(zero_reg));
-
-    // What we really care about is the required number of arguments.
-    __ lw(t2, FieldMemOperand(t1, SharedFunctionInfo::kLengthOffset));
-    __ SmiUntag(t2);
-    __ Branch(&no_strong_error, ge, a0, Operand(t2));
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentAdaptorStackCheck(masm, &stack_overflow);
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 541e73e..fd286fb 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -4,9 +4,10 @@
 
 #if V8_TARGET_ARCH_MIPS
 
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
-#include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
@@ -76,6 +77,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -506,7 +511,7 @@
          (lhs.is(a1) && rhs.is(a0)));
 
   // a2 is object type of rhs.
-  Label object_test, return_unequal, undetectable;
+  Label object_test, return_equal, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ And(at, a2, Operand(kIsNotStringMask));
   __ Branch(&object_test, ne, at, Operand(zero_reg));
@@ -546,6 +551,16 @@
   __ bind(&undetectable);
   __ And(at, t1, Operand(1 << Map::kIsUndetectable));
   __ Branch(&return_unequal, eq, at, Operand(zero_reg));
+
+  // If both sides are JSReceivers, then the result is false according to
+  // the HTML specification, which says that only comparisons with null or
+  // undefined are affected by special casing for document.all.
+  __ GetInstanceType(a2, a2);
+  __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
+  __ GetInstanceType(a3, a3);
+  __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));
+
+  __ bind(&return_equal);
   __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(EQUAL));  // In delay slot.
 }
@@ -1492,8 +1507,12 @@
   __ GetObjectType(function, function_map, scratch);
   __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
-  // Ensure that {function} has an instance prototype.
+  // Go to the runtime if the function is not a constructor.
   __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+  __ And(at, scratch, Operand(1 << Map::kIsConstructor));
+  __ Branch(&slow_case, eq, at, Operand(zero_reg));
+
+  // Ensure that {function} has an instance prototype.
   __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
   __ Branch(&slow_case, ne, at, Operand(zero_reg));
 
@@ -1563,7 +1582,8 @@
   // Slow-case: Call the %InstanceOf runtime function.
   __ bind(&slow_case);
   __ Push(object, function);
-  __ TailCallRuntime(Runtime::kInstanceOf);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -1582,29 +1602,6 @@
       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
 }
 
-
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is in ra.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-
-  // Check that the key is an array index, that is Uint32.
-  __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
-  __ Branch(&slow, ne, t0, Operand(zero_reg));
-
-  // Everything is fine, call runtime.
-  __ Push(receiver, key);  // Receiver, key.
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -2769,57 +2766,58 @@
   __ bind(&not_smi);
 
   Label not_heap_number;
-  __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  // a0: object
-  // a1: instance type.
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
   __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
   __ bind(&not_heap_number);
 
-  Label not_string, slow_string;
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in a0.
+  __ AssertNotNumber(a0);
+
+  Label not_string;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
   __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
-  // Check if string has a cached array index.
-  __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
-  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
-  __ Branch(&slow_string, ne, at, Operand(zero_reg));
-  __ IndexFromHash(a2, a0);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&slow_string);
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
   __ bind(&not_string);
 
   Label not_oddball;
   __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
   __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+  __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));  // In delay slot.
   __ bind(&not_oddball);
 
-  __ push(a0);  // Push argument.
+  __ Push(a0);  // Push argument.
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in a0.
+  __ AssertString(a0);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes on argument in a0.
-  Label not_smi, positive_smi;
-  __ JumpIfNotSmi(a0, &not_smi);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
-  __ mov(a0, zero_reg);
-  __ bind(&positive_smi);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_smi);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ IndexFromHash(a2, v0);
+  __ Ret();
 
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ bind(&runtime);
+  __ Push(a0);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in a0.
   Label is_number;
@@ -2990,39 +2988,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1    : left
-  //  -- a0    : right
-  //  -- ra    : return address
-  // -----------------------------------
-  __ AssertString(a1);
-  __ AssertString(a0);
-
-  Label not_same;
-  __ Branch(&not_same, ne, a0, Operand(a1));
-  __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
-                      a2);
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both objects are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
-
-  // Compare flat ASCII strings natively.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
-                      a3);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
-
-  __ bind(&runtime);
-  __ Push(a1, a0);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a1    : left
@@ -3345,10 +3310,17 @@
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
-  __ Push(left, right);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left, right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
+    __ Ret(USE_DELAY_SLOT);
+    __ Subu(v0, v0, a0);  // In delay slot.
   } else {
+    __ Push(left, right);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3907,7 +3879,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ lw(a1, MemOperand(fp, parameter_count_offset));
   if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ Addu(a1, a1, Operand(1));
@@ -4889,7 +4861,7 @@
     __ bind(&loop);
     __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
     __ Branch(&loop, ne, a1, Operand(a3));
   }
 
@@ -4897,7 +4869,7 @@
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
   __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ lw(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&no_rest_parameters, ne, a3,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
@@ -5036,7 +5008,7 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+  __ lw(a0, MemOperand(t0, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&adaptor_frame, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
@@ -5244,14 +5216,14 @@
     __ bind(&loop);
     __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
     __ Branch(&loop, ne, a1, Operand(a3));
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&arguments_adaptor, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   {
@@ -5607,16 +5579,12 @@
   __ jmp(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0                  : callee
   //  -- t0                  : call_data
   //  -- a2                  : holder
   //  -- a1                  : api_function_address
-  //  -- a3                  : number of arguments if argc is a register
   //  -- cp                  : context
   //  --
   //  -- sp[0]               : last argument
@@ -5642,17 +5610,15 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || a3.is(argc.reg()));
-
   // Save context, callee and call data.
   __ Push(context, callee, call_data);
-  if (!is_lazy) {
+  if (!is_lazy()) {
     // Load context from callee.
     __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   }
 
   Register scratch = call_data;
-  if (!call_data_undefined) {
+  if (!call_data_undefined()) {
     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   }
   // Push return value and default return value.
@@ -5677,29 +5643,14 @@
   __ Addu(a0, sp, Operand(1 * kPointerSize));
   // FunctionCallbackInfo::implicit_args_
   __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
-  if (argc.is_immediate()) {
-    // FunctionCallbackInfo::values_
-    __ Addu(at, scratch,
-            Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
-    __ sw(at, MemOperand(a0, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    __ li(at, Operand(argc.immediate()));
-    __ sw(at, MemOperand(a0, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_ = 0
-    __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
-  } else {
-    // FunctionCallbackInfo::values_
-    __ sll(at, argc.reg(), kPointerSizeLog2);
-    __ Addu(at, at, scratch);
-    __ Addu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
-    __ sw(at, MemOperand(a0, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_
-    __ Addu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
-    __ sll(at, argc.reg(), kPointerSizeLog2);
-    __ sw(at, MemOperand(a0, 3 * kPointerSize));
-  }
+  // FunctionCallbackInfo::values_
+  __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+  __ sw(at, MemOperand(a0, 1 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc
+  __ li(at, Operand(argc()));
+  __ sw(at, MemOperand(a0, 2 * kPointerSize));
+  // FunctionCallbackInfo::is_construct_call_ = 0
+  __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5709,7 +5660,7 @@
       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument.
   int return_value_offset = 0;
-  if (return_first_arg) {
+  if (is_store()) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5717,33 +5668,14 @@
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
   int32_t stack_space_offset = 4 * kPointerSize;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_offset = kInvalidStackOffset;
-  }
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_offset = kInvalidStackOffset;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                            stack_space_offset, return_value_operand,
                            &context_restore_operand);
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- sp[0]                        : name
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 878ba34..1c6c169 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -1197,10 +1197,8 @@
                       young_sequence_.length() / Assembler::kInstrSize,
                       CodePatcher::DONT_FLUSH));
   PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
-  patcher->masm()->Push(ra, fp, cp, a1);
+  patcher->masm()->PushStandardFrame(a1);
   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
-  patcher->masm()->Addu(
-      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
 }
 
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 0caaa4c..e9caaad 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -98,12 +98,6 @@
   }
 }
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  // There is no dynamic alignment padding on MIPS in the input frame.
-  return false;
-}
-
-
 #define __ masm()->
 
 
@@ -160,10 +154,15 @@
   __ Subu(t0, fp, t0);
 
   // Allocate a new deoptimizer object.
-  // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
   __ PrepareCallCFunction(6, t1);
+  // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
+  __ mov(a0, zero_reg);
+  Label context_check;
+  __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(a1, &context_check);
   __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a1, Operand(type()));  // bailout type,
+  __ bind(&context_check);
+  __ li(a1, Operand(type()));  // Bailout type.
   // a2: bailout id already loaded.
   // a3: code address or 0 already loaded.
   __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
@@ -239,6 +238,8 @@
   }
   __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
 
+  __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 849dea2..3511679 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -133,13 +133,11 @@
       -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize = 2 * kPointerSize;
-
-  static const int kCodeOffset = -2 * kPointerSize;
-  static const int kSPOffset = -1 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
@@ -161,7 +159,7 @@
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
   static const int kLastParameterOffset = +2 * kPointerSize;
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 
   // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
index fdb43f3..06e3b77 100644
--- a/src/mips/interface-descriptors-mips.cc
+++ b/src/mips/interface-descriptors-mips.cc
@@ -109,35 +109,8 @@
 }
 
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -266,6 +239,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -310,6 +290,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (arg count)
+  Register registers[] = {a0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -318,20 +304,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a1, a0};
@@ -391,21 +363,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {
-      a0,  // callee
-      t0,  // call_data
-      a2,  // holder
-      a1,  // api_function_address
-      a3,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       a0,  // callee
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e3544c5..7cbbd3a 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,3 @@
-
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -100,6 +99,34 @@
   sw(source, MemOperand(s6, index << kPointerSizeLog2));
 }
 
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+  if (marker_reg.is_valid()) {
+    Push(ra, fp, marker_reg);
+    Addu(fp, sp, Operand(kPointerSize));
+  } else {
+    Push(ra, fp);
+    mov(fp, sp);
+  }
+}
+
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+  if (marker_reg.is_valid()) {
+    Pop(ra, fp, marker_reg);
+  } else {
+    Pop(ra, fp);
+  }
+}
+
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+  int offset = -StandardFrameConstants::kContextOffset;
+  if (function_reg.is_valid()) {
+    Push(ra, fp, cp, function_reg);
+    offset += kPointerSize;
+  } else {
+    Push(ra, fp, cp);
+  }
+  Addu(fp, sp, Operand(offset));
+}
 
 // Push and pop all registers that can hold pointers.
 void MacroAssembler::PushSafepointRegisters() {
@@ -455,12 +482,12 @@
   sw(scratch, MemOperand(t8));
   // Call stub on end of buffer.
   // Check for end of buffer.
-  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
   if (and_then == kFallThroughAtEnd) {
-    Branch(&done, eq, t8, Operand(zero_reg));
+    Branch(&done, ne, t8, Operand(zero_reg));
   } else {
     DCHECK(and_then == kReturnAtEnd);
-    Ret(eq, t8, Operand(zero_reg));
+    Ret(ne, t8, Operand(zero_reg));
   }
   push(ra);
   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -481,13 +508,25 @@
                                             Register scratch,
                                             Label* miss) {
   Label same_contexts;
+  Register temporary = t8;
 
   DCHECK(!holder_reg.is(scratch));
   DCHECK(!holder_reg.is(at));
   DCHECK(!scratch.is(at));
 
-  // Load current lexical context from the stack frame.
-  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  mov(at, fp);
+  bind(&load_context);
+  lw(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
+  // Pass a temporary register; otherwise JumpIfNotSmi modifies the at register.
+  JumpIfNotSmi(scratch, &has_context, temporary);
+  lw(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
+  Branch(&load_context);
+  bind(&has_context);
+
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
@@ -764,6 +803,34 @@
   }
 }
 
+void MacroAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
+                          const Operand& rt) {
+  Register reg;
+  if (rt.is_reg()) {
+    reg = rt.rm();
+  } else {
+    DCHECK(!rs.is(at));
+    reg = at;
+    li(reg, rt);
+  }
+
+  if (!IsMipsArchVariant(kMips32r6)) {
+    multu(rs, reg);
+    mflo(rd_lo);
+    mfhi(rd_hi);
+  } else {
+    if (rd_lo.is(rs)) {
+      DCHECK(!rd_hi.is(rs));
+      DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
+      muhu(rd_hi, rs, reg);
+      mulu(rd_lo, rs, reg);
+    } else {
+      DCHECK(!rd_hi.is(reg) && !rd_lo.is(reg));
+      mulu(rd_lo, rs, reg);
+      muhu(rd_hi, rs, reg);
+    }
+  }
+}
 
 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
@@ -1078,7 +1145,7 @@
     if (rt.is_reg()) {
       rotrv(rd, rs, rt.rm());
     } else {
-      rotr(rd, rs, rt.imm32_);
+      rotr(rd, rs, rt.imm32_ & 0x1f);
     }
   } else {
     if (rt.is_reg()) {
@@ -1090,8 +1157,8 @@
       if (rt.imm32_ == 0) {
         srl(rd, rs, 0);
       } else {
-        srl(at, rs, rt.imm32_);
-        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
+        srl(at, rs, rt.imm32_ & 0x1f);
+        sll(rd, rs, (0x20 - (rt.imm32_ & 0x1f)) & 0x1f);
         or_(rd, rd, at);
       }
     }
@@ -1110,8 +1177,9 @@
 
 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                          Register scratch) {
+  DCHECK(sa >= 1 && sa <= 31);
   if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
-    lsa(rd, rt, rs, sa);
+    lsa(rd, rt, rs, sa - 1);
   } else {
     Register tmp = rd.is(rt) ? scratch : rd;
     DCHECK(!tmp.is(rt));
@@ -1840,6 +1908,185 @@
   }
 }
 
+#define __ masm->
+
+static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+                         FPURegister src1, FPURegister src2, Label* equal) {
+  if (src1.is(src2)) {
+    __ Move(dst, src1);
+    return true;
+  }
+
+  Label other, compare_not_equal;
+  FPURegister left, right;
+  if (kind == MaxMinKind::kMin) {
+    left = src1;
+    right = src2;
+  } else {
+    left = src2;
+    right = src1;
+  }
+
+  __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
+  // Left and right hand side are equal, check for -0 vs. +0.
+  __ FmoveHigh(t8, src1);
+  __ Branch(&other, eq, t8, Operand(0x80000000));
+  __ Move_d(dst, right);
+  __ Branch(equal);
+  __ bind(&other);
+  __ Move_d(dst, left);
+  __ Branch(equal);
+  __ bind(&compare_not_equal);
+  return false;
+}
+
+static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+                         FPURegister src1, FPURegister src2, Label* equal) {
+  if (src1.is(src2)) {
+    __ Move(dst, src1);
+    return true;
+  }
+
+  Label other, compare_not_equal;
+  FPURegister left, right;
+  if (kind == MaxMinKind::kMin) {
+    left = src1;
+    right = src2;
+  } else {
+    left = src2;
+    right = src1;
+  }
+
+  __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
+  // Left and right hand side are equal, check for -0 vs. +0.
+  __ FmoveLow(t8, src1);
+  __ Branch(&other, eq, t8, Operand(0x80000000));
+  __ Move_s(dst, right);
+  __ Branch(equal);
+  __ bind(&other);
+  __ Move_s(dst, left);
+  __ Branch(equal);
+  __ bind(&compare_not_equal);
+  return false;
+}
+
+#undef __
+
+void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF64(nullptr, nan, eq, src1, src2);
+  }
+  if (IsMipsArchVariant(kMips32r6)) {
+    min_d(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF64(&skip, nullptr, le, src1, src2);
+        Move_d(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF64(&skip, nullptr, ge, src1, src2);
+        Move_d(dst, src1);
+      } else {
+        Label right;
+        BranchF64(&right, nullptr, gt, src1, src2);
+        Move_d(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_d(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
+
+void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF64(nullptr, nan, eq, src1, src2);
+  }
+  if (IsMipsArchVariant(kMips32r6)) {
+    max_d(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF64(&skip, nullptr, ge, src1, src2);
+        Move_d(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF64(&skip, nullptr, le, src1, src2);
+        Move_d(dst, src1);
+      } else {
+        Label right;
+        BranchF64(&right, nullptr, lt, src1, src2);
+        Move_d(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_d(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
+
+void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF32(nullptr, nan, eq, src1, src2);
+  }
+  if (IsMipsArchVariant(kMips32r6)) {
+    min_s(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF32(&skip, nullptr, le, src1, src2);
+        Move_s(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF32(&skip, nullptr, ge, src1, src2);
+        Move_s(dst, src1);
+      } else {
+        Label right;
+        BranchF32(&right, nullptr, gt, src1, src2);
+        Move_s(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_s(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
+
+void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF32(nullptr, nan, eq, src1, src2);
+  }
+  if (IsMipsArchVariant(kMips32r6)) {
+    max_s(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF32(&skip, nullptr, ge, src1, src2);
+        Move_s(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF32(&skip, nullptr, le, src1, src2);
+        Move_s(dst, src1);
+      } else {
+        Label right;
+        BranchF32(&right, nullptr, lt, src1, src2);
+        Move_s(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_s(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
 
 void MacroAssembler::Clz(Register rd, Register rs) {
   if (IsMipsArchVariant(kLoongson)) {
@@ -3011,16 +3258,25 @@
                           const Operand& rt,
                           BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (cond == cc_always) {
-    jr(target);
+  if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+    if (cond == cc_always) {
+      jic(target, 0);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jic(target, 0);
+    }
   } else {
-    BRANCH_ARGS_CHECK(cond, rs, rt);
-    Branch(2, NegateCondition(cond), rs, rt);
-    jr(target);
+    if (cond == cc_always) {
+      jr(target);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jr(target);
+    }
+    // Emit a nop in the branch delay slot if required.
+    if (bd == PROTECT) nop();
   }
-  // Emit a nop in the branch delay slot if required.
-  if (bd == PROTECT)
-    nop();
 }
 
 
@@ -3078,8 +3334,7 @@
     size += 3;
   }
 
-  if (bd == PROTECT)
-    size += 1;
+  if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
 
   return size * kInstrSize;
 }
@@ -3098,16 +3353,25 @@
   BlockTrampolinePoolScope block_trampoline_pool(this);
   Label start;
   bind(&start);
-  if (cond == cc_always) {
-    jalr(target);
+  if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+    if (cond == cc_always) {
+      jialc(target, 0);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jialc(target, 0);
+    }
   } else {
-    BRANCH_ARGS_CHECK(cond, rs, rt);
-    Branch(2, NegateCondition(cond), rs, rt);
-    jalr(target);
+    if (cond == cc_always) {
+      jalr(target);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jalr(target);
+    }
+    // Emit a nop in the branch delay slot if required.
+    if (bd == PROTECT) nop();
   }
-  // Emit a nop in the branch delay slot if required.
-  if (bd == PROTECT)
-    nop();
 
 #ifdef DEBUG
   CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
@@ -3198,18 +3462,35 @@
     BlockTrampolinePoolScope block_trampoline_pool(this);
     uint32_t imm32;
     imm32 = jump_address(L);
-    {
-      BlockGrowBufferScope block_buf_growth(this);
-      // Buffer growth (and relocation) must be blocked for internal references
-      // until associated instructions are emitted and available to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-      lui(at, (imm32 & kHiMask) >> kLuiShift);
-      ori(at, at, (imm32 & kImm16Mask));
+    if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+      uint32_t lui_offset, jic_offset;
+      UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+      {
+        BlockGrowBufferScope block_buf_growth(this);
+        // Buffer growth (and relocation) must be blocked for internal
+        // references until associated instructions are emitted and
+        // available to be patched.
+        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+        lui(at, lui_offset);
+        jic(at, jic_offset);
+      }
+      CheckBuffer();
+    } else {
+      {
+        BlockGrowBufferScope block_buf_growth(this);
+        // Buffer growth (and relocation) must be blocked for internal
+        // references
+        // until associated instructions are emitted and available to be
+        // patched.
+        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+        lui(at, (imm32 & kHiMask) >> kLuiShift);
+        ori(at, at, (imm32 & kImm16Mask));
+      }
+      CheckBuffer();
+      jr(at);
+      // Emit a nop in the branch delay slot if required.
+      if (bdslot == PROTECT) nop();
     }
-    jr(at);
-
-    // Emit a nop in the branch delay slot if required.
-    if (bdslot == PROTECT) nop();
   }
 }
 
@@ -3222,18 +3503,35 @@
     BlockTrampolinePoolScope block_trampoline_pool(this);
     uint32_t imm32;
     imm32 = jump_address(L);
-    {
-      BlockGrowBufferScope block_buf_growth(this);
-      // Buffer growth (and relocation) must be blocked for internal references
-      // until associated instructions are emitted and available to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-      lui(at, (imm32 & kHiMask) >> kLuiShift);
-      ori(at, at, (imm32 & kImm16Mask));
+    if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+      uint32_t lui_offset, jic_offset;
+      UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+      {
+        BlockGrowBufferScope block_buf_growth(this);
+        // Buffer growth (and relocation) must be blocked for internal
+        // references until associated instructions are emitted and
+        // available to be patched.
+        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+        lui(at, lui_offset);
+        jialc(at, jic_offset);
+      }
+      CheckBuffer();
+    } else {
+      {
+        BlockGrowBufferScope block_buf_growth(this);
+        // Buffer growth (and relocation) must be blocked for internal
+        // references
+        // until associated instructions are emitted and available to be
+        // patched.
+        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+        lui(at, (imm32 & kHiMask) >> kLuiShift);
+        ori(at, at, (imm32 & kImm16Mask));
+      }
+      CheckBuffer();
+      jalr(at);
+      // Emit a nop in the branch delay slot if required.
+      if (bdslot == PROTECT) nop();
     }
-    jalr(at);
-
-    // Emit a nop in the branch delay slot if required.
-    if (bdslot == PROTECT) nop();
   }
 }
 
@@ -4062,6 +4360,65 @@
 // -----------------------------------------------------------------------------
 // JavaScript invokes.
 
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+                                        Register caller_args_count_reg,
+                                        Register scratch0, Register scratch1) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+#endif
+
+  // Calculate the end of destination area where we will put the arguments
+  // after we drop current frame. We add kPointerSize to count the receiver
+  // argument which is not included into formal parameters count.
+  Register dst_reg = scratch0;
+  Lsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
+  Addu(dst_reg, dst_reg,
+       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = caller_args_count_reg;
+  // Calculate the end of source area. +kPointerSize is for the receiver.
+  if (callee_args_count.is_reg()) {
+    Lsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
+    Addu(src_reg, src_reg, Operand(kPointerSize));
+  } else {
+    Addu(src_reg, sp,
+         Operand((callee_args_count.immediate() + 1) * kPointerSize));
+  }
+
+  if (FLAG_debug_code) {
+    Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch1;
+  Label loop, entry;
+  Branch(&entry);
+  bind(&loop);
+  Subu(src_reg, src_reg, Operand(kPointerSize));
+  Subu(dst_reg, dst_reg, Operand(kPointerSize));
+  lw(tmp_reg, MemOperand(src_reg));
+  sw(tmp_reg, MemOperand(dst_reg));
+  bind(&entry);
+  Branch(&loop, ne, sp, Operand(src_reg));
+
+  // Leave current frame.
+  mov(sp, dst_reg);
+}
+
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
                                     Label* done,
@@ -4846,12 +5203,9 @@
   }
 }
 
-
-void MacroAssembler::StubPrologue() {
-    Push(ra, fp, cp);
-    Push(Smi::FromInt(StackFrame::STUB));
-    // Adjust FP to point to saved FP.
-    Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+  li(at, Operand(Smi::FromInt(type)));
+  PushCommonFrame(at);
 }
 
 
@@ -4874,10 +5228,8 @@
     nop();  // Branch delay slot nop.
     nop();  // Pad the empty space.
   } else {
-    Push(ra, fp, cp, a1);
+    PushStandardFrame(a1);
     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
-    // Adjust fp to point to caller's fp.
-    Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
   }
 }
 
@@ -4898,30 +5250,41 @@
 
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
-  addiu(sp, sp, -5 * kPointerSize);
-  li(t8, Operand(Smi::FromInt(type)));
-  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
-  sw(ra, MemOperand(sp, 4 * kPointerSize));
-  sw(fp, MemOperand(sp, 3 * kPointerSize));
-  sw(cp, MemOperand(sp, 2 * kPointerSize));
-  sw(t8, MemOperand(sp, 1 * kPointerSize));
-  sw(t9, MemOperand(sp, 0 * kPointerSize));
+  int stack_offset, fp_offset;
+  if (type == StackFrame::INTERNAL) {
+    stack_offset = -4 * kPointerSize;
+    fp_offset = 2 * kPointerSize;
+  } else {
+    stack_offset = -3 * kPointerSize;
+    fp_offset = 1 * kPointerSize;
+  }
+  addiu(sp, sp, stack_offset);
+  stack_offset = -stack_offset - kPointerSize;
+  sw(ra, MemOperand(sp, stack_offset));
+  stack_offset -= kPointerSize;
+  sw(fp, MemOperand(sp, stack_offset));
+  stack_offset -= kPointerSize;
+  li(t9, Operand(Smi::FromInt(type)));
+  sw(t9, MemOperand(sp, stack_offset));
+  if (type == StackFrame::INTERNAL) {
+    DCHECK_EQ(stack_offset, kPointerSize);
+    li(t9, Operand(CodeObject()));
+    sw(t9, MemOperand(sp, 0));
+  } else {
+    DCHECK_EQ(stack_offset, 0);
+  }
   // Adjust FP to point to saved FP.
-  Addu(fp, sp,
-       Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+  Addu(fp, sp, Operand(fp_offset));
 }
 
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
-  mov(sp, fp);
-  lw(fp, MemOperand(sp, 0 * kPointerSize));
-  lw(ra, MemOperand(sp, 1 * kPointerSize));
-  addiu(sp, sp, 2 * kPointerSize);
+  addiu(sp, fp, 2 * kPointerSize);
+  lw(ra, MemOperand(fp, 1 * kPointerSize));
+  lw(fp, MemOperand(fp, 0 * kPointerSize));
 }
 
-
-void MacroAssembler::EnterExitFrame(bool save_doubles,
-                                    int stack_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // Set up the frame structure on the stack.
   STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
   STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
@@ -4931,16 +5294,20 @@
   // fp + 2 (==kCallerSPDisplacement) - old stack's end
   // [fp + 1 (==kCallerPCOffset)] - saved old ra
   // [fp + 0 (==kCallerFPOffset)] - saved old fp
-  // [fp - 1 (==kSPOffset)] - sp of the called function
-  // [fp - 2 (==kCodeOffset)] - CodeObject
+  // [fp - 1] - StackFrame::EXIT Smi
+  // [fp - 2 (==kSPOffset)] - sp of the called function
+  // [fp - 3 (==kCodeOffset)] - CodeObject
   // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
   //   new stack (will contain saved ra)
 
-  // Save registers.
-  addiu(sp, sp, -4 * kPointerSize);
-  sw(ra, MemOperand(sp, 3 * kPointerSize));
-  sw(fp, MemOperand(sp, 2 * kPointerSize));
-  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
+  // Save registers and reserve room for saved entry sp and code object.
+  addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+  sw(ra, MemOperand(sp, 4 * kPointerSize));
+  sw(fp, MemOperand(sp, 3 * kPointerSize));
+  li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
+  sw(at, MemOperand(sp, 2 * kPointerSize));
+  // Set up new frame pointer.
+  addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
 
   if (emit_debug_code()) {
     sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -5177,6 +5544,15 @@
   JumpIfSmi(at, on_either_smi);
 }
 
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    andi(at, object, kSmiTagMask);
+    Check(ne, kOperandIsANumber, at, Operand(zero_reg));
+    GetObjectType(object, t8, t8);
+    Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
+  }
+}
 
 void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
@@ -5708,28 +6084,45 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TestJSArrayForAllocationMemento(
-    Register receiver_reg,
-    Register scratch_reg,
-    Label* no_memento_found,
-    Condition cond,
-    Label* allocation_memento_present) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+                                                     Register scratch_reg,
+                                                     Label* no_memento_found) {
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
-  Addu(scratch_reg, receiver_reg,
-       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
-  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+  Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
+  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+  Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
+  // Continue with the actual map check.
+  jmp(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
   li(at, Operand(new_space_allocation_top));
   lw(at, MemOperand(at));
   Branch(no_memento_found, gt, scratch_reg, Operand(at));
-  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
-  if (allocation_memento_present) {
-    Branch(allocation_memento_present, cond, scratch_reg,
-           Operand(isolate()->factory()->allocation_memento_map()));
-  }
+  // Memento map check.
+  bind(&map_check);
+  lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+  Branch(no_memento_found, ne, scratch_reg,
+         Operand(isolate()->factory()->allocation_memento_map()));
 }
 
 
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 05a8fec..2f02865 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -237,7 +237,8 @@
 
   void Call(Label* target);
 
-  void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+  inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
+  inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
 
   inline void Move(Register dst, Register src) {
     if (!dst.is(src)) {
@@ -245,12 +246,20 @@
     }
   }
 
-  inline void Move(FPURegister dst, FPURegister src) {
+  inline void Move_d(FPURegister dst, FPURegister src) {
     if (!dst.is(src)) {
       mov_d(dst, src);
     }
   }
 
+  inline void Move_s(FPURegister dst, FPURegister src) {
+    if (!dst.is(src)) {
+      mov_s(dst, src);
+    }
+  }
+
+  inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
+
   inline void Move(Register dst_low, Register dst_high, FPURegister src) {
     mfc1(dst_low, src);
     Mfhc1(dst_high, src);
@@ -284,6 +293,17 @@
   void Movt(Register rd, Register rs, uint16_t cc = 0);
   void Movf(Register rd, Register rs, uint16_t cc = 0);
 
+  // Min, Max macros.
+  // On pre-r6 these functions may modify at and t8 registers.
+  void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+  void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+  void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+  void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+
   void Clz(Register rd, Register rs);
 
   // Jump unconditionally to given label.
@@ -629,6 +649,7 @@
 
   DEFINE_INSTRUCTION3(Div);
   DEFINE_INSTRUCTION3(Mul);
+  DEFINE_INSTRUCTION3(Mulu);
 
   DEFINE_INSTRUCTION(And);
   DEFINE_INSTRUCTION(Or);
@@ -646,8 +667,12 @@
 #undef DEFINE_INSTRUCTION2
 #undef DEFINE_INSTRUCTION3
 
+  // Load Scaled Address instructions. Parameter sa (shift argument) must be
+  // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
+  // may be clobbered.
   void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
            Register scratch = at);
+
   void Pref(int32_t hint, const MemOperand& rs);
 
 
@@ -761,6 +786,14 @@
     Addu(sp, sp, Operand(count * kPointerSize));
   }
 
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
+  // Push a standard frame, consisting of ra, fp, context and JS function.
+  void PushStandardFrame(Register function_reg);
+
+  void PopCommonFrame(Register marker_reg = no_reg);
+
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
@@ -981,8 +1014,16 @@
   // -------------------------------------------------------------------------
   // JavaScript invokes.
 
-  // Invoke the JavaScript function code by either calling or jumping.
+  // Removes current frame and its arguments from the stack preserving
+  // the arguments and a return address pushed to the stack for the next call.
+  // Both |callee_args_count| and |caller_args_count_reg| do not include
+  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+  // is trashed.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1);
 
+  // Invoke the JavaScript function code by either calling or jumping.
   void InvokeFunctionCode(Register function, Register new_target,
                           const ParameterCount& expected,
                           const ParameterCount& actual, InvokeFlag flag,
@@ -1485,6 +1526,9 @@
   // Jump if either of the registers contain a smi.
   void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
 
+  // Abort execution if argument is a number, enabled via --debug-code.
+  void AssertNotNumber(Register object);
+
   // Abort execution if argument is a smi, enabled via --debug-code.
   void AssertNotSmi(Register object);
   void AssertSmi(Register object);
@@ -1604,7 +1648,7 @@
   }
 
   // Generates function and stub prologue code.
-  void StubPrologue();
+  void StubPrologue(StackFrame::Type type);
   void Prologue(bool code_pre_aging);
 
   // Load the type feedback vector from a JavaScript frame.
@@ -1619,25 +1663,22 @@
   // in a0.  Assumes that any other register can be used as a scratch.
   void CheckEnumCache(Label* call_runtime);
 
-  // AllocationMemento support. Arrays may have an associated
-  // AllocationMemento object that can be checked for in order to pretransition
-  // to another type.
-  // On entry, receiver_reg should point to the array object.
-  // scratch_reg gets clobbered.
-  // If allocation info is present, jump to allocation_memento_present.
-  void TestJSArrayForAllocationMemento(
-      Register receiver_reg,
-      Register scratch_reg,
-      Label* no_memento_found,
-      Condition cond = al,
-      Label* allocation_memento_present = NULL);
+  // AllocationMemento support. Arrays may have an associated AllocationMemento
+  // object that can be checked for in order to pretransition to another type.
+  // On entry, receiver_reg should point to the array object. scratch_reg gets
+  // clobbered. If no info is present jump to no_memento_found, otherwise fall
+  // through.
+  void TestJSArrayForAllocationMemento(Register receiver_reg,
+                                       Register scratch_reg,
+                                       Label* no_memento_found);
 
   void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                          Register scratch_reg,
                                          Label* memento_found) {
     Label no_memento_found;
     TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found, eq, memento_found);
+                                    &no_memento_found);
+    Branch(memento_found);
     bind(&no_memento_found);
   }
 
@@ -1773,16 +1814,18 @@
   if (kArchVariant >= kMips32r6) {
     BlockTrampolinePoolFor(case_count + 5);
     addiupc(at, 5);
-    lsa(at, at, index, kPointerSizeLog2);
+    Lsa(at, at, index, kPointerSizeLog2);
     lw(at, MemOperand(at));
   } else {
     Label here;
-    BlockTrampolinePoolFor(case_count + 6);
+    BlockTrampolinePoolFor(case_count + 10);
+    push(ra);
     bal(&here);
     sll(at, index, kPointerSizeLog2);  // Branch delay slot.
     bind(&here);
     addu(at, at, ra);
-    lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+    pop(ra);
+    lw(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
   }
   jr(at);
   nop();  // Branch delay slot nop.
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 0c91cb5..e37b6e1 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -4483,7 +4483,7 @@
 
   // Set up the callee-saved registers with a known value. To be able to check
   // that they are preserved properly across JS execution.
-  int32_t callee_saved_value = icount_;
+  int32_t callee_saved_value = static_cast<int32_t>(icount_);
   set_register(s0, callee_saved_value);
   set_register(s1, callee_saved_value);
   set_register(s2, callee_saved_value);
diff --git a/src/mips64/assembler-mips64-inl.h b/src/mips64/assembler-mips64-inl.h
index 37ee3a6..dec58e8 100644
--- a/src/mips64/assembler-mips64-inl.h
+++ b/src/mips64/assembler-mips64-inl.h
@@ -102,6 +102,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) ||
@@ -154,6 +158,18 @@
   }
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+                                   icache_flush_mode);
+}
 
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
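
The helper added above rebases an embedded wasm memory address by preserving its offset from the old base. A minimal standalone sketch of that arithmetic, with hypothetical names (Address here is just a byte pointer, not V8's typedef):

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

Address RebaseReference(Address old_ref, Address old_base, Address new_base,
                        size_t old_size, size_t new_size) {
  // The old reference must point into the old memory region.
  assert(old_base <= old_ref && old_ref < old_base + old_size);
  // Keep the offset relative to the base while switching regions.
  Address updated = new_base + (old_ref - old_base);
  // The rebased reference must land inside the new region.
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}
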
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
index f0d3eba..5a8dd2c 100644
--- a/src/mips64/assembler-mips64.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -1372,17 +1372,21 @@
 
 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(rs.code() >= rt.code());
-  GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  if (rs.code() >= rt.code()) {
+    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  } else {
+    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+  }
 }
 
 
 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
   DCHECK(kArchVariant == kMips64r6);
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(rs.code() >= rt.code());
-  GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  if (rs.code() >= rt.code()) {
+    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  } else {
+    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+  }
 }
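
The relaxed bovc/bnvc emitters above swap rs and rt when needed because the r6 encoding requires rs.code() >= rt.code(); the swap is safe since signed-add overflow is symmetric in its operands. A small sketch of that predicate (hypothetical helper, not V8 code):

#include <cstdint>
#include <limits>

bool SignedAddOverflows(int32_t a, int32_t b) {
  // Overflow of a + b is unchanged when the operands are swapped.
  int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  return wide > std::numeric_limits<int32_t>::max() ||
         wide < std::numeric_limits<int32_t>::min();
}
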
 
 
@@ -1863,6 +1867,12 @@
   emit(instr);
 }
 
+void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
+  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
+                (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
+  emit(instr);
+}
 
 void Assembler::drotrv(Register rd, Register rt, Register rs) {
   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
@@ -1899,20 +1909,20 @@
 
 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
-  DCHECK(sa < 5 && sa > 0);
+  DCHECK(sa <= 3);
   DCHECK(kArchVariant == kMips64r6);
-  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
-                (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
+                rd.code() << kRdShift | sa << kSaShift | LSA;
   emit(instr);
 }
 
 
 void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
-  DCHECK(sa < 5 && sa > 0);
+  DCHECK(sa <= 3);
   DCHECK(kArchVariant == kMips64r6);
-  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
-                (rd.code() << kRdShift) | (sa - 1) << kSaShift | DLSA;
+  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
+                rd.code() << kRdShift | sa << kSaShift | DLSA;
   emit(instr);
 }
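
At the macro level, Lsa/Dlsa compute rd = rt + (rs << sa); the emitters above now take the raw 2-bit field (0..3) while the hardware shifts by field + 1, which is why the MacroAssembler wrappers later in this patch pass sa - 1. A one-line sketch of the computed address (hypothetical helper, not the emitted encoding):

#include <cstdint>

// Macro-level effect of Lsa/Dlsa in V8's (rd, rt, rs, sa) operand order.
uint64_t LoadScaledAddress(uint64_t rt, uint64_t rs, unsigned sa) {
  return rt + (rs << sa);
}
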
 
@@ -2493,7 +2503,6 @@
 
 
 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
-  DCHECK(!src.rm().is(at));
   if (is_int16(src.offset_)) {
     GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
@@ -3210,7 +3219,7 @@
   // We do not try to reuse pool constants.
   RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::COMMENT &&
-      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
+      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
     // Adjust code for new modes.
     DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsComment(rmode)
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index bf2285a..de09366 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -535,7 +535,11 @@
 
   // Distance between the instruction referring to the address of the call
   // target and the return address.
+#ifdef _MIPS_ARCH_MIPS64R6
+  static const int kCallTargetAddressOffset = 5 * kInstrSize;
+#else
   static const int kCallTargetAddressOffset = 6 * kInstrSize;
+#endif
 
   // Distance between start of patched debug break slot and the emitted address
   // to jump to.
@@ -545,7 +549,11 @@
   // register.
   static const int kPcLoadDelta = 4;
 
+#ifdef _MIPS_ARCH_MIPS64R6
+  static const int kDebugBreakSlotInstructions = 5;
+#else
   static const int kDebugBreakSlotInstructions = 6;
+#endif
   static const int kDebugBreakSlotLength =
       kDebugBreakSlotInstructions * kInstrSize;
 
@@ -783,6 +791,7 @@
   void dsrl(Register rd, Register rt, uint16_t sa);
   void dsrlv(Register rd, Register rt, Register rs);
   void drotr(Register rd, Register rt, uint16_t sa);
+  void drotr32(Register rd, Register rt, uint16_t sa);
   void drotrv(Register rd, Register rt, Register rs);
   void dsra(Register rt, Register rd, uint16_t sa);
   void dsrav(Register rd, Register rt, Register rs);
@@ -790,10 +799,6 @@
   void dsrl32(Register rt, Register rd, uint16_t sa);
   void dsra32(Register rt, Register rd, uint16_t sa);
 
-  // Address computing instructions with shift.
-  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
-  void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
-
   // ------------Memory-instructions-------------
 
   void lb(Register rd, const MemOperand& rs);
@@ -1107,7 +1112,9 @@
   void dp(uintptr_t data) { dq(data); }
   void dd(Label* label);
 
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
 
   // Postpone the generation of the trampoline pool for the specified number of
   // instructions.
@@ -1206,6 +1213,10 @@
   bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
 
  protected:
+  // Load Scaled Address instructions.
+  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
+  void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
+
   // Relocation for a type-recording IC has the AST id added to it.  This
   // member variable is a way to pass the information from the call site to
   // the relocation info.
@@ -1490,8 +1501,8 @@
   friend class CodePatcher;
   friend class BlockTrampolinePoolScope;
 
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
   friend class EnsureSpace;
 };
 
diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc
index 1d8d5d3..b55b77c 100644
--- a/src/mips64/builtins-mips64.cc
+++ b/src/mips64/builtins-mips64.cc
@@ -148,17 +148,15 @@
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
   // -----------------------------------
-  Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
   Heap::RootListIndex const root_index =
       (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
                                      : Heap::kMinusInfinityValueRootIndex;
-  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
 
   // Load the accumulator with the default return value (either -Infinity or
   // +Infinity), with the tagged value in a1 and the double value in f0.
   __ LoadRoot(a1, root_index);
   __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
-  __ mov(a3, a0);
+  __ Addu(a3, a0, 1);
 
   Label done_loop, loop;
   __ bind(&loop);
@@ -210,23 +208,21 @@
     __ SmiToDoubleFPURegister(a2, f2, a4);
     __ bind(&done_convert);
 
-    // Perform the actual comparison with the accumulator value on the left hand
-    // side (f0) and the next parameter value on the right hand side (f2).
-    Label compare_equal, compare_nan, compare_swap;
-    __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
-    __ BranchF(&compare_swap, nullptr, cc, f0, f2);
-    __ Branch(&loop);
-
-    // Left and right hand side are equal, check for -0 vs. +0.
-    __ bind(&compare_equal);
-    __ FmoveHigh(a4, reg);
-    // Make a4 unsigned.
-    __ dsll32(a4, a4, 0);
-    __ Branch(&loop, ne, a4, Operand(0x8000000000000000));
-
-    // Result is on the right hand side.
-    __ bind(&compare_swap);
-    __ mov_d(f0, f2);
+    // Perform the actual comparison using the Min/Max macro instructions, with
+    // the accumulator value on the left-hand side (f0) and the next parameter
+    // value on the right-hand side (f2).
+    // We need to work out which HeapNumber (or smi) the result came from.
+    Label compare_nan;
+    __ BranchF(nullptr, &compare_nan, eq, f0, f2);
+    __ Move(a4, f0);
+    if (kind == MathMaxMinKind::kMin) {
+      __ MinNaNCheck_d(f0, f0, f2);
+    } else {
+      DCHECK(kind == MathMaxMinKind::kMax);
+      __ MaxNaNCheck_d(f0, f0, f2);
+    }
+    __ Move(at, f0);
+    __ Branch(&loop, eq, a4, Operand(at));
     __ mov(a1, a2);
     __ jmp(&loop);
 
@@ -239,8 +235,8 @@
 
   __ bind(&done_loop);
   __ Dlsa(sp, sp, a3, kPointerSizeLog2);
-  __ mov(v0, a1);
-  __ DropAndRet(1);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a1);  // In delay slot.
 }
 
 // static
@@ -528,6 +524,7 @@
   //  -- a1     : constructor function
   //  -- a2     : allocation site or undefined
   //  -- a3     : new target
+  //  -- cp     : context
   //  -- ra     : return address
   //  -- sp[...]: constructor arguments
   // -----------------------------------
@@ -541,7 +538,7 @@
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(a2, t0);
     __ SmiTag(a0);
-    __ Push(a2, a0);
+    __ Push(cp, a2, a0);
 
     if (create_implicit_receiver) {
       __ Push(a1, a3);
@@ -612,7 +609,7 @@
     }
 
     // Restore context from the frame.
-    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -743,8 +740,6 @@
   //  -- s0: argv
   // -----------------------------------
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
-  // Clear the context before we push it when entering the JS frame.
-  __ mov(cp, zero_reg);
 
   // Enter an internal frame.
   {
@@ -839,9 +834,7 @@
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-
-  __ Push(ra, fp, cp, a1);
-  __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(a1);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
@@ -1197,8 +1190,7 @@
   __ MultiPop(saved_regs);
 
   // Perform prologue operations usually performed by the young code stub.
-  __ Push(ra, fp, cp, a1);
-  __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(a1);
 
   // Jump to point after the code-age stub.
   __ Daddu(a0, a0, Operand((kNoCodeAgeSequenceLength)));
@@ -1428,23 +1420,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  __ LoadRoot(at, Heap::kStackLimitRootIndex);
-  __ Branch(&ok, hs, sp, Operand(at));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&ok);
-  __ Ret();
-}
-
-
 // static
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
@@ -1491,6 +1466,27 @@
   __ TailCallRuntime(Runtime::kThrowNotDateError);
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : argc
+  //  -- sp[0] : first argument (left-hand side)
+  //  -- sp[8] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ ld(InstanceOfDescriptor::LeftRegister(),
+          MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
+    __ ld(InstanceOfDescriptor::RightRegister(),
+          MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ DropAndRet(2);
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1956,18 +1952,20 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ li(at, Operand(debug_is_active));
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ li(at, Operand(is_tail_call_elimination_enabled));
   __ lb(scratch1, MemOperand(at));
-  __ Branch(&done, ne, scratch1, Operand(zero_reg));
+  __ Branch(&done, eq, scratch1, Operand(zero_reg));
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ ld(scratch3,
+          MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Branch(&no_interpreter_frame, ne, scratch3,
               Operand(Smi::FromInt(StackFrame::STUB)));
     __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1975,71 +1973,36 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ ld(scratch3,
+        MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&no_arguments_adaptor, ne, scratch3,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
-  __ ld(scratch1,
+  __ ld(caller_args_count_reg,
         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
   __ Branch(&formal_parameter_count_loaded);
 
   __ bind(&no_arguments_adaptor);
   // Load caller's formal parameter count
-  __ ld(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(scratch1,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
   __ ld(scratch1,
         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(scratch1,
+  __ lw(caller_args_count_reg,
         FieldMemOperand(scratch1,
                         SharedFunctionInfo::kFormalParameterCountOffset));
 
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the end of destination area where we will put the arguments
-  // after we drop current frame. We add kPointerSize to count the receiver
-  // argument which is not included into formal parameters count.
-  Register dst_reg = scratch2;
-  __ Dlsa(dst_reg, fp, scratch1, kPointerSizeLog2);
-  __ Daddu(dst_reg, dst_reg,
-           Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
-  Register src_reg = scratch1;
-  __ Dlsa(src_reg, sp, args_reg, kPointerSizeLog2);
-  // Count receiver argument as well (not included in args_reg).
-  __ Daddu(src_reg, src_reg, Operand(kPointerSize));
-
-  if (FLAG_debug_code) {
-    __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
-  }
-
-  // Restore caller's frame pointer and return address now as they will be
-  // overwritten by the copying loop.
-  __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-  __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-
-  // Both src_reg and dst_reg are pointing to the word after the one to copy,
-  // so they must be pre-decremented in the loop.
-  Register tmp_reg = scratch3;
-  Label loop, entry;
-  __ Branch(&entry);
-  __ bind(&loop);
-  __ Dsubu(src_reg, src_reg, Operand(kPointerSize));
-  __ Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
-  __ ld(tmp_reg, MemOperand(src_reg));
-  __ sd(tmp_reg, MemOperand(dst_reg));
-  __ bind(&entry);
-  __ Branch(&loop, ne, sp, Operand(src_reg));
-
-  // Leave current frame.
-  __ mov(sp, dst_reg);
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
   __ bind(&done);
 }
 }  // namespace
@@ -2549,27 +2512,6 @@
 
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
-
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-    __ lbu(a5, FieldMemOperand(a4, SharedFunctionInfo::kStrongModeByteOffset));
-    __ And(a5, a5, Operand(1 << SharedFunctionInfo::kStrongModeBitWithinByte));
-    __ Branch(&no_strong_error, eq, a5, Operand(zero_reg));
-
-    // What we really care about is the required number of arguments.
-    DCHECK_EQ(kPointerSize, kInt64Size);
-    __ lw(a5, FieldMemOperand(a4, SharedFunctionInfo::kLengthOffset));
-    __ srl(a5, a5, 1);
-    __ Branch(&no_strong_error, ge, a0, Operand(a5));
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentAdaptorStackCheck(masm, &stack_overflow);
 
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 28812ad..fdb6c81 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -4,8 +4,9 @@
 
 #if V8_TARGET_ARCH_MIPS64
 
-#include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
@@ -75,6 +76,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -502,7 +507,7 @@
          (lhs.is(a1) && rhs.is(a0)));
 
   // a2 is object type of rhs.
-  Label object_test, return_unequal, undetectable;
+  Label object_test, return_equal, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ And(at, a2, Operand(kIsNotStringMask));
   __ Branch(&object_test, ne, at, Operand(zero_reg));
@@ -542,6 +547,16 @@
   __ bind(&undetectable);
   __ And(at, t1, Operand(1 << Map::kIsUndetectable));
   __ Branch(&return_unequal, eq, at, Operand(zero_reg));
+
+  // If both sides are JSReceivers, then the result is false according to
+  // the HTML specification, which says that only comparisons with null or
+  // undefined are affected by special casing for document.all.
+  __ GetInstanceType(a2, a2);
+  __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
+  __ GetInstanceType(a3, a3);
+  __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));
+
+  __ bind(&return_equal);
   __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(EQUAL));  // In delay slot.
 }
@@ -1488,8 +1503,12 @@
   __ GetObjectType(function, function_map, scratch);
   __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
-  // Ensure that {function} has an instance prototype.
+  // Go to the runtime if the function is not a constructor.
   __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+  __ And(at, scratch, Operand(1 << Map::kIsConstructor));
+  __ Branch(&slow_case, eq, at, Operand(zero_reg));
+
+  // Ensure that {function} has an instance prototype.
   __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
   __ Branch(&slow_case, ne, at, Operand(zero_reg));
 
@@ -1559,7 +1578,8 @@
   // Slow-case: Call the %InstanceOf runtime function.
   __ bind(&slow_case);
   __ Push(object, function);
-  __ TailCallRuntime(Runtime::kInstanceOf);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -1579,29 +1599,6 @@
 }
 
 
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is in ra.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-
-  // Check that the key is an array index, that is Uint32.
-  __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
-  __ Branch(&slow, ne, t0, Operand(zero_reg));
-
-  // Everything is fine, call runtime.
-  __ Push(receiver, key);  // Receiver, key.
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -2777,57 +2774,58 @@
   __ bind(&not_smi);
 
   Label not_heap_number;
-  __ ld(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  // a0: object
-  // a1: instance type.
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
   __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
   __ bind(&not_heap_number);
 
-  Label not_string, slow_string;
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in a0.
+  __ AssertNotNumber(a0);
+
+  Label not_string;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
   __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
-  // Check if string has a cached array index.
-  __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
-  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
-  __ Branch(&slow_string, ne, at, Operand(zero_reg));
-  __ IndexFromHash(a2, a0);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&slow_string);
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
   __ bind(&not_string);
 
   Label not_oddball;
   __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
   __ Ret(USE_DELAY_SLOT);
-  __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+  __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));  // In delay slot.
   __ bind(&not_oddball);
 
-  __ push(a0);  // Push argument.
+  __ Push(a0);  // Push argument.
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in a0.
+  __ AssertString(a0);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes on argument in a0.
-  Label not_smi, positive_smi;
-  __ JumpIfNotSmi(a0, &not_smi);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
-  __ mov(a0, zero_reg);
-  __ bind(&positive_smi);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&not_smi);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ IndexFromHash(a2, v0);
+  __ Ret();
 
-  __ push(a0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ bind(&runtime);
+  __ Push(a0);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes on argument in a0.
   Label is_number;
@@ -2998,39 +2996,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1    : left
-  //  -- a0    : right
-  //  -- ra    : return address
-  // -----------------------------------
-  __ AssertString(a1);
-  __ AssertString(a0);
-
-  Label not_same;
-  __ Branch(&not_same, ne, a0, Operand(a1));
-  __ li(v0, Operand(Smi::FromInt(EQUAL)));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
-                      a2);
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both objects are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
-
-  // Compare flat ASCII strings natively.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
-                      a3);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
-
-  __ bind(&runtime);
-  __ Push(a1, a0);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a1    : left
@@ -3353,10 +3318,17 @@
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
-  __ Push(left, right);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left, right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
+    __ Ret(USE_DELAY_SLOT);
+    __ Subu(v0, v0, a0);  // In delay slot.
   } else {
+    __ Push(left, right);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3915,7 +3887,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ ld(a1, MemOperand(fp, parameter_count_offset));
   if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ Daddu(a1, a1, Operand(1));
@@ -4900,7 +4872,7 @@
     __ bind(&loop);
     __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
     __ Branch(&loop, ne, a1, Operand(a3));
   }
 
@@ -4908,7 +4880,7 @@
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
   __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ ld(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&no_rest_parameters, ne, a3,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
@@ -5053,7 +5025,7 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+  __ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&adaptor_frame, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
@@ -5266,14 +5238,14 @@
     __ bind(&loop);
     __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
     __ Branch(&loop, ne, a1, Operand(a3));
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&arguments_adaptor, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   {
@@ -5635,16 +5607,12 @@
   __ jmp(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0                  : callee
   //  -- a4                  : call_data
   //  -- a2                  : holder
   //  -- a1                  : api_function_address
-  //  -- a3                  : number of arguments if argc is a register
   //  -- cp                  : context
   //  --
   //  -- sp[0]               : last argument
@@ -5670,17 +5638,15 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || a3.is(argc.reg()));
-
   // Save context, callee and call data.
   __ Push(context, callee, call_data);
-  if (!is_lazy) {
+  if (!is_lazy()) {
     // Load context from callee.
     __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   }
 
   Register scratch = call_data;
-  if (!call_data_undefined) {
+  if (!call_data_undefined()) {
     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   }
   // Push return value and default return value.
@@ -5705,33 +5671,17 @@
   __ Daddu(a0, sp, Operand(1 * kPointerSize));
   // FunctionCallbackInfo::implicit_args_
   __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
-  if (argc.is_immediate()) {
-    // FunctionCallbackInfo::values_
-    __ Daddu(at, scratch,
-             Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
-    __ sd(at, MemOperand(a0, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    // Stored as int field, 32-bit integers within struct on stack always left
-    // justified by n64 ABI.
-    __ li(at, Operand(argc.immediate()));
-    __ sw(at, MemOperand(a0, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_ = 0
-    __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
-  } else {
-    // FunctionCallbackInfo::values_
-    __ dsll(at, argc.reg(), kPointerSizeLog2);
-    __ Daddu(at, at, scratch);
-    __ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
-    __ sd(at, MemOperand(a0, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    // Stored as int field, 32-bit integers within struct on stack always left
-    // justified by n64 ABI.
-    __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_
-    __ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
-    __ dsll(at, argc.reg(), kPointerSizeLog2);
-    __ sw(at, MemOperand(a0, 2 * kPointerSize + kIntSize));
-  }
+  // FunctionCallbackInfo::values_
+  __ Daddu(at, scratch,
+           Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+  __ sd(at, MemOperand(a0, 1 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc
+  // Stored as an int field; 32-bit integers within a struct on the stack are
+  // always left-justified under the n64 ABI.
+  __ li(at, Operand(argc()));
+  __ sw(at, MemOperand(a0, 2 * kPointerSize));
+  // FunctionCallbackInfo::is_construct_call_ = 0
+  __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5741,7 +5691,7 @@
       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument.
   int return_value_offset = 0;
-  if (return_first_arg) {
+  if (is_store()) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5749,33 +5699,14 @@
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
   int32_t stack_space_offset = 4 * kPointerSize;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_offset = kInvalidStackOffset;
-  }
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_offset = kInvalidStackOffset;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                            stack_space_offset, return_value_operand,
                            &context_restore_operand);
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- sp[0]                        : name
diff --git a/src/mips64/codegen-mips64.cc b/src/mips64/codegen-mips64.cc
index c8cde97..44d822b 100644
--- a/src/mips64/codegen-mips64.cc
+++ b/src/mips64/codegen-mips64.cc
@@ -1194,12 +1194,10 @@
                       young_sequence_.length() / Assembler::kInstrSize,
                       CodePatcher::DONT_FLUSH));
   PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
-  patcher->masm()->Push(ra, fp, cp, a1);
+  patcher->masm()->PushStandardFrame(a1);
   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
-  patcher->masm()->Daddu(
-      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
 }
 
 
diff --git a/src/mips64/deoptimizer-mips64.cc b/src/mips64/deoptimizer-mips64.cc
index ec610f0..90bd11e 100644
--- a/src/mips64/deoptimizer-mips64.cc
+++ b/src/mips64/deoptimizer-mips64.cc
@@ -98,12 +98,6 @@
   }
 }
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  // There is no dynamic alignment padding on MIPS in the input frame.
-  return false;
-}
-
-
 #define __ masm()->
 
 
@@ -161,9 +155,14 @@
 
   // Allocate a new deoptimizer object.
   __ PrepareCallCFunction(6, a5);
-  // Pass six arguments, according to O32 or n64 ABI. a0..a3 are same for both.
-  __ li(a1, Operand(type()));  // bailout type,
+  // Pass six arguments, according to n64 ABI.
+  __ mov(a0, zero_reg);
+  Label context_check;
+  __ ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(a1, &context_check);
   __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
+  __ li(a1, Operand(type()));  // Bailout type.
   // a2: bailout id already loaded.
   // a3: code address or 0 already loaded.
   // a4: already has fp-to-sp delta.
@@ -238,6 +237,8 @@
   }
   __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
 
+  __ ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
diff --git a/src/mips64/frames-mips64.h b/src/mips64/frames-mips64.h
index 9c42d8d..d6d3e5c 100644
--- a/src/mips64/frames-mips64.h
+++ b/src/mips64/frames-mips64.h
@@ -133,13 +133,11 @@
       -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize = 2 * kPointerSize;
-
-  static const int kCodeOffset = -2 * kPointerSize;
-  static const int kSPOffset = -1 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
@@ -161,7 +159,7 @@
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
   static const int kLastParameterOffset = +2 * kPointerSize;
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 
   // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
index 73df66e..7695d0b 100644
--- a/src/mips64/interface-descriptors-mips64.cc
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -109,35 +109,8 @@
 }
 
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return a0; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -266,6 +239,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -310,7 +290,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // Stack parameter count: the argument count, passed in a0.
+  Register registers[] = {a0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a1, a0};
@@ -318,20 +303,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a1, a0};
@@ -391,21 +362,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {
-      a0,  // callee
-      a4,  // call_data
-      a2,  // holder
-      a1,  // api_function_address
-      a3,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       a0,  // callee
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index b49fa76..fb83fe9 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -102,6 +102,34 @@
   sd(source, MemOperand(s6, index << kPointerSizeLog2));
 }
 
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+  if (marker_reg.is_valid()) {
+    Push(ra, fp, marker_reg);
+    Daddu(fp, sp, Operand(kPointerSize));
+  } else {
+    Push(ra, fp);
+    mov(fp, sp);
+  }
+}
+
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+  if (marker_reg.is_valid()) {
+    Pop(ra, fp, marker_reg);
+  } else {
+    Pop(ra, fp);
+  }
+}
+
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+  int offset = -StandardFrameConstants::kContextOffset;
+  if (function_reg.is_valid()) {
+    Push(ra, fp, cp, function_reg);
+    offset += kPointerSize;
+  } else {
+    Push(ra, fp, cp);
+  }
+  Daddu(fp, sp, Operand(offset));
+}
 
 // Push and pop all registers that can hold pointers.
 void MacroAssembler::PushSafepointRegisters() {
@@ -457,13 +485,13 @@
   sd(scratch, MemOperand(t8));
   // Call stub on end of buffer.
   // Check for end of buffer.
-  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
   DCHECK(!scratch.is(t8));
   if (and_then == kFallThroughAtEnd) {
-    Branch(&done, eq, t8, Operand(zero_reg));
+    Branch(&done, ne, t8, Operand(zero_reg));
   } else {
     DCHECK(and_then == kReturnAtEnd);
-    Ret(eq, t8, Operand(zero_reg));
+    Ret(ne, t8, Operand(zero_reg));
   }
   push(ra);
   StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@@ -484,13 +512,25 @@
                                             Register scratch,
                                             Label* miss) {
   Label same_contexts;
+  Register temporary = t8;
 
   DCHECK(!holder_reg.is(scratch));
   DCHECK(!holder_reg.is(at));
   DCHECK(!scratch.is(at));
 
-  // Load current lexical context from the stack frame.
-  ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  mov(at, fp);
+  bind(&load_context);
+  ld(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
+  // Pass a temporary register; otherwise JumpIfNotSmi would clobber the at register.
+  JumpIfNotSmi(scratch, &has_context, temporary);
+  ld(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
+  Branch(&load_context);
+  bind(&has_context);
+
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
@@ -1225,7 +1265,11 @@
   if (rt.is_reg()) {
     rotrv(rd, rs, rt.rm());
   } else {
-    rotr(rd, rs, rt.imm64_);
+    int64_t ror_value = rt.imm64_ % 32;
+    if (ror_value < 0) {
+      ror_value += 32;
+    }
+    rotr(rd, rs, ror_value);
   }
 }
 
@@ -1234,7 +1278,13 @@
   if (rt.is_reg()) {
     drotrv(rd, rs, rt.rm());
   } else {
-    drotr(rd, rs, rt.imm64_);
+    int64_t dror_value = rt.imm64_ % 64;
+    if (dror_value < 0) dror_value += 64;
+    if (dror_value <= 31) {
+      drotr(rd, rs, dror_value);
+    } else {
+      drotr32(rd, rs, dror_value - 32);
+    }
   }
 }
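
The Ror/Dror changes above normalize the immediate rotate amount before choosing a single rotate instruction (drotr for 0..31, drotr32 for 32..63). A plain C++ sketch of that normalization, offered as an illustration only:

#include <cstdint>

uint32_t RotateRight32(uint32_t value, int64_t amount) {
  int64_t r = amount % 32;
  if (r < 0) r += 32;  // Matches the sign handling in Ror.
  if (r == 0) return value;
  return (value >> r) | (value << (32 - r));
}

uint64_t RotateRight64(uint64_t value, int64_t amount) {
  int64_t r = amount % 64;
  if (r < 0) r += 64;  // Matches the sign handling in Dror.
  if (r == 0) return value;
  // Dror uses drotr for 0..31 and drotr32 for 32..63; the value computed is
  // the same as a plain 64-bit rotate by r.
  return (value >> r) | (value << (64 - r));
}
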
 
@@ -1246,8 +1296,9 @@
 
 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                          Register scratch) {
+  DCHECK(sa >= 1 && sa <= 31);
   if (kArchVariant == kMips64r6 && sa <= 4) {
-    lsa(rd, rt, rs, sa);
+    lsa(rd, rt, rs, sa - 1);
   } else {
     Register tmp = rd.is(rt) ? scratch : rd;
     DCHECK(!tmp.is(rt));
@@ -1259,8 +1310,9 @@
 
 void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
                           Register scratch) {
+  DCHECK(sa >= 1 && sa <= 31);
   if (kArchVariant == kMips64r6 && sa <= 4) {
-    dlsa(rd, rt, rs, sa);
+    dlsa(rd, rt, rs, sa - 1);
   } else {
     Register tmp = rd.is(rt) ? scratch : rd;
     DCHECK(!tmp.is(rt));
@@ -2310,6 +2362,186 @@
   movf(rd, rs, cc);
 }
 
+#define __ masm->
+
+static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+                         FPURegister src1, FPURegister src2, Label* equal) {
+  if (src1.is(src2)) {
+    __ Move(dst, src1);
+    return true;
+  }
+
+  Label other, compare_not_equal;
+  FPURegister left, right;
+  if (kind == MaxMinKind::kMin) {
+    left = src1;
+    right = src2;
+  } else {
+    left = src2;
+    right = src1;
+  }
+
+  __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
+  // Left and right hand side are equal, check for -0 vs. +0.
+  __ dmfc1(t8, src1);
+  __ Branch(&other, eq, t8, Operand(0x8000000000000000));
+  __ Move_d(dst, right);
+  __ Branch(equal);
+  __ bind(&other);
+  __ Move_d(dst, left);
+  __ Branch(equal);
+  __ bind(&compare_not_equal);
+  return false;
+}
+
+static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
+                         FPURegister src1, FPURegister src2, Label* equal) {
+  if (src1.is(src2)) {
+    __ Move(dst, src1);
+    return true;
+  }
+
+  Label other, compare_not_equal;
+  FPURegister left, right;
+  if (kind == MaxMinKind::kMin) {
+    left = src1;
+    right = src2;
+  } else {
+    left = src2;
+    right = src1;
+  }
+
+  __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
+  // Left and right hand side are equal, check for -0 vs. +0.
+  __ FmoveLow(t8, src1);
+  __ dsll32(t8, t8, 0);
+  __ Branch(&other, eq, t8, Operand(0x8000000000000000));
+  __ Move_s(dst, right);
+  __ Branch(equal);
+  __ bind(&other);
+  __ Move_s(dst, left);
+  __ Branch(equal);
+  __ bind(&compare_not_equal);
+  return false;
+}
+
+#undef __
+
+void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF64(nullptr, nan, eq, src1, src2);
+  }
+  if (kArchVariant >= kMips64r6) {
+    min_d(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF64(&skip, nullptr, le, src1, src2);
+        Move_d(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF64(&skip, nullptr, ge, src1, src2);
+        Move_d(dst, src1);
+      } else {
+        Label right;
+        BranchF64(&right, nullptr, gt, src1, src2);
+        Move_d(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_d(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
+
+void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF64(nullptr, nan, eq, src1, src2);
+  }
+  if (kArchVariant >= kMips64r6) {
+    max_d(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF64(&skip, nullptr, ge, src1, src2);
+        Move_d(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF64(&skip, nullptr, le, src1, src2);
+        Move_d(dst, src1);
+      } else {
+        Label right;
+        BranchF64(&right, nullptr, lt, src1, src2);
+        Move_d(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_d(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
+
+void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF32(nullptr, nan, eq, src1, src2);
+  }
+  if (kArchVariant >= kMips64r6) {
+    min_s(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF32(&skip, nullptr, le, src1, src2);
+        Move_s(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF32(&skip, nullptr, ge, src1, src2);
+        Move_s(dst, src1);
+      } else {
+        Label right;
+        BranchF32(&right, nullptr, gt, src1, src2);
+        Move_s(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_s(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
+
+void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
+                                   FPURegister src2, Label* nan) {
+  if (nan) {
+    BranchF32(nullptr, nan, eq, src1, src2);
+  }
+  if (kArchVariant >= kMips64r6) {
+    max_s(dst, src1, src2);
+  } else {
+    Label skip;
+    if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
+      if (dst.is(src1)) {
+        BranchF32(&skip, nullptr, ge, src1, src2);
+        Move_s(dst, src2);
+      } else if (dst.is(src2)) {
+        BranchF32(&skip, nullptr, le, src1, src2);
+        Move_s(dst, src1);
+      } else {
+        Label right;
+        BranchF32(&right, nullptr, lt, src1, src2);
+        Move_s(dst, src1);
+        Branch(&skip);
+        bind(&right);
+        Move_s(dst, src2);
+      }
+    }
+    bind(&skip);
+  }
+}
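
The pre-r6 Min/MaxNaNCheck helpers above branch to the nan label when either input is NaN and otherwise resolve the -0 vs. +0 case so that min prefers -0 (and max prefers +0). A rough scalar sketch of the Min variant's semantics, as an illustration rather than the emitted code:

#include <cmath>

// Returns true (leaving *out untouched) when either input is NaN, mirroring
// the branch to the nan label; otherwise stores the minimum, treating -0 as
// smaller than +0 as in ZeroHelper_d.
bool MinWithNaNCheck(double a, double b, double* out) {
  if (std::isnan(a) || std::isnan(b)) return true;
  if (a == b) {
    // Equal values (including +0 == -0): prefer the negatively signed one.
    *out = std::signbit(a) ? a : b;
    return false;
  }
  *out = a < b ? a : b;
  return false;
}
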
 
 void MacroAssembler::Clz(Register rd, Register rs) {
   clz(rd, rs);
@@ -3468,16 +3700,25 @@
                           const Operand& rt,
                           BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (cond == cc_always) {
-    jr(target);
+  if (kArchVariant == kMips64r6 && bd == PROTECT) {
+    if (cond == cc_always) {
+      jic(target, 0);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jic(target, 0);
+    }
   } else {
-    BRANCH_ARGS_CHECK(cond, rs, rt);
-    Branch(2, NegateCondition(cond), rs, rt);
-    jr(target);
+    if (cond == cc_always) {
+      jr(target);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jr(target);
+    }
+    // Emit a nop in the branch delay slot if required.
+    if (bd == PROTECT) nop();
   }
-  // Emit a nop in the branch delay slot if required.
-  if (bd == PROTECT)
-    nop();
 }
 
 
@@ -3535,8 +3776,7 @@
     size += 3;
   }
 
-  if (bd == PROTECT)
-    size += 1;
+  if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
 
   return size * kInstrSize;
 }
@@ -3555,16 +3795,25 @@
   BlockTrampolinePoolScope block_trampoline_pool(this);
   Label start;
   bind(&start);
-  if (cond == cc_always) {
-    jalr(target);
+  if (kArchVariant == kMips64r6 && bd == PROTECT) {
+    if (cond == cc_always) {
+      jialc(target, 0);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jialc(target, 0);
+    }
   } else {
-    BRANCH_ARGS_CHECK(cond, rs, rt);
-    Branch(2, NegateCondition(cond), rs, rt);
-    jalr(target);
+    if (cond == cc_always) {
+      jalr(target);
+    } else {
+      BRANCH_ARGS_CHECK(cond, rs, rt);
+      Branch(2, NegateCondition(cond), rs, rt);
+      jalr(target);
+    }
+    // Emit a nop in the branch delay slot if required.
+    if (bd == PROTECT) nop();
   }
-  // Emit a nop in the branch delay slot if required.
-  if (bd == PROTECT)
-    nop();
 
 #ifdef DEBUG
   CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
@@ -3687,44 +3936,6 @@
 }
 
 
-void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-
-  uint64_t imm64;
-  imm64 = jump_address(L);
-  { BlockGrowBufferScope block_buf_growth(this);
-    // Buffer growth (and relocation) must be blocked for internal references
-    // until associated instructions are emitted and available to be patched.
-    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-    li(at, Operand(imm64), ADDRESS_LOAD);
-  }
-  jr(at);
-
-  // Emit a nop in the branch delay slot if required.
-  if (bdslot == PROTECT)
-    nop();
-}
-
-
-void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-
-  uint64_t imm64;
-  imm64 = jump_address(L);
-  { BlockGrowBufferScope block_buf_growth(this);
-    // Buffer growth (and relocation) must be blocked for internal references
-    // until associated instructions are emitted and available to be patched.
-    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-    li(at, Operand(imm64), ADDRESS_LOAD);
-  }
-  jalr(at);
-
-  // Emit a nop in the branch delay slot if required.
-  if (bdslot == PROTECT)
-    nop();
-}
-
-
 void MacroAssembler::DropAndRet(int drop) {
   DCHECK(is_int16(drop * kPointerSize));
   Ret(USE_DELAY_SLOT);
@@ -4551,6 +4762,65 @@
 // -----------------------------------------------------------------------------
 // JavaScript invokes.
 
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+                                        Register caller_args_count_reg,
+                                        Register scratch0, Register scratch1) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+#endif
+
+  // Calculate the end of destination area where we will put the arguments
+  // after we drop current frame. We add kPointerSize to count the receiver
+  // argument which is not included into formal parameters count.
+  Register dst_reg = scratch0;
+  Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
+  Daddu(dst_reg, dst_reg,
+        Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = caller_args_count_reg;
+  // Calculate the end of source area. +kPointerSize is for the receiver.
+  if (callee_args_count.is_reg()) {
+    Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
+    Daddu(src_reg, src_reg, Operand(kPointerSize));
+  } else {
+    Daddu(src_reg, sp,
+          Operand((callee_args_count.immediate() + 1) * kPointerSize));
+  }
+
+  if (FLAG_debug_code) {
+    Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch1;
+  Label loop, entry;
+  Branch(&entry);
+  bind(&loop);
+  Dsubu(src_reg, src_reg, Operand(kPointerSize));
+  Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
+  ld(tmp_reg, MemOperand(src_reg));
+  sd(tmp_reg, MemOperand(dst_reg));
+  bind(&entry);
+  Branch(&loop, ne, sp, Operand(src_reg));
+
+  // Leave current frame.
+  mov(sp, dst_reg);
+}
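
PrepareForTailCall relocates the callee's arguments (plus the receiver) over the caller's frame, copying back to front so that overlapping source and destination ranges stay safe, and then points sp at the copied block. A word-level sketch of that copy scheme, with hypothetical slot pointers standing in for the registers:

#include <cstddef>
#include <cstdint>

uint64_t* CollapseFrameForTailCall(uint64_t* sp, uint64_t* dst_end,
                                   size_t callee_args_plus_receiver) {
  uint64_t* src = sp + callee_args_plus_receiver;  // One past the last slot.
  uint64_t* dst = dst_end;
  while (src != sp) {  // Mirrors the pre-decrementing copy loop above.
    *--dst = *--src;
  }
  return dst;  // New stack pointer once the current frame is dropped.
}
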
+
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
                                     Label* done,
@@ -4913,139 +5183,6 @@
   cvt_d_w(value, value);
 }
 
-
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
-                                             const Operand& right,
-                                             Register overflow_dst,
-                                             Register scratch) {
-  if (right.is_reg()) {
-    AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
-  } else {
-    if (dst.is(left)) {
-      li(t9, right);                         // Load right.
-      mov(scratch, left);                    // Preserve left.
-      addu(dst, left, t9);                   // Left is overwritten.
-      xor_(scratch, dst, scratch);           // Original left.
-      xor_(overflow_dst, dst, t9);
-      and_(overflow_dst, overflow_dst, scratch);
-    } else {
-      li(t9, right);
-      addu(dst, left, t9);
-      xor_(overflow_dst, dst, left);
-      xor_(scratch, dst, t9);
-      and_(overflow_dst, scratch, overflow_dst);
-    }
-  }
-}
-
-
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
-                                             Register right,
-                                             Register overflow_dst,
-                                             Register scratch) {
-  DCHECK(!dst.is(overflow_dst));
-  DCHECK(!dst.is(scratch));
-  DCHECK(!overflow_dst.is(scratch));
-  DCHECK(!overflow_dst.is(left));
-  DCHECK(!overflow_dst.is(right));
-
-  if (left.is(right) && dst.is(left)) {
-    DCHECK(!dst.is(t9));
-    DCHECK(!scratch.is(t9));
-    DCHECK(!left.is(t9));
-    DCHECK(!right.is(t9));
-    DCHECK(!overflow_dst.is(t9));
-    mov(t9, right);
-    right = t9;
-  }
-
-  if (dst.is(left)) {
-    mov(scratch, left);           // Preserve left.
-    addu(dst, left, right);       // Left is overwritten.
-    xor_(scratch, dst, scratch);  // Original left.
-    xor_(overflow_dst, dst, right);
-    and_(overflow_dst, overflow_dst, scratch);
-  } else if (dst.is(right)) {
-    mov(scratch, right);          // Preserve right.
-    addu(dst, left, right);       // Right is overwritten.
-    xor_(scratch, dst, scratch);  // Original right.
-    xor_(overflow_dst, dst, left);
-    and_(overflow_dst, overflow_dst, scratch);
-  } else {
-    addu(dst, left, right);
-    xor_(overflow_dst, dst, left);
-    xor_(scratch, dst, right);
-    and_(overflow_dst, scratch, overflow_dst);
-  }
-}
-
-
-void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
-                                              const Operand& right,
-                                              Register overflow_dst,
-                                              Register scratch) {
-  if (right.is_reg()) {
-    DadduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
-  } else {
-    if (dst.is(left)) {
-      li(t9, right);                // Load right.
-      mov(scratch, left);           // Preserve left.
-      daddu(dst, left, t9);         // Left is overwritten.
-      xor_(scratch, dst, scratch);  // Original left.
-      xor_(overflow_dst, dst, t9);
-      and_(overflow_dst, overflow_dst, scratch);
-    } else {
-      li(t9, right);  // Load right.
-      Daddu(dst, left, t9);
-      xor_(overflow_dst, dst, left);
-      xor_(scratch, dst, t9);
-      and_(overflow_dst, scratch, overflow_dst);
-    }
-  }
-}
-
-
-void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
-                                              Register right,
-                                              Register overflow_dst,
-                                              Register scratch) {
-  DCHECK(!dst.is(overflow_dst));
-  DCHECK(!dst.is(scratch));
-  DCHECK(!overflow_dst.is(scratch));
-  DCHECK(!overflow_dst.is(left));
-  DCHECK(!overflow_dst.is(right));
-
-  if (left.is(right) && dst.is(left)) {
-    DCHECK(!dst.is(t9));
-    DCHECK(!scratch.is(t9));
-    DCHECK(!left.is(t9));
-    DCHECK(!right.is(t9));
-    DCHECK(!overflow_dst.is(t9));
-    mov(t9, right);
-    right = t9;
-  }
-
-  if (dst.is(left)) {
-    mov(scratch, left);  // Preserve left.
-    daddu(dst, left, right);  // Left is overwritten.
-    xor_(scratch, dst, scratch);  // Original left.
-    xor_(overflow_dst, dst, right);
-    and_(overflow_dst, overflow_dst, scratch);
-  } else if (dst.is(right)) {
-    mov(scratch, right);  // Preserve right.
-    daddu(dst, left, right);  // Right is overwritten.
-    xor_(scratch, dst, scratch);  // Original right.
-    xor_(overflow_dst, dst, left);
-    and_(overflow_dst, overflow_dst, scratch);
-  } else {
-    daddu(dst, left, right);
-    xor_(overflow_dst, dst, left);
-    xor_(scratch, dst, right);
-    and_(overflow_dst, scratch, overflow_dst);
-  }
-}
-
-
 static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
                                    Label* overflow_label,
                                    Label* no_overflow_label) {
@@ -5059,6 +5196,180 @@
   }
 }
 
+void MacroAssembler::AddBranchOvf(Register dst, Register left,
+                                  const Operand& right, Label* overflow_label,
+                                  Label* no_overflow_label, Register scratch) {
+  if (right.is_reg()) {
+    AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+                 scratch);
+  } else {
+    if (kArchVariant == kMips64r6) {
+      Register right_reg = t9;
+      DCHECK(!left.is(right_reg));
+      li(right_reg, Operand(right));
+      AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
+    } else {
+      Register overflow_dst = t9;
+      DCHECK(!dst.is(scratch));
+      DCHECK(!dst.is(overflow_dst));
+      DCHECK(!scratch.is(overflow_dst));
+      DCHECK(!left.is(overflow_dst));
+      if (dst.is(left)) {
+        mov(scratch, left);  // Preserve left.
+        // Left is overwritten.
+        Addu(dst, left, static_cast<int32_t>(right.immediate()));
+        xor_(scratch, dst, scratch);  // Original left.
+        // Load right since xori takes uint16 as immediate.
+        Addu(overflow_dst, zero_reg, right);
+        xor_(overflow_dst, dst, overflow_dst);
+        and_(overflow_dst, overflow_dst, scratch);
+      } else {
+        Addu(dst, left, static_cast<int32_t>(right.immediate()));
+        xor_(overflow_dst, dst, left);
+        // Load right since xori takes uint16 as immediate.
+        Addu(scratch, zero_reg, right);
+        xor_(scratch, dst, scratch);
+        and_(overflow_dst, scratch, overflow_dst);
+      }
+      BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+    }
+  }
+}
+
+void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
+                                  Label* overflow_label,
+                                  Label* no_overflow_label, Register scratch) {
+  if (kArchVariant == kMips64r6) {
+    if (!overflow_label) {
+      DCHECK(no_overflow_label);
+      DCHECK(!dst.is(scratch));
+      Register left_reg = left.is(dst) ? scratch : left;
+      Register right_reg = right.is(dst) ? t9 : right;
+      DCHECK(!dst.is(left_reg));
+      DCHECK(!dst.is(right_reg));
+      Move(left_reg, left);
+      Move(right_reg, right);
+      addu(dst, left, right);
+      bnvc(left_reg, right_reg, no_overflow_label);
+    } else {
+      bovc(left, right, overflow_label);
+      addu(dst, left, right);
+      if (no_overflow_label) bc(no_overflow_label);
+    }
+  } else {
+    Register overflow_dst = t9;
+    DCHECK(!dst.is(scratch));
+    DCHECK(!dst.is(overflow_dst));
+    DCHECK(!scratch.is(overflow_dst));
+    DCHECK(!left.is(overflow_dst));
+    DCHECK(!right.is(overflow_dst));
+    DCHECK(!left.is(scratch));
+    DCHECK(!right.is(scratch));
+
+    if (left.is(right) && dst.is(left)) {
+      mov(overflow_dst, right);
+      right = overflow_dst;
+    }
+
+    if (dst.is(left)) {
+      mov(scratch, left);           // Preserve left.
+      addu(dst, left, right);       // Left is overwritten.
+      xor_(scratch, dst, scratch);  // Original left.
+      xor_(overflow_dst, dst, right);
+      and_(overflow_dst, overflow_dst, scratch);
+    } else if (dst.is(right)) {
+      mov(scratch, right);          // Preserve right.
+      addu(dst, left, right);       // Right is overwritten.
+      xor_(scratch, dst, scratch);  // Original right.
+      xor_(overflow_dst, dst, left);
+      and_(overflow_dst, overflow_dst, scratch);
+    } else {
+      addu(dst, left, right);
+      xor_(overflow_dst, dst, left);
+      xor_(scratch, dst, right);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+  }
+}
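// Editor's note (illustrative sketch, not part of this patch): the pre-r6 path
// above detects signed-add overflow without a branch. Overflow occurs exactly
// when both operands have the same sign and the sum's sign differs, i.e. the
// sign bit of (sum ^ left) & (sum ^ right) is set.
#include <cstdint>

bool AddOverflows32(int32_t left, int32_t right) {
  // Do the addition in unsigned arithmetic to avoid signed-overflow UB in C++.
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                     static_cast<uint32_t>(right));
  return ((sum ^ left) & (sum ^ right)) < 0;  // sign bit set => overflow
}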
+
+void MacroAssembler::SubBranchOvf(Register dst, Register left,
+                                  const Operand& right, Label* overflow_label,
+                                  Label* no_overflow_label, Register scratch) {
+  DCHECK(overflow_label || no_overflow_label);
+  if (right.is_reg()) {
+    SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+                 scratch);
+  } else {
+    Register overflow_dst = t9;
+    DCHECK(!dst.is(scratch));
+    DCHECK(!dst.is(overflow_dst));
+    DCHECK(!scratch.is(overflow_dst));
+    DCHECK(!left.is(overflow_dst));
+    DCHECK(!left.is(scratch));
+    if (dst.is(left)) {
+      mov(scratch, left);  // Preserve left.
+      // Left is overwritten.
+      Subu(dst, left, static_cast<int32_t>(right.immediate()));
+      // Load right since xori takes uint16 as immediate.
+      Addu(overflow_dst, zero_reg, right);
+      xor_(overflow_dst, scratch, overflow_dst);  // scratch is original left.
+      xor_(scratch, dst, scratch);                // scratch is original left.
+      and_(overflow_dst, scratch, overflow_dst);
+    } else {
+      Subu(dst, left, right);
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      Addu(scratch, zero_reg, right);
+      xor_(scratch, left, scratch);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+  }
+}
+
+void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
+                                  Label* overflow_label,
+                                  Label* no_overflow_label, Register scratch) {
+  DCHECK(overflow_label || no_overflow_label);
+  Register overflow_dst = t9;
+  DCHECK(!dst.is(scratch));
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!scratch.is(overflow_dst));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+  DCHECK(!scratch.is(left));
+  DCHECK(!scratch.is(right));
+
+  // This happens with some crankshaft code. Since Subu works fine if
+  // left == right, let's not make that restriction here.
+  if (left.is(right)) {
+    mov(dst, zero_reg);
+    if (no_overflow_label) {
+      Branch(no_overflow_label);
+    }
+  }
+
+  if (dst.is(left)) {
+    mov(scratch, left);  // Preserve left.
+    subu(dst, left, right);            // Left is overwritten.
+    xor_(overflow_dst, dst, scratch);  // scratch is original left.
+    xor_(scratch, scratch, right);     // scratch is original left.
+    and_(overflow_dst, scratch, overflow_dst);
+  } else if (dst.is(right)) {
+    mov(scratch, right);  // Preserve right.
+    subu(dst, left, right);  // Right is overwritten.
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, scratch);  // Original right.
+    and_(overflow_dst, scratch, overflow_dst);
+  } else {
+    subu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+  BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+}
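// Editor's note (illustrative sketch, not part of this patch): SubBranchOvf
// uses the analogous trick for subtraction. Overflow occurs exactly when the
// operands have different signs and the result's sign differs from the
// minuend's, i.e. the sign bit of (diff ^ left) & (left ^ right) is set.
#include <cstdint>

bool SubOverflows32(int32_t left, int32_t right) {
  int32_t diff = static_cast<int32_t>(static_cast<uint32_t>(left) -
                                      static_cast<uint32_t>(right));
  return ((diff ^ left) & (left ^ right)) < 0;  // sign bit set => overflow
}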
 
 void MacroAssembler::DaddBranchOvf(Register dst, Register left,
                                    const Operand& right, Label* overflow_label,
@@ -5129,138 +5440,6 @@
 }
 
 
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
-                                             const Operand& right,
-                                             Register overflow_dst,
-                                             Register scratch) {
-  if (right.is_reg()) {
-    SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
-  } else {
-    if (dst.is(left)) {
-      li(t9, right);                            // Load right.
-      mov(scratch, left);                       // Preserve left.
-      Subu(dst, left, t9);                      // Left is overwritten.
-      xor_(overflow_dst, dst, scratch);         // scratch is original left.
-      xor_(scratch, scratch, t9);  // scratch is original left.
-      and_(overflow_dst, scratch, overflow_dst);
-    } else {
-      li(t9, right);
-      subu(dst, left, t9);
-      xor_(overflow_dst, dst, left);
-      xor_(scratch, left, t9);
-      and_(overflow_dst, scratch, overflow_dst);
-    }
-  }
-}
-
-
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
-                                             Register right,
-                                             Register overflow_dst,
-                                             Register scratch) {
-  DCHECK(!dst.is(overflow_dst));
-  DCHECK(!dst.is(scratch));
-  DCHECK(!overflow_dst.is(scratch));
-  DCHECK(!overflow_dst.is(left));
-  DCHECK(!overflow_dst.is(right));
-  DCHECK(!scratch.is(left));
-  DCHECK(!scratch.is(right));
-
-  // This happens with some crankshaft code. Since Subu works fine if
-  // left == right, let's not make that restriction here.
-  if (left.is(right)) {
-    mov(dst, zero_reg);
-    mov(overflow_dst, zero_reg);
-    return;
-  }
-
-  if (dst.is(left)) {
-    mov(scratch, left);                // Preserve left.
-    subu(dst, left, right);            // Left is overwritten.
-    xor_(overflow_dst, dst, scratch);  // scratch is original left.
-    xor_(scratch, scratch, right);     // scratch is original left.
-    and_(overflow_dst, scratch, overflow_dst);
-  } else if (dst.is(right)) {
-    mov(scratch, right);     // Preserve right.
-    subu(dst, left, right);  // Right is overwritten.
-    xor_(overflow_dst, dst, left);
-    xor_(scratch, left, scratch);  // Original right.
-    and_(overflow_dst, scratch, overflow_dst);
-  } else {
-    subu(dst, left, right);
-    xor_(overflow_dst, dst, left);
-    xor_(scratch, left, right);
-    and_(overflow_dst, scratch, overflow_dst);
-  }
-}
-
-
-void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
-                                              const Operand& right,
-                                              Register overflow_dst,
-                                              Register scratch) {
-  if (right.is_reg()) {
-    DsubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
-  } else {
-    if (dst.is(left)) {
-      li(t9, right);                     // Load right.
-      mov(scratch, left);                // Preserve left.
-      dsubu(dst, left, t9);              // Left is overwritten.
-      xor_(overflow_dst, dst, scratch);  // scratch is original left.
-      xor_(scratch, scratch, t9);        // scratch is original left.
-      and_(overflow_dst, scratch, overflow_dst);
-    } else {
-      li(t9, right);
-      dsubu(dst, left, t9);
-      xor_(overflow_dst, dst, left);
-      xor_(scratch, left, t9);
-      and_(overflow_dst, scratch, overflow_dst);
-    }
-  }
-}
-
-
-void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
-                                              Register right,
-                                              Register overflow_dst,
-                                              Register scratch) {
-  DCHECK(!dst.is(overflow_dst));
-  DCHECK(!dst.is(scratch));
-  DCHECK(!overflow_dst.is(scratch));
-  DCHECK(!overflow_dst.is(left));
-  DCHECK(!overflow_dst.is(right));
-  DCHECK(!scratch.is(left));
-  DCHECK(!scratch.is(right));
-
-  // This happens with some crankshaft code. Since Subu works fine if
-  // left == right, let's not make that restriction here.
-  if (left.is(right)) {
-    mov(dst, zero_reg);
-    mov(overflow_dst, zero_reg);
-    return;
-  }
-
-  if (dst.is(left)) {
-    mov(scratch, left);  // Preserve left.
-    dsubu(dst, left, right);  // Left is overwritten.
-    xor_(overflow_dst, dst, scratch);  // scratch is original left.
-    xor_(scratch, scratch, right);  // scratch is original left.
-    and_(overflow_dst, scratch, overflow_dst);
-  } else if (dst.is(right)) {
-    mov(scratch, right);  // Preserve right.
-    dsubu(dst, left, right);  // Right is overwritten.
-    xor_(overflow_dst, dst, left);
-    xor_(scratch, left, scratch);  // Original right.
-    and_(overflow_dst, scratch, overflow_dst);
-  } else {
-    dsubu(dst, left, right);
-    xor_(overflow_dst, dst, left);
-    xor_(scratch, left, right);
-    and_(overflow_dst, scratch, overflow_dst);
-  }
-}
-
-
 void MacroAssembler::DsubBranchOvf(Register dst, Register left,
                                    const Operand& right, Label* overflow_label,
                                    Label* no_overflow_label, Register scratch) {
@@ -5566,12 +5745,9 @@
   }
 }
 
-
-void MacroAssembler::StubPrologue() {
-    Push(ra, fp, cp);
-    Push(Smi::FromInt(StackFrame::STUB));
-    // Adjust FP to point to saved FP.
-    Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+  li(at, Operand(Smi::FromInt(type)));
+  PushCommonFrame(at);
 }
 
 
@@ -5594,16 +5770,13 @@
     nop();  // Branch delay slot nop.
     nop();  // Pad the empty space.
   } else {
-    Push(ra, fp, cp, a1);
+    PushStandardFrame(a1);
     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
     nop(Assembler::CODE_AGE_SEQUENCE_NOP);
-    // Adjust fp to point to caller's fp.
-    Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
   }
 }
 
-
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
@@ -5620,30 +5793,41 @@
 
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
-  daddiu(sp, sp, -5 * kPointerSize);
-  li(t8, Operand(Smi::FromInt(type)));
-  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
-  sd(ra, MemOperand(sp, 4 * kPointerSize));
-  sd(fp, MemOperand(sp, 3 * kPointerSize));
-  sd(cp, MemOperand(sp, 2 * kPointerSize));
-  sd(t8, MemOperand(sp, 1 * kPointerSize));
-  sd(t9, MemOperand(sp, 0 * kPointerSize));
+  int stack_offset, fp_offset;
+  if (type == StackFrame::INTERNAL) {
+    stack_offset = -4 * kPointerSize;
+    fp_offset = 2 * kPointerSize;
+  } else {
+    stack_offset = -3 * kPointerSize;
+    fp_offset = 1 * kPointerSize;
+  }
+  daddiu(sp, sp, stack_offset);
+  stack_offset = -stack_offset - kPointerSize;
+  sd(ra, MemOperand(sp, stack_offset));
+  stack_offset -= kPointerSize;
+  sd(fp, MemOperand(sp, stack_offset));
+  stack_offset -= kPointerSize;
+  li(t9, Operand(Smi::FromInt(type)));
+  sd(t9, MemOperand(sp, stack_offset));
+  if (type == StackFrame::INTERNAL) {
+    DCHECK_EQ(stack_offset, kPointerSize);
+    li(t9, Operand(CodeObject()));
+    sd(t9, MemOperand(sp, 0));
+  } else {
+    DCHECK_EQ(stack_offset, 0);
+  }
   // Adjust FP to point to saved FP.
-  Daddu(fp, sp,
-       Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+  Daddu(fp, sp, Operand(fp_offset));
 }
 
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
-  mov(sp, fp);
-  ld(fp, MemOperand(sp, 0 * kPointerSize));
-  ld(ra, MemOperand(sp, 1 * kPointerSize));
-  daddiu(sp, sp, 2 * kPointerSize);
+  daddiu(sp, fp, 2 * kPointerSize);
+  ld(ra, MemOperand(fp, 1 * kPointerSize));
+  ld(fp, MemOperand(fp, 0 * kPointerSize));
 }
 
-
-void MacroAssembler::EnterExitFrame(bool save_doubles,
-                                    int stack_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // Set up the frame structure on the stack.
   STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
   STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
@@ -5653,16 +5837,20 @@
   // fp + 2 (==kCallerSPDisplacement) - old stack's end
   // [fp + 1 (==kCallerPCOffset)] - saved old ra
   // [fp + 0 (==kCallerFPOffset)] - saved old fp
-  // [fp - 1 (==kSPOffset)] - sp of the called function
-  // [fp - 2 (==kCodeOffset)] - CodeObject
+  // [fp - 1] - StackFrame::EXIT Smi
+  // [fp - 2 (==kSPOffset)] - sp of the called function
+  // [fp - 3 (==kCodeOffset)] - CodeObject
   // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
   //   new stack (will contain saved ra)
 
-  // Save registers.
-  daddiu(sp, sp, -4 * kPointerSize);
-  sd(ra, MemOperand(sp, 3 * kPointerSize));
-  sd(fp, MemOperand(sp, 2 * kPointerSize));
-  daddiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
+  // Save registers and reserve room for saved entry sp and code object.
+  daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+  sd(ra, MemOperand(sp, 4 * kPointerSize));
+  sd(fp, MemOperand(sp, 3 * kPointerSize));
+  li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
+  sd(at, MemOperand(sp, 2 * kPointerSize));
+  // Set up new frame pointer.
+  daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
 
   if (emit_debug_code()) {
     sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -5715,8 +5903,8 @@
   if (save_doubles) {
     // Remember: we only need to restore every 2nd double FPU value.
     int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
-    Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
-        kNumOfSavedRegisters * kDoubleSize));
+    Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
+                          kNumOfSavedRegisters * kDoubleSize));
     for (int i = 0; i < kNumOfSavedRegisters; i++) {
       FPURegister reg = FPURegister::from_code(2 * i);
       ldc1(reg, MemOperand(t8, i  * kDoubleSize));
@@ -5966,6 +6154,15 @@
   JumpIfSmi(at, on_either_smi);
 }
 
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    andi(at, object, kSmiTagMask);
+    Check(ne, kOperandIsANumber, at, Operand(zero_reg));
+    GetObjectType(object, t8, t8);
+    Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
+  }
+}
 
 void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
@@ -6494,28 +6691,45 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TestJSArrayForAllocationMemento(
-    Register receiver_reg,
-    Register scratch_reg,
-    Label* no_memento_found,
-    Condition cond,
-    Label* allocation_memento_present) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+                                                     Register scratch_reg,
+                                                     Label* no_memento_found) {
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
-  Daddu(scratch_reg, receiver_reg,
-       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
-  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+  Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
+  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
+  Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
+  // Continue with the actual map check.
+  jmp(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
   li(at, Operand(new_space_allocation_top));
-  ld(at, MemOperand(at));
+  lw(at, MemOperand(at));
   Branch(no_memento_found, gt, scratch_reg, Operand(at));
-  ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
-  if (allocation_memento_present) {
-    Branch(allocation_memento_present, cond, scratch_reg,
-           Operand(isolate()->factory()->allocation_memento_map()));
-  }
+  // Memento map check.
+  bind(&map_check);
+  lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+  Branch(no_memento_found, ne, scratch_reg,
+         Operand(isolate()->factory()->allocation_memento_map()));
 }
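// Editor's note (illustrative sketch, not part of this patch): the Xor/And
// against ~Page::kPageAlignmentMask above is a "same page" test. Two addresses
// lie on the same page exactly when they agree in all bits above the
// page-offset bits.
#include <cstdint>

bool OnSamePage(uintptr_t a, uintptr_t b, uintptr_t page_size) {
  // page_size is assumed to be a power of two; page_size - 1 plays the role
  // of kPageAlignmentMask.
  uintptr_t page_offset_mask = page_size - 1;
  return ((a ^ b) & ~page_offset_mask) == 0;
}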
 
 
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index 7f44ab9..401112d 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -265,7 +265,8 @@
 
   void Call(Label* target);
 
-  void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+  inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
+  inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
 
   inline void Move(Register dst, Register src) {
     if (!dst.is(src)) {
@@ -273,17 +274,29 @@
     }
   }
 
-  inline void Move(FPURegister dst, FPURegister src) {
+  inline void Move_d(FPURegister dst, FPURegister src) {
     if (!dst.is(src)) {
       mov_d(dst, src);
     }
   }
 
+  inline void Move_s(FPURegister dst, FPURegister src) {
+    if (!dst.is(src)) {
+      mov_s(dst, src);
+    }
+  }
+
+  inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
+
   inline void Move(Register dst_low, Register dst_high, FPURegister src) {
     mfc1(dst_low, src);
     mfhc1(dst_high, src);
   }
 
+  inline void Move(Register dst, FPURegister src) { dmfc1(dst, src); }
+
+  inline void Move(FPURegister dst, Register src) { dmtc1(src, dst); }
+
   inline void FmoveHigh(Register dst_high, FPURegister src) {
     mfhc1(dst_high, src);
   }
@@ -312,6 +325,17 @@
   void Movt(Register rd, Register rs, uint16_t cc = 0);
   void Movf(Register rd, Register rs, uint16_t cc = 0);
 
+  // Min, Max macros.
+  // On pre-r6 these functions may modify at and t8 registers.
+  void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+  void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+  void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+  void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
+                     Label* nan = nullptr);
+
   void Clz(Register rd, Register rs);
 
   // Jump unconditionally to given label.
@@ -676,6 +700,9 @@
 #undef DEFINE_INSTRUCTION2
 #undef DEFINE_INSTRUCTION3
 
+  // Load Scaled Address instructions. Parameter sa (shift argument) must be
+  // in the range [1, 31] (inclusive). On pre-r6 architectures the scratch
+  // register may be clobbered.
   void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
            Register scratch = at);
   void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
@@ -803,6 +830,14 @@
     Daddu(sp, sp, Operand(count * kPointerSize));
   }
 
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
+  // Push a standard frame, consisting of ra, fp, context and JS function.
+  void PushStandardFrame(Register function_reg);
+
+  void PopCommonFrame(Register marker_reg = no_reg);
+
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
@@ -1053,6 +1088,15 @@
   // -------------------------------------------------------------------------
   // JavaScript invokes.
 
+  // Removes the current frame and its arguments from the stack, preserving
+  // the arguments and a return address pushed to the stack for the next call.
+  // Neither |callee_args_count| nor |caller_args_count_reg| includes the
+  // receiver. |callee_args_count| is not modified; |caller_args_count_reg|
+  // is trashed.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeFunctionCode(Register function, Register new_target,
                           const ParameterCount& expected,
@@ -1277,32 +1321,41 @@
   // Usage: first call the appropriate arithmetic function, then call one of the
   // jump functions with the overflow_dst register as the second parameter.
 
-  void AdduAndCheckForOverflow(Register dst,
-                               Register left,
-                               Register right,
-                               Register overflow_dst,
-                               Register scratch = at);
+  inline void AddBranchOvf(Register dst, Register left, const Operand& right,
+                           Label* overflow_label, Register scratch = at) {
+    AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+  }
 
-  void AdduAndCheckForOverflow(Register dst, Register left,
-                               const Operand& right, Register overflow_dst,
-                               Register scratch);
+  inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
+                             Label* no_overflow_label, Register scratch = at) {
+    AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+  }
 
-  void SubuAndCheckForOverflow(Register dst,
-                               Register left,
-                               Register right,
-                               Register overflow_dst,
-                               Register scratch = at);
+  void AddBranchOvf(Register dst, Register left, const Operand& right,
+                    Label* overflow_label, Label* no_overflow_label,
+                    Register scratch = at);
 
-  void SubuAndCheckForOverflow(Register dst, Register left,
-                               const Operand& right, Register overflow_dst,
-                               Register scratch);
+  void AddBranchOvf(Register dst, Register left, Register right,
+                    Label* overflow_label, Label* no_overflow_label,
+                    Register scratch = at);
 
-  void DadduAndCheckForOverflow(Register dst, Register left, Register right,
-                                Register overflow_dst, Register scratch = at);
+  inline void SubBranchOvf(Register dst, Register left, const Operand& right,
+                           Label* overflow_label, Register scratch = at) {
+    SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+  }
 
-  void DadduAndCheckForOverflow(Register dst, Register left,
-                                const Operand& right, Register overflow_dst,
-                                Register scratch);
+  inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
+                             Label* no_overflow_label, Register scratch = at) {
+    SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+  }
+
+  void SubBranchOvf(Register dst, Register left, const Operand& right,
+                    Label* overflow_label, Label* no_overflow_label,
+                    Register scratch = at);
+
+  void SubBranchOvf(Register dst, Register left, Register right,
+                    Label* overflow_label, Label* no_overflow_label,
+                    Register scratch = at);
 
   inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
                             Label* overflow_label, Register scratch = at) {
@@ -1322,13 +1375,6 @@
                      Label* overflow_label, Label* no_overflow_label,
                      Register scratch = at);
 
-  void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
-                                Register overflow_dst, Register scratch = at);
-
-  void DsubuAndCheckForOverflow(Register dst, Register left,
-                                const Operand& right, Register overflow_dst,
-                                Register scratch);
-
   inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
                             Label* overflow_label, Register scratch = at) {
     DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
@@ -1648,6 +1694,9 @@
   // Jump if either of the registers contain a smi.
   void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
 
+  // Abort execution if argument is a number, enabled via --debug-code.
+  void AssertNotNumber(Register object);
+
   // Abort execution if argument is a smi, enabled via --debug-code.
   void AssertNotSmi(Register object);
   void AssertSmi(Register object);
@@ -1758,7 +1807,7 @@
     DecodeField<Field>(reg, reg);
   }
   // Generates function and stub prologue code.
-  void StubPrologue();
+  void StubPrologue(StackFrame::Type type);
   void Prologue(bool code_pre_aging);
 
   // Load the type feedback vector from a JavaScript frame.
@@ -1773,25 +1822,22 @@
   // in a0.  Assumes that any other register can be used as a scratch.
   void CheckEnumCache(Label* call_runtime);
 
-  // AllocationMemento support. Arrays may have an associated
-  // AllocationMemento object that can be checked for in order to pretransition
-  // to another type.
-  // On entry, receiver_reg should point to the array object.
-  // scratch_reg gets clobbered.
-  // If allocation info is present, jump to allocation_memento_present.
-  void TestJSArrayForAllocationMemento(
-      Register receiver_reg,
-      Register scratch_reg,
-      Label* no_memento_found,
-      Condition cond = al,
-      Label* allocation_memento_present = NULL);
+  // AllocationMemento support. Arrays may have an associated AllocationMemento
+  // object that can be checked for in order to pretransition to another type.
+  // On entry, receiver_reg should point to the array object. scratch_reg gets
+  // clobbered. If no info is present, jump to no_memento_found; otherwise fall
+  // through.
+  void TestJSArrayForAllocationMemento(Register receiver_reg,
+                                       Register scratch_reg,
+                                       Label* no_memento_found);
 
   void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                          Register scratch_reg,
                                          Label* memento_found) {
     Label no_memento_found;
     TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found, eq, memento_found);
+                                    &no_memento_found);
+    Branch(memento_found);
     bind(&no_memento_found);
   }
 
@@ -1832,8 +1878,6 @@
                                BranchDelaySlot bdslot);
   void BranchLong(Label* L, BranchDelaySlot bdslot);
   void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
-  void Jr(Label* L, BranchDelaySlot bdslot);
-  void Jalr(Label* L, BranchDelaySlot bdslot);
 
   // Common implementation of BranchF functions for the different formats.
   void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
@@ -1936,17 +1980,19 @@
       nop();
     }
     addiupc(at, 5);
-    dlsa(at, at, index, kPointerSizeLog2);
+    Dlsa(at, at, index, kPointerSizeLog2);
     ld(at, MemOperand(at));
   } else {
     Label here;
-    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
+    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 11);
     Align(8);
+    push(ra);
     bal(&here);
     dsll(at, index, kPointerSizeLog2);  // Branch delay slot.
     bind(&here);
     daddu(at, at, ra);
-    ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+    pop(ra);
+    ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
   }
   jr(at);
   nop();  // Branch delay slot nop.
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
index 70c06c8..9519865 100644
--- a/src/mips64/simulator-mips64.cc
+++ b/src/mips64/simulator-mips64.cc
@@ -24,11 +24,8 @@
 namespace v8 {
 namespace internal {
 
-// Utils functions.
-bool HaveSameSign(int64_t a, int64_t b) {
-  return ((a ^ b) >= 0);
-}
-
+// Util functions.
+inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
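// Editor's note (illustrative, not part of this patch): (a ^ b) >= 0 tests
// that the sign bits of a and b agree, since XOR leaves the sign bit set only
// when the two sign bits differ.
#include <cstdint>

bool SameSign(int64_t a, int64_t b) { return (a ^ b) >= 0; }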
 
 uint32_t get_fcsr_condition_bit(uint32_t cc) {
   if (cc == 0) {
@@ -3478,9 +3475,7 @@
         // Logical right-rotate of a word by a variable number of bits.
         // This is a special case of the SRLV instruction, added in MIPS32
         // Release 2. SA field is equal to 00001.
-        alu_out =
-            base::bits::RotateRight32(static_cast<const uint32_t>(rt_u()),
-                                      static_cast<const uint32_t>(rs_u()));
+        alu_out = base::bits::RotateRight64(rt_u(), rs_u());
       }
       SetResult(rd_reg(), alu_out);
       break;
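// Editor's note (illustrative sketch, not part of this patch): the change
// above switches the simulated rotate to a 64-bit rotation. A plain C++
// right-rotate (an analogue of base::bits::RotateRight64, not its actual
// implementation) looks like:
#include <cstdint>

uint64_t RotRight64(uint64_t value, unsigned shift) {
  shift &= 63;
  if (shift == 0) return value;  // avoid an undefined 64-bit left shift below
  return (value >> shift) | (value << (64 - shift));
}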
@@ -4331,13 +4326,8 @@
     case POP10:  // BOVC, BEQZALC, BEQC / ADDI (pre-r6)
       if (kArchVariant == kMips64r6) {
         if (rs_reg >= rt_reg) {  // BOVC
-          if (HaveSameSign(rs, rt)) {
-            if (rs > 0) {
-              BranchCompactHelper(rs > Registers::kMaxValue - rt, 16);
-            } else if (rs < 0) {
-              BranchCompactHelper(rs < Registers::kMinValue - rt, 16);
-            }
-          }
+          bool condition = !is_int32(rs) || !is_int32(rt) || !is_int32(rs + rt);
+          BranchCompactHelper(condition, 16);
         } else {
           if (rs_reg == 0) {  // BEQZALC
             BranchAndLinkCompactHelper(rt == 0, 16);
@@ -4363,15 +4353,8 @@
     case POP30:  // BNVC, BNEZALC, BNEC / DADDI (pre-r6)
       if (kArchVariant == kMips64r6) {
         if (rs_reg >= rt_reg) {  // BNVC
-          if (!HaveSameSign(rs, rt) || rs == 0 || rt == 0) {
-            BranchCompactHelper(true, 16);
-          } else {
-            if (rs > 0) {
-              BranchCompactHelper(rs <= Registers::kMaxValue - rt, 16);
-            } else if (rs < 0) {
-              BranchCompactHelper(rs >= Registers::kMinValue - rt, 16);
-            }
-          }
+          bool condition = is_int32(rs) && is_int32(rt) && is_int32(rs + rt);
+          BranchCompactHelper(condition, 16);
         } else {
           if (rs_reg == 0) {  // BNEZALC
             BranchAndLinkCompactHelper(rt != 0, 16);
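// Editor's note (illustrative sketch, not part of this patch): in the
// simulator, registers hold sign-extended 32-bit values in 64-bit storage, so
// BOVC/BNVC can test overflow by checking whether the exact 64-bit sum still
// fits in int32, which is what the is_int32(rs + rt) style expressions above do.
#include <cstdint>
#include <limits>

bool Add32Overflows(int64_t rs, int64_t rt) {
  // rs and rt are assumed to be sign-extended 32-bit values, so the 64-bit
  // sum is exact.
  int64_t sum = rs + rt;
  return sum < std::numeric_limits<int32_t>::min() ||
         sum > std::numeric_limits<int32_t>::max();
}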
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index cd4be13..bfc1895 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -472,6 +472,7 @@
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
+    case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
       return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 0d01ec2..b023b03 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -99,6 +99,7 @@
       Oddball::cast(this)->OddballVerify();
       break;
     case JS_OBJECT_TYPE:
+    case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_PROMISE_TYPE:
       JSObject::cast(this)->JSObjectVerify();
@@ -228,6 +229,10 @@
 
 void BytecodeArray::BytecodeArrayVerify() {
   // TODO(oth): Walk bytecodes and immediate values to validate sanity.
+  // - All bytecodes are known and well formed.
+  // - Jumps must go to new instruction starts.
+  // - No Illegal bytecodes.
+  // - No consecutive sequences of prefix Wide / ExtraWide.
   CHECK(IsBytecodeArray());
   CHECK(constant_pool()->IsFixedArray());
   VerifyHeapPointer(constant_pool());
@@ -618,7 +623,7 @@
     CHECK(number->IsSmi());
     int value = Smi::cast(number)->value();
     // Hidden oddballs have negative smis.
-    const int kLeastHiddenOddballNumber = -5;
+    const int kLeastHiddenOddballNumber = -6;
     CHECK_LE(value, 1);
     CHECK(value >= kLeastHiddenOddballNumber);
   }
@@ -641,6 +646,8 @@
     CHECK(this == heap->termination_exception());
   } else if (map() == heap->exception_map()) {
     CHECK(this == heap->exception());
+  } else if (map() == heap->optimized_out_map()) {
+    CHECK(this == heap->optimized_out());
   } else {
     UNREACHABLE();
   }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index e00478a..f4d7fb9 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -22,6 +22,7 @@
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap.h"
 #include "src/isolate.h"
+#include "src/isolate-inl.h"
 #include "src/layout-descriptor-inl.h"
 #include "src/lookup.h"
 #include "src/objects.h"
@@ -738,13 +739,12 @@
 bool HeapObject::IsContext() const {
   Map* map = this->map();
   Heap* heap = GetHeap();
-  return (map == heap->function_context_map() ||
-      map == heap->catch_context_map() ||
-      map == heap->with_context_map() ||
-      map == heap->native_context_map() ||
-      map == heap->block_context_map() ||
-      map == heap->module_context_map() ||
-      map == heap->script_context_map());
+  return (
+      map == heap->function_context_map() || map == heap->catch_context_map() ||
+      map == heap->with_context_map() || map == heap->native_context_map() ||
+      map == heap->block_context_map() || map == heap->module_context_map() ||
+      map == heap->script_context_map() ||
+      map == heap->debug_evaluate_context_map());
 }
 
 bool HeapObject::IsNativeContext() const {
@@ -845,6 +845,8 @@
 
 bool HeapObject::IsStringTable() const { return IsHashTable(); }
 
+bool HeapObject::IsStringSet() const { return IsHashTable(); }
+
 bool HeapObject::IsNormalizedMapCache() const {
   return NormalizedMapCache::IsNormalizedMapCache(this);
 }
@@ -908,9 +910,7 @@
 
 TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
 
-bool HeapObject::IsUndetectableObject() const {
-  return map()->is_undetectable();
-}
+bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
 
 bool HeapObject::IsAccessCheckNeeded() const {
   if (IsJSGlobalProxy()) {
@@ -1003,6 +1003,24 @@
   return true;
 }
 
+bool Object::ToUint32(uint32_t* value) {
+  if (IsSmi()) {
+    int num = Smi::cast(this)->value();
+    if (num < 0) return false;
+    *value = static_cast<uint32_t>(num);
+    return true;
+  }
+  if (IsHeapNumber()) {
+    double num = HeapNumber::cast(this)->value();
+    if (num < 0) return false;
+    uint32_t uint_value = FastD2UI(num);
+    if (FastUI2D(uint_value) == num) {
+      *value = uint_value;
+      return true;
+    }
+  }
+  return false;
+}
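// Editor's note (illustrative sketch, not part of this patch): the HeapNumber
// path above accepts a double only if it round-trips through uint32_t
// unchanged (FastD2UI followed by FastUI2D). A plain C++ analogue:
#include <cstdint>

bool DoubleToUint32Exact(double num, uint32_t* out) {
  // Reject NaN, negatives and values above UINT32_MAX so the cast below is
  // well defined.
  if (!(num >= 0.0) || num > 4294967295.0) return false;
  uint32_t u = static_cast<uint32_t>(num);
  if (static_cast<double>(u) != num) return false;  // round-trip check
  *out = u;
  return true;
}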
 
 // static
 MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
@@ -1013,6 +1031,12 @@
 
 
 // static
+MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
+  if (input->IsName()) return Handle<Name>::cast(input);
+  return ConvertToName(isolate, input);
+}
+
+// static
 MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
                                         ToPrimitiveHint hint) {
   if (input->IsPrimitive()) return input;
@@ -1027,15 +1051,39 @@
 MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
                                         Handle<Name> name) {
   LookupIterator it(object, name);
+  if (!it.IsFound()) return it.factory()->undefined_value();
   return GetProperty(&it);
 }
 
+MaybeHandle<Object> JSReceiver::GetProperty(Handle<JSReceiver> receiver,
+                                            Handle<Name> name) {
+  LookupIterator it(receiver, name, receiver);
+  if (!it.IsFound()) return it.factory()->undefined_value();
+  return Object::GetProperty(&it);
+}
+
 MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
                                        uint32_t index) {
   LookupIterator it(isolate, object, index);
+  if (!it.IsFound()) return it.factory()->undefined_value();
   return GetProperty(&it);
 }
 
+MaybeHandle<Object> JSReceiver::GetElement(Isolate* isolate,
+                                           Handle<JSReceiver> receiver,
+                                           uint32_t index) {
+  LookupIterator it(isolate, receiver, index, receiver);
+  if (!it.IsFound()) return it.factory()->undefined_value();
+  return Object::GetProperty(&it);
+}
+
+Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
+                                           Handle<Name> name) {
+  LookupIterator it(object, name, object,
+                    LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+  if (!it.IsFound()) return it.factory()->undefined_value();
+  return GetDataProperty(&it);
+}
 
 MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
                                        uint32_t index, Handle<Object> value,
@@ -1059,10 +1107,11 @@
   return PrototypeIterator::GetCurrent(iter);
 }
 
-MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
-                                        const char* name) {
+MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
+                                            Handle<JSReceiver> receiver,
+                                            const char* name) {
   Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
-  return GetProperty(object, str);
+  return GetProperty(receiver, str);
 }
 
 
@@ -1836,17 +1885,33 @@
 
 
 InterceptorInfo* JSObject::GetIndexedInterceptor() {
-  DCHECK(map()->has_indexed_interceptor());
-  JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
+  return map()->GetIndexedInterceptor();
+}
+
+InterceptorInfo* JSObject::GetNamedInterceptor() {
+  return map()->GetNamedInterceptor();
+}
+
+InterceptorInfo* Map::GetNamedInterceptor() {
+  DCHECK(has_named_interceptor());
+  JSFunction* constructor = JSFunction::cast(GetConstructor());
   DCHECK(constructor->shared()->IsApiFunction());
-  Object* result =
-      constructor->shared()->get_api_func_data()->indexed_property_handler();
-  return InterceptorInfo::cast(result);
+  return InterceptorInfo::cast(
+      constructor->shared()->get_api_func_data()->named_property_handler());
+}
+
+InterceptorInfo* Map::GetIndexedInterceptor() {
+  DCHECK(has_indexed_interceptor());
+  JSFunction* constructor = JSFunction::cast(GetConstructor());
+  DCHECK(constructor->shared()->IsApiFunction());
+  return InterceptorInfo::cast(
+      constructor->shared()->get_api_func_data()->indexed_property_handler());
 }
 
 
 ACCESSORS(Oddball, to_string, String, kToStringOffset)
 ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+ACCESSORS(Oddball, to_boolean, Oddball, kToBooleanOffset)
 ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
 
 
@@ -1896,11 +1961,14 @@
 
 void WeakCell::initialize(HeapObject* val) {
   WRITE_FIELD(this, kValueOffset, val);
-  Heap* heap = GetHeap();
   // We just have to execute the generational barrier here because we never
   // mark through a weak cell and collect evacuation candidates when we process
   // all weak cells.
-  heap->RecordWrite(this, kValueOffset, val);
+  WriteBarrierMode mode =
+      Page::FromAddress(this->address())->IsFlagSet(Page::BLACK_PAGE)
+          ? UPDATE_WRITE_BARRIER
+          : UPDATE_WEAK_WRITE_BARRIER;
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
 }
 
 
@@ -1936,6 +2004,8 @@
   // field operations considerably on average.
   if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
   switch (type) {
+    case JS_SPECIAL_API_OBJECT_TYPE:
+      return JSObject::kHeaderSize;
     case JS_GENERATOR_OBJECT_TYPE:
       return JSGeneratorObject::kSize;
     case JS_MODULE_TYPE:
@@ -2182,7 +2252,6 @@
   }
 }
 
-
 bool Object::ToArrayLength(uint32_t* index) { return Object::ToUint32(index); }
 
 
@@ -2929,6 +2998,9 @@
   return Max(capacity, kMinCapacity);
 }
 
+bool HashTableBase::IsKey(Heap* heap, Object* k) {
+  return k != heap->the_hole_value() && k != heap->undefined_value();
+}
 
 bool HashTableBase::IsKey(Object* k) {
   return !k->IsTheHole() && !k->IsUndefined();
@@ -2977,6 +3049,15 @@
   return kNotFound;
 }
 
+bool StringSetShape::IsMatch(String* key, Object* value) {
+  return value->IsString() && key->Equals(String::cast(value));
+}
+
+uint32_t StringSetShape::Hash(String* key) { return key->Hash(); }
+
+uint32_t StringSetShape::HashForObject(String* key, Object* object) {
+  return object->IsString() ? String::cast(object)->Hash() : 0;
+}
 
 bool SeededNumberDictionary::requires_slow_elements() {
   Object* max_index_object = get(kMaxNumberKeyIndex);
@@ -3080,6 +3161,7 @@
 CAST_ACCESSOR(SlicedString)
 CAST_ACCESSOR(Smi)
 CAST_ACCESSOR(String)
+CAST_ACCESSOR(StringSet)
 CAST_ACCESSOR(StringTable)
 CAST_ACCESSOR(Struct)
 CAST_ACCESSOR(Symbol)
@@ -3902,10 +3984,9 @@
 
 ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
 ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
-ACCESSORS(BytecodeArray, source_position_table, FixedArray,
+ACCESSORS(BytecodeArray, source_position_table, ByteArray,
           kSourcePositionTableOffset)
 
-
 Address BytecodeArray::GetFirstBytecodeAddress() {
   return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
 }
@@ -4514,16 +4595,6 @@
 }
 
 
-void Map::set_is_strong() {
-  set_bit_field3(IsStrong::update(bit_field3(), true));
-}
-
-
-bool Map::is_strong() {
-  return IsStrong::decode(bit_field3());
-}
-
-
 void Map::set_new_target_is_base(bool value) {
   set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
 }
@@ -4690,8 +4761,7 @@
   return kind() == STUB || kind() == HANDLER || kind() == LOAD_IC ||
          kind() == KEYED_LOAD_IC || kind() == CALL_IC || kind() == STORE_IC ||
          kind() == KEYED_STORE_IC || kind() == BINARY_OP_IC ||
-         kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
-         kind() == TO_BOOLEAN_IC;
+         kind() == COMPARE_IC || kind() == TO_BOOLEAN_IC;
 }
 
 
@@ -4723,7 +4793,6 @@
   return ExtractTypeFromFlags(flags());
 }
 
-
 // For initialization.
 void Code::set_raw_kind_specific_flags1(int value) {
   WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -4870,14 +4939,10 @@
   }
 }
 
-
-int Code::builtin_index() {
-  return READ_INT32_FIELD(this, kKindSpecificFlags1Offset);
-}
-
+int Code::builtin_index() { return READ_INT_FIELD(this, kBuiltinIndexOffset); }
 
 void Code::set_builtin_index(int index) {
-  WRITE_INT32_FIELD(this, kKindSpecificFlags1Offset, index);
+  WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
 }
 
 
@@ -4981,15 +5046,14 @@
 bool Code::is_call_stub() { return kind() == CALL_IC; }
 bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
 bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
-bool Code::is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
 bool Code::is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
 bool Code::is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
-
+bool Code::is_wasm_code() { return kind() == WASM_FUNCTION; }
 
 bool Code::embeds_maps_weakly() {
   Kind k = kind();
   return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
-          k == KEYED_STORE_IC || k == COMPARE_NIL_IC) &&
+          k == KEYED_STORE_IC) &&
          ic_state() == MONOMORPHIC;
 }
 
@@ -5005,20 +5069,17 @@
   return constant_pool;
 }
 
-
 Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state,
                                ExtraICState extra_ic_state, StubType type,
                                CacheHolderFlag holder) {
   // Compute the bit mask.
-  unsigned int bits = KindField::encode(kind)
-      | ICStateField::encode(ic_state)
-      | TypeField::encode(type)
-      | ExtraICStateField::encode(extra_ic_state)
-      | CacheHolderField::encode(holder);
+  unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) |
+                      TypeField::encode(type) |
+                      ExtraICStateField::encode(extra_ic_state) |
+                      CacheHolderField::encode(holder);
   return static_cast<Flags>(bits);
 }
 
-
 Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
                                           ExtraICState extra_ic_state,
                                           CacheHolderFlag holder,
@@ -5052,7 +5113,6 @@
   return TypeField::decode(flags);
 }
 
-
 CacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
   return CacheHolderField::decode(flags);
 }
@@ -5135,7 +5195,7 @@
   friend class Code;
 };
 
-int AbstractCode::Size() {
+int AbstractCode::instruction_size() {
   if (IsCode()) {
     return GetCode()->instruction_size();
   } else {
@@ -5143,6 +5203,45 @@
   }
 }
 
+int AbstractCode::ExecutableSize() {
+  if (IsCode()) {
+    return GetCode()->ExecutableSize();
+  } else {
+    return GetBytecodeArray()->BytecodeArraySize();
+  }
+}
+
+Address AbstractCode::instruction_start() {
+  if (IsCode()) {
+    return GetCode()->instruction_start();
+  } else {
+    return GetBytecodeArray()->GetFirstBytecodeAddress();
+  }
+}
+
+Address AbstractCode::instruction_end() {
+  if (IsCode()) {
+    return GetCode()->instruction_end();
+  } else {
+    return GetBytecodeArray()->GetFirstBytecodeAddress() +
+           GetBytecodeArray()->length();
+  }
+}
+
+bool AbstractCode::contains(byte* inner_pointer) {
+  return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
+}
+
+AbstractCode::Kind AbstractCode::kind() {
+  if (IsCode()) {
+    STATIC_ASSERT(AbstractCode::FUNCTION ==
+                  static_cast<AbstractCode::Kind>(Code::FUNCTION));
+    return static_cast<AbstractCode::Kind>(GetCode()->kind());
+  } else {
+    return INTERPRETED_FUNCTION;
+  }
+}
+
 Code* AbstractCode::GetCode() { return Code::cast(this); }
 
 BytecodeArray* AbstractCode::GetBytecodeArray() {
@@ -5500,8 +5599,8 @@
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
 ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
 ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
-ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
-
+ACCESSORS(SharedFunctionInfo, function_identifier, Object,
+          kFunctionIdentifierOffset)
 
 SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -5634,6 +5733,13 @@
             optimization_disabled,
             kOptimizationDisabled)
 
+AbstractCode* SharedFunctionInfo::abstract_code() {
+  if (HasBytecodeArray()) {
+    return AbstractCode::cast(bytecode_array());
+  } else {
+    return AbstractCode::cast(code());
+  }
+}
 
 void SharedFunctionInfo::set_optimization_disabled(bool disable) {
   set_compiler_hints(BooleanBit::set(compiler_hints(),
@@ -5645,8 +5751,7 @@
 LanguageMode SharedFunctionInfo::language_mode() {
   STATIC_ASSERT(LANGUAGE_END == 3);
   return construct_language_mode(
-      BooleanBit::get(compiler_hints(), kStrictModeFunction),
-      BooleanBit::get(compiler_hints(), kStrongModeFunction));
+      BooleanBit::get(compiler_hints(), kStrictModeFunction));
 }
 
 
@@ -5657,7 +5762,6 @@
   DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
   int hints = compiler_hints();
   hints = BooleanBit::set(hints, kStrictModeFunction, is_strict(language_mode));
-  hints = BooleanBit::set(hints, kStrongModeFunction, is_strong(language_mode));
   set_compiler_hints(hints);
 }
 
@@ -5719,7 +5823,7 @@
 
 
 void SharedFunctionInfo::DontAdaptArguments() {
-  DCHECK(code()->kind() == Code::BUILTIN);
+  DCHECK(code()->kind() == Code::BUILTIN || code()->kind() == Code::STUB);
   set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
 }
 
@@ -5824,18 +5928,11 @@
   return FunctionTemplateInfo::cast(function_data());
 }
 
-
-bool SharedFunctionInfo::HasBuiltinFunctionId() {
-  return function_data()->IsSmi();
+void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
+  DCHECK(function_data()->IsUndefined());
+  set_function_data(data);
 }
 
-
-BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
-  DCHECK(HasBuiltinFunctionId());
-  return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
-}
-
-
 bool SharedFunctionInfo::HasBytecodeArray() {
   return function_data()->IsBytecodeArray();
 }
@@ -5846,6 +5943,46 @@
   return BytecodeArray::cast(function_data());
 }
 
+void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
+  DCHECK(function_data()->IsUndefined());
+  set_function_data(bytecode);
+}
+
+void SharedFunctionInfo::ClearBytecodeArray() {
+  DCHECK(function_data()->IsUndefined() || HasBytecodeArray());
+  set_function_data(GetHeap()->undefined_value());
+}
+
+bool SharedFunctionInfo::HasBuiltinFunctionId() {
+  return function_identifier()->IsSmi();
+}
+
+BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
+  DCHECK(HasBuiltinFunctionId());
+  return static_cast<BuiltinFunctionId>(
+      Smi::cast(function_identifier())->value());
+}
+
+void SharedFunctionInfo::set_builtin_function_id(BuiltinFunctionId id) {
+  set_function_identifier(Smi::FromInt(id));
+}
+
+bool SharedFunctionInfo::HasInferredName() {
+  return function_identifier()->IsString();
+}
+
+String* SharedFunctionInfo::inferred_name() {
+  if (HasInferredName()) {
+    return String::cast(function_identifier());
+  }
+  DCHECK(function_identifier()->IsUndefined() || HasBuiltinFunctionId());
+  return GetIsolate()->heap()->empty_string();
+}
+
+void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
+  DCHECK(function_identifier()->IsUndefined() || HasInferredName());
+  set_function_identifier(inferred_name);
+}
 
 int SharedFunctionInfo::ic_age() {
   return ICAgeBits::decode(counters());
@@ -5944,26 +6081,6 @@
 }
 
 
-// static
-void SharedFunctionInfo::AddToOptimizedCodeMap(
-    Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
-    Handle<Code> code, Handle<LiteralsArray> literals, BailoutId osr_ast_id) {
-  AddToOptimizedCodeMapInternal(shared, native_context, code, literals,
-                                osr_ast_id);
-}
-
-
-// static
-void SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(
-    Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
-    Handle<LiteralsArray> literals) {
-  Isolate* isolate = shared->GetIsolate();
-  Handle<Oddball> undefined = isolate->factory()->undefined_value();
-  AddToOptimizedCodeMapInternal(shared, native_context, undefined, literals,
-                                BailoutId::None());
-}
-
-
 bool JSFunction::IsOptimized() {
   return code()->kind() == Code::OPTIMIZED_FUNCTION;
 }
@@ -6008,6 +6125,14 @@
   }
 }
 
+AbstractCode* JSFunction::abstract_code() {
+  Code* code = this->code();
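+  // The interpreter entry trampoline stands in for real code on interpreted
+  // functions; report their bytecode array instead.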
+  if (code->is_interpreter_entry_trampoline()) {
+    return AbstractCode::cast(shared()->bytecode_array());
+  } else {
+    return AbstractCode::cast(code);
+  }
+}
 
 Code* JSFunction::code() {
   return Code::cast(
@@ -6939,6 +7064,17 @@
   return GetProperty(&it);
 }
 
+MaybeHandle<Object> Object::SetPropertyOrElement(Handle<Object> object,
+                                                 Handle<Name> name,
+                                                 Handle<Object> value,
+                                                 LanguageMode language_mode,
+                                                 StoreFromKeyed store_mode) {
+  LookupIterator it =
+      LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
+  MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
+  return value;
+}
+
 MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
                                                  Handle<Name> name,
                                                  Handle<JSReceiver> holder) {
@@ -6972,11 +7108,10 @@
   return NameDictionary::cast(properties());
 }
 
-
 Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
                                     Handle<Name> name) {
-  LookupIterator it =
-      LookupIterator::PropertyOrElement(object->GetIsolate(), object, name);
+  LookupIterator it = LookupIterator::PropertyOrElement(object->GetIsolate(),
+                                                        object, name, object);
   return HasProperty(&it);
 }
 
@@ -6985,7 +7120,7 @@
                                        Handle<Name> name) {
   if (object->IsJSObject()) {  // Shortcut
     LookupIterator it = LookupIterator::PropertyOrElement(
-        object->GetIsolate(), object, name, LookupIterator::HIDDEN);
+        object->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
     return HasProperty(&it);
   }
 
@@ -6998,8 +7133,8 @@
 
 Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
     Handle<JSReceiver> object, Handle<Name> name) {
-  LookupIterator it =
-      LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
+  LookupIterator it = LookupIterator::PropertyOrElement(name->GetIsolate(),
+                                                        object, name, object);
   return GetPropertyAttributes(&it);
 }
 
@@ -7007,13 +7142,13 @@
 Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
     Handle<JSReceiver> object, Handle<Name> name) {
   LookupIterator it = LookupIterator::PropertyOrElement(
-      name->GetIsolate(), object, name, LookupIterator::HIDDEN);
+      name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
   return GetPropertyAttributes(&it);
 }
 
 
 Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
-  LookupIterator it(object->GetIsolate(), object, index);
+  LookupIterator it(object->GetIsolate(), object, index, object);
   return HasProperty(&it);
 }
 
@@ -7021,7 +7156,7 @@
 Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
     Handle<JSReceiver> object, uint32_t index) {
   Isolate* isolate = object->GetIsolate();
-  LookupIterator it(isolate, object, index);
+  LookupIterator it(isolate, object, index, object);
   return GetPropertyAttributes(&it);
 }
 
@@ -7029,7 +7164,7 @@
 Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
     Handle<JSReceiver> object, uint32_t index) {
   Isolate* isolate = object->GetIsolate();
-  LookupIterator it(isolate, object, index, LookupIterator::HIDDEN);
+  LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
   return GetPropertyAttributes(&it);
 }
 
@@ -7052,11 +7187,12 @@
       : JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
 }
 
-
-Object* JSReceiver::GetIdentityHash() {
-  return IsJSProxy()
-      ? JSProxy::cast(this)->GetIdentityHash()
-      : JSObject::cast(this)->GetIdentityHash();
+Handle<Object> JSReceiver::GetIdentityHash(Isolate* isolate,
+                                           Handle<JSReceiver> receiver) {
+  return receiver->IsJSProxy() ? JSProxy::GetIdentityHash(
+                                     isolate, Handle<JSProxy>::cast(receiver))
+                               : JSObject::GetIdentityHash(
+                                     isolate, Handle<JSObject>::cast(receiver));
 }
 
 
@@ -7089,6 +7225,11 @@
   set_flag(BooleanBit::set(flag(), kSpecialDataProperty, value));
 }
 
+bool AccessorInfo::is_sloppy() { return BooleanBit::get(flag(), kIsSloppy); }
+
+void AccessorInfo::set_is_sloppy(bool value) {
+  set_flag(BooleanBit::set(flag(), kIsSloppy, value));
+}
 
 PropertyAttributes AccessorInfo::property_attributes() {
   return AttributesField::decode(static_cast<uint32_t>(flag()));
@@ -7448,6 +7589,11 @@
 }
 
 
+bool JSArray::HasArrayPrototype(Isolate* isolate) {
+  return map()->prototype() == *isolate->initial_array_prototype();
+}
+
+
 int TypeFeedbackInfo::ic_total_count() {
   int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
   return ICTotalCountField::decode(current);
@@ -7645,6 +7791,30 @@
   return value & MemoryChunk::kAlignmentMask;
 }
 
+static inline Handle<Object> MakeEntryPair(Isolate* isolate, uint32_t index,
+                                           Handle<Object> value) {
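+  // Packs {index-as-string, value} into a fresh two-element FAST_ELEMENTS
+  // JSArray, i.e. a [key, value] entry pair.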
+  Handle<Object> key = isolate->factory()->Uint32ToString(index);
+  Handle<FixedArray> entry_storage =
+      isolate->factory()->NewUninitializedFixedArray(2);
+  {
+    entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
+    entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
+  }
+  return isolate->factory()->NewJSArrayWithElements(entry_storage,
+                                                    FAST_ELEMENTS, 2);
+}
+
+static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Name> key,
+                                           Handle<Object> value) {
+  Handle<FixedArray> entry_storage =
+      isolate->factory()->NewUninitializedFixedArray(2);
+  {
+    entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
+    entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
+  }
+  return isolate->factory()->NewJSArrayWithElements(entry_storage,
+                                                    FAST_ELEMENTS, 2);
+}
 
 #undef TYPE_CHECKER
 #undef CAST_ACCESSOR
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 67bc62e..58092a4 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -95,6 +95,7 @@
       os << "filler";
       break;
     case JS_OBJECT_TYPE:  // fall through
+    case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_ARRAY_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
@@ -395,10 +396,16 @@
   obj->PrintHeader(os, id);
   // Don't call GetElementsKind, its validation code can cause the printer to
   // fail when debugging.
-  PrototypeIterator iter(obj->GetIsolate(), obj);
   os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " ["
-     << ElementsKindToString(obj->map()->elements_kind())
-     << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+     << ElementsKindToString(obj->map()->elements_kind());
+  if (obj->elements()->map() == obj->GetHeap()->fixed_cow_array_map()) {
+    os << " (COW)";
+  }
+  PrototypeIterator iter(obj->GetIsolate(), obj);
+  os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+  if (obj->elements()->length() > 0) {
+    os << "\n - elements = " << Brief(obj->elements());
+  }
 }
 
 
@@ -454,6 +461,12 @@
   }
   os << "\n - elements kind: " << ElementsKindToString(elements_kind());
   os << "\n - unused property fields: " << unused_property_fields();
+  os << "\n - enum length: ";
+  if (EnumLength() == kInvalidEnumCacheSentinel) {
+    os << "invalid";
+  } else {
+    os << EnumLength();
+  }
   if (is_deprecated()) os << "\n - deprecated_map";
   if (is_stable()) os << "\n - stable_map";
   if (is_dictionary_map()) os << "\n - dictionary_map";
@@ -466,7 +479,6 @@
   if (is_access_check_needed()) os << "\n - access_check_needed";
   if (!is_extensible()) os << "\n - non-extensible";
   if (is_observed()) os << "\n - observed";
-  if (is_strong()) os << "\n - strong_map";
   if (is_prototype_map()) {
     os << "\n - prototype_map";
     os << "\n - prototype info: " << Brief(prototype_info());
@@ -856,6 +868,8 @@
   if (has_initial_map()) os << Brief(initial_map());
   os << "\n - shared_info = " << Brief(shared());
   os << "\n - name = " << Brief(shared()->name());
+  os << "\n - formal_parameter_count = "
+     << shared()->internal_formal_parameter_count();
   if (shared()->is_generator()) {
     os << "\n   - generator";
   }
@@ -868,9 +882,10 @@
 
 void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "SharedFunctionInfo");
-  os << "\n - name: " << Brief(name());
-  os << "\n - expected_nof_properties: " << expected_nof_properties();
-  os << "\n - ast_node_count: " << ast_node_count();
+  os << "\n - name = " << Brief(name());
+  os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
+  os << "\n - expected_nof_properties = " << expected_nof_properties();
+  os << "\n - ast_node_count = " << ast_node_count();
   os << "\n - instance class name = ";
   instance_class_name()->Print(os);
   os << "\n - code = " << Brief(code());
@@ -1297,8 +1312,6 @@
          << ")";
     } else if (key == heap->strict_function_transition_symbol()) {
       os << " (transition to strict function)";
-    } else if (key == heap->strong_function_transition_symbol()) {
-      os << " (transition to strong function)";
     } else if (key == heap->observed_symbol()) {
       os << " (transition to Object.observe)";
     } else {
diff --git a/src/objects.cc b/src/objects.cc
index f577d5e..1a82c3c 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -8,11 +8,13 @@
 #include <iomanip>
 #include <sstream>
 
+#include "src/objects-inl.h"
+
 #include "src/accessors.h"
 #include "src/allocation-site-scopes.h"
 #include "src/api.h"
+#include "src/api-arguments.h"
 #include "src/api-natives.h"
-#include "src/arguments.h"
 #include "src/base/bits.h"
 #include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
@@ -31,16 +33,16 @@
 #include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 #include "src/identity-map.h"
-#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/interpreter.h"
 #include "src/interpreter/source-position-table.h"
 #include "src/isolate-inl.h"
-#include "src/key-accumulator.h"
+#include "src/keys.h"
 #include "src/list.h"
 #include "src/log.h"
 #include "src/lookup.h"
 #include "src/macro-assembler.h"
 #include "src/messages.h"
-#include "src/objects-inl.h"
 #include "src/objects-body-descriptors-inl.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/property-descriptor.h"
@@ -113,17 +115,18 @@
   return result;
 }
 
-
+// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
 // static
-MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, input, Object::ToPrimitive(input, ToPrimitiveHint::kString),
-      Name);
-  if (input->IsName()) return Handle<Name>::cast(input);
-  return ToString(isolate, input);
+MaybeHandle<JSReceiver> Object::ConvertReceiver(Isolate* isolate,
+                                                Handle<Object> object) {
+  if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+  if (*object == isolate->heap()->null_value() ||
+      *object == isolate->heap()->undefined_value()) {
+    return handle(isolate->global_proxy(), isolate);
+  }
+  return Object::ToObject(isolate, object);
 }
 
-
 // static
 MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
   while (true) {
@@ -175,6 +178,16 @@
 
 
 // static
+MaybeHandle<Name> Object::ConvertToName(Isolate* isolate,
+                                        Handle<Object> input) {
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, input, Object::ToPrimitive(input, ToPrimitiveHint::kString),
+      Name);
+  if (input->IsName()) return Handle<Name>::cast(input);
+  return ToString(isolate, input);
+}
+
+// static
 MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
   while (true) {
     if (input->IsString()) {
@@ -218,7 +231,7 @@
   if (IsBoolean()) return IsTrue();
   if (IsSmi()) return Smi::cast(this)->value() != 0;
   if (IsUndefined() || IsNull()) return false;
-  if (IsUndetectableObject()) return false;   // Undetectable object is false.
+  if (IsUndetectable()) return false;  // Undetectable object is false.
   if (IsString()) return String::cast(this)->length() != 0;
   if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
   return true;
@@ -284,6 +297,10 @@
 
 // static
 Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
+  // This is the generic version of Abstract Equality Comparison; a version in
+  // JavaScript land is available in the EqualStub and NotEqualStub. Whenever
+  // you change something functionality-wise in here, remember to update the
+  // TurboFan code stubs as well.
   while (true) {
     if (x->IsNumber()) {
       if (y->IsNumber()) {
@@ -292,7 +309,7 @@
         return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
       } else if (y->IsString()) {
         return Just(NumberEquals(x, String::ToNumber(Handle<String>::cast(y))));
-      } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+      } else if (y->IsJSReceiver()) {
         if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
                  .ToHandle(&y)) {
           return Nothing<bool>();
@@ -310,7 +327,7 @@
       } else if (y->IsBoolean()) {
         x = String::ToNumber(Handle<String>::cast(x));
         return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
-      } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+      } else if (y->IsJSReceiver()) {
         if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
                  .ToHandle(&y)) {
           return Nothing<bool>();
@@ -326,7 +343,7 @@
       } else if (y->IsString()) {
         y = String::ToNumber(Handle<String>::cast(y));
         return Just(NumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
-      } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+      } else if (y->IsJSReceiver()) {
         if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
                  .ToHandle(&y)) {
           return Nothing<bool>();
@@ -338,7 +355,7 @@
     } else if (x->IsSymbol()) {
       if (y->IsSymbol()) {
         return Just(x.is_identical_to(y));
-      } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+      } else if (y->IsJSReceiver()) {
         if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
                  .ToHandle(&y)) {
           return Nothing<bool>();
@@ -350,7 +367,7 @@
       if (y->IsSimd128Value()) {
         return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
                                          Handle<Simd128Value>::cast(y)));
-      } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+      } else if (y->IsJSReceiver()) {
         if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
                  .ToHandle(&y)) {
           return Nothing<bool>();
@@ -358,11 +375,11 @@
       } else {
         return Just(false);
       }
-    } else if (x->IsJSReceiver() && !x->IsUndetectableObject()) {
+    } else if (x->IsJSReceiver()) {
       if (y->IsJSReceiver()) {
         return Just(x.is_identical_to(y));
-      } else if (y->IsNull() || y->IsUndefined()) {
-        return Just(false);
+      } else if (y->IsUndetectable()) {
+        return Just(x->IsUndetectable());
       } else if (y->IsBoolean()) {
         y = Oddball::ToNumber(Handle<Oddball>::cast(y));
       } else if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x))
@@ -370,9 +387,7 @@
         return Nothing<bool>();
       }
     } else {
-      return Just(
-          (x->IsNull() || x->IsUndefined() || x->IsUndetectableObject()) &&
-          (y->IsNull() || y->IsUndefined() || y->IsUndetectableObject()));
+      return Just(x->IsUndetectable() && y->IsUndetectable());
     }
   }
 }
@@ -397,7 +412,7 @@
 Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
   if (object->IsNumber()) return isolate->factory()->number_string();
   if (object->IsOddball()) return handle(Oddball::cast(*object)->type_of());
-  if (object->IsUndetectableObject()) {
+  if (object->IsUndetectable()) {
     return isolate->factory()->undefined_string();
   }
   if (object->IsString()) return isolate->factory()->string_string();
@@ -618,10 +633,11 @@
                     FixedArray);
   }
   // 4. Let len be ? ToLength(? Get(obj, "length")).
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
   Handle<Object> raw_length_obj;
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, raw_length_obj,
-      JSReceiver::GetProperty(object, isolate->factory()->length_string()),
+      JSReceiver::GetProperty(receiver, isolate->factory()->length_string()),
       FixedArray);
   Handle<Object> raw_length_number;
   ASSIGN_RETURN_ON_EXCEPTION(isolate, raw_length_number,
@@ -642,8 +658,9 @@
     // 7a. Let indexName be ToString(index).
     // 7b. Let next be ? Get(obj, indexName).
     Handle<Object> next;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, next, Object::GetElement(isolate, object, index), FixedArray);
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, next,
+                               JSReceiver::GetElement(isolate, receiver, index),
+                               FixedArray);
     switch (element_types) {
       case ElementTypes::kAll:
         // Nothing to do.
@@ -678,13 +695,12 @@
       case LookupIterator::TRANSITION:
         UNREACHABLE();
       case LookupIterator::JSPROXY:
-        // Call the "has" trap on proxies.
         return JSProxy::HasProperty(it->isolate(), it->GetHolder<JSProxy>(),
                                     it->GetName());
       case LookupIterator::INTERCEPTOR: {
         Maybe<PropertyAttributes> result =
             JSObject::GetPropertyAttributesWithInterceptor(it);
-        if (!result.IsJust()) return Nothing<bool>();
+        if (result.IsNothing()) return Nothing<bool>();
         if (result.FromJust() != ABSENT) return Just(true);
         break;
       }
@@ -692,7 +708,7 @@
         if (it->HasAccess()) break;
         Maybe<PropertyAttributes> result =
             JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
-        if (!result.IsJust()) return Nothing<bool>();
+        if (result.IsNothing()) return Nothing<bool>();
         return Just(result.FromJust() != ABSENT);
       }
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -714,9 +730,14 @@
       case LookupIterator::NOT_FOUND:
       case LookupIterator::TRANSITION:
         UNREACHABLE();
-      case LookupIterator::JSPROXY:
-        return JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
-                                    it->GetName(), it->GetReceiver());
+      case LookupIterator::JSPROXY: {
+        bool was_found;
+        MaybeHandle<Object> result =
+            JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
+                                 it->GetName(), it->GetReceiver(), &was_found);
+        if (!was_found) it->NotFound();
+        return result;
+      }
       case LookupIterator::INTERCEPTOR: {
         bool done;
         Handle<Object> result;
@@ -756,7 +777,9 @@
 MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
                                          Handle<JSProxy> proxy,
                                          Handle<Name> name,
-                                         Handle<Object> receiver) {
+                                         Handle<Object> receiver,
+                                         bool* was_found) {
+  *was_found = true;
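+  // Stays true on the trap path; only the untrapped fallback to the target
+  // below reports whether the property was actually found.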
   if (receiver->IsJSGlobalObject()) {
     THROW_NEW_ERROR(
         isolate,
@@ -789,7 +812,9 @@
     // 7.a Return target.[[Get]](P, Receiver).
     LookupIterator it =
         LookupIterator::PropertyOrElement(isolate, receiver, name, target);
-    return Object::GetProperty(&it);
+    MaybeHandle<Object> result = Object::GetProperty(&it);
+    *was_found = it.IsFound();
+    return result;
   }
   // 8. Let trapResult be ? Call(trap, handler, «target, P, Receiver»).
   Handle<Object> trap_result;
@@ -838,14 +863,6 @@
 }
 
 
-Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
-                                           Handle<Name> name) {
-  LookupIterator it(object, name,
-                    LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
-  return GetDataProperty(&it);
-}
-
-
 Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
@@ -892,26 +909,6 @@
 }
 
 
-bool Object::ToUint32(uint32_t* value) {
-  if (IsSmi()) {
-    int num = Smi::cast(this)->value();
-    if (num < 0) return false;
-    *value = static_cast<uint32_t>(num);
-    return true;
-  }
-  if (IsHeapNumber()) {
-    double num = HeapNumber::cast(this)->value();
-    if (num < 0) return false;
-    uint32_t uint_value = FastD2UI(num);
-    if (FastUI2D(uint_value) == num) {
-      *value = uint_value;
-      return true;
-    }
-  }
-  return false;
-}
-
-
 bool FunctionTemplateInfo::IsTemplateFor(Object* object) {
   if (!object->IsHeapObject()) return false;
   return IsTemplateFor(HeapObject::cast(object)->map());
@@ -983,19 +980,18 @@
   return result;
 }
 
-
-Handle<FixedArray> JSObject::EnsureWritableFastElements(
-    Handle<JSObject> object) {
+void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
   DCHECK(object->HasFastSmiOrObjectElements() ||
          object->HasFastStringWrapperElements());
-  Isolate* isolate = object->GetIsolate();
-  Handle<FixedArray> elems(FixedArray::cast(object->elements()), isolate);
-  if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
+  FixedArray* raw_elems = FixedArray::cast(object->elements());
+  Heap* heap = object->GetHeap();
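+  // Only copy-on-write backing stores need to be replaced before writing.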
+  if (raw_elems->map() != heap->fixed_cow_array_map()) return;
+  Isolate* isolate = heap->isolate();
+  Handle<FixedArray> elems(raw_elems, isolate);
   Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
       elems, isolate->factory()->fixed_array_map());
   object->set_elements(*writable_elems);
   isolate->counters()->cow_arrays_converted()->Increment();
-  return writable_elems;
 }
 
 
@@ -1084,18 +1080,19 @@
         v8::ToCData<v8::AccessorNameGetterCallback>(info->getter());
     if (call_fun == nullptr) return isolate->factory()->undefined_value();
 
-    LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
+    if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+                                 Object::ConvertReceiver(isolate, receiver),
+                                 Object);
+    }
+
     PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
                                    Object::DONT_THROW);
-    v8::Local<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(name));
+    Handle<Object> result = args.Call(call_fun, name);
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    if (result.IsEmpty()) {
-      return ReadAbsentProperty(isolate, receiver, name);
-    }
-    Handle<Object> return_value = v8::Utils::OpenHandle(*result);
-    return_value->VerifyApiCallResultType();
+    if (result.is_null()) return ReadAbsentProperty(isolate, receiver, name);
     // Rebox handle before return.
-    return handle(*return_value, isolate);
+    return handle(*result, isolate);
   }
 
   // Regular accessor.
@@ -1159,10 +1156,15 @@
     // have a setter.
     if (call_fun == nullptr) return Just(true);
 
-    LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
+    if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, receiver, Object::ConvertReceiver(isolate, receiver),
+          Nothing<bool>());
+    }
+
     PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
                                    should_throw);
-    args.Call(call_fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+    args.Call(call_fun, name, value);
     RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
     return Just(true);
   }
@@ -1424,8 +1426,11 @@
   Object* hash = GetSimpleHash();
   if (hash->IsSmi()) return hash;
 
+  DisallowHeapAllocation no_gc;
   DCHECK(IsJSReceiver());
-  return JSReceiver::cast(this)->GetIdentityHash();
+  JSReceiver* receiver = JSReceiver::cast(this);
+  Isolate* isolate = receiver->GetIsolate();
+  return *JSReceiver::GetIdentityHash(isolate, handle(receiver, isolate));
 }
 
 
@@ -1556,7 +1561,7 @@
     return default_species;
   }
   if (original_array->IsJSArray() &&
-      Handle<JSReceiver>::cast(original_array)->map()->new_target_is_base() &&
+      Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
       isolate->IsArraySpeciesLookupChainIntact()) {
     return default_species;
   }
@@ -1583,8 +1588,8 @@
     if (constructor->IsJSReceiver()) {
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, constructor,
-          Object::GetProperty(constructor,
-                              isolate->factory()->species_symbol()),
+          JSReceiver::GetProperty(Handle<JSReceiver>::cast(constructor),
+                                  isolate->factory()->species_symbol()),
           Object);
       if (constructor->IsNull()) {
         constructor = isolate->factory()->undefined_value();
@@ -1739,7 +1744,8 @@
 
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
-  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
+                             ClearRecordedSlots::kNo);
 
   // We are storing the new map using release store after creating a filler for
   // the left-over space to avoid races with the sweeper thread.
@@ -1800,7 +1806,8 @@
 
   // Byte size of the external String object.
   int new_size = this->SizeFromMap(new_map);
-  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
+                             ClearRecordedSlots::kNo);
 
   // We are storing the new map using release store after creating a filler for
   // the left-over space to avoid races with the sweeper thread.
@@ -1953,7 +1960,7 @@
       break;
     }
     // All other JSObjects are rather similar to each other (JSObject,
-    // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
+    // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
     default: {
       Map* map_of_this = map();
       Heap* heap = GetHeap();
@@ -2492,11 +2499,9 @@
     }
   }
 
-  if (FLAG_harmony_tostring) {
-    Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
-        receiver, isolate->factory()->to_string_tag_symbol());
-    if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
-  }
+  Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
+      receiver, isolate->factory()->to_string_tag_symbol());
+  if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
 
   PrototypeIterator iter(isolate, receiver);
   if (iter.IsAtEnd()) return handle(receiver->class_name());
@@ -2944,8 +2949,8 @@
 
   if (instance_size_delta > 0) {
     Address address = object->address();
-    heap->CreateFillerObjectAt(
-        address + new_instance_size, instance_size_delta);
+    heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
+                               ClearRecordedSlots::kYes);
     heap->AdjustLiveBytes(*object, -instance_size_delta,
                           Heap::CONCURRENT_TO_SWEEPER);
   }
@@ -3040,7 +3045,7 @@
   if (instance_size_delta > 0) {
     Heap* heap = isolate->heap();
     heap->CreateFillerObjectAt(object->address() + new_instance_size,
-                               instance_size_delta);
+                               instance_size_delta, ClearRecordedSlots::kYes);
     heap->AdjustLiveBytes(*object, -instance_size_delta,
                           Heap::CONCURRENT_TO_SWEEPER);
   }
@@ -3226,7 +3231,7 @@
   }
 
   DescriptorArray* to_replace = instance_descriptors();
-  GetHeap()->incremental_marking()->RecordWrites(to_replace);
+  GetHeap()->incremental_marking()->IterateBlackObject(to_replace);
   Map* current = this;
   while (current->instance_descriptors() == to_replace) {
     Object* next = current->GetBackPointer();
@@ -3316,25 +3321,40 @@
                           Representation new_representation,
                           Handle<Object> new_wrapped_type) {
   DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakCell());
+  // We store raw pointers in the queue, so no allocations are allowed.
   DisallowHeapAllocation no_allocation;
   PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
   if (details.type() != DATA) return;
-  Object* transitions = raw_transitions();
-  int num_transitions = TransitionArray::NumberOfTransitions(transitions);
-  for (int i = 0; i < num_transitions; ++i) {
-    Map* target = TransitionArray::GetTarget(transitions, i);
-    target->UpdateFieldType(descriptor, name, new_representation,
-                            new_wrapped_type);
-  }
-  // It is allowed to change representation here only from None to something.
-  DCHECK(details.representation().Equals(new_representation) ||
-         details.representation().IsNone());
 
-  // Skip if already updated the shared descriptor.
-  if (instance_descriptors()->GetValue(descriptor) == *new_wrapped_type) return;
-  DataDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor),
-                   new_wrapped_type, details.attributes(), new_representation);
-  instance_descriptors()->Replace(descriptor, &d);
+  Zone zone(GetIsolate()->allocator());
+  ZoneQueue<Map*> backlog(&zone);
+  backlog.push(this);
+
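+  // Visit this map and every map reachable through outgoing transitions,
+  // updating the field type in each one's descriptor array.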
+  while (!backlog.empty()) {
+    Map* current = backlog.front();
+    backlog.pop();
+
+    Object* transitions = current->raw_transitions();
+    int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+    for (int i = 0; i < num_transitions; ++i) {
+      Map* target = TransitionArray::GetTarget(transitions, i);
+      backlog.push(target);
+    }
+    DescriptorArray* descriptors = current->instance_descriptors();
+    PropertyDetails details = descriptors->GetDetails(descriptor);
+
+    // It is allowed to change representation here only from None to something.
+    DCHECK(details.representation().Equals(new_representation) ||
+           details.representation().IsNone());
+
+    // Skip if already updated the shared descriptor.
+    if (descriptors->GetValue(descriptor) != *new_wrapped_type) {
+      DataDescriptor d(name, descriptors->GetFieldIndex(descriptor),
+                       new_wrapped_type, details.attributes(),
+                       new_representation);
+      descriptors->Replace(descriptor, &d);
+    }
+  }
 }
 
 bool FieldTypeIsCleared(Representation rep, FieldType* type) {
@@ -4095,17 +4115,23 @@
   if (interceptor->setter()->IsUndefined()) return Just(false);
 
   Handle<JSObject> holder = it->GetHolder<JSObject>();
-  v8::Local<v8::Value> result;
-  PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder, should_throw);
+  bool result;
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, receiver),
+                                     Nothing<bool>());
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, should_throw);
 
   if (it->IsElement()) {
     uint32_t index = it->index();
     v8::IndexedPropertySetterCallback setter =
         v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
-    LOG(isolate,
-        ApiIndexedPropertyAccess("interceptor-indexed-set", *holder, index));
-    result = args.Call(setter, index, v8::Utils::ToLocal(value));
+    // TODO(neis): In the future, we may want to actually return the
+    // interceptor's result, which then should be a boolean.
+    result = !args.Call(setter, index, value).is_null();
   } else {
     Handle<Name> name = it->name();
     DCHECK(!name->IsPrivate());
@@ -4117,21 +4143,11 @@
     v8::GenericNamedPropertySetterCallback setter =
         v8::ToCData<v8::GenericNamedPropertySetterCallback>(
             interceptor->setter());
-    LOG(it->isolate(),
-        ApiNamedPropertyAccess("interceptor-named-set", *holder, *name));
-    result =
-        args.Call(setter, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+    result = !args.Call(setter, name, value).is_null();
   }
 
   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
-  if (result.IsEmpty()) return Just(false);
-#ifdef DEBUG
-  Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
-  result_internal->VerifyApiCallResultType();
-#endif
-  return Just(true);
-  // TODO(neis): In the future, we may want to actually return the interceptor's
-  // result, which then should be a boolean.
+  return Just(result);
 }
 
 
@@ -4150,7 +4166,7 @@
                                         LanguageMode language_mode,
                                         StoreFromKeyed store_mode,
                                         bool* found) {
-  it->UpdateProtector();
+  DCHECK(it->IsFound());
   ShouldThrow should_throw =
       is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
 
@@ -4158,10 +4174,7 @@
   // interceptor calls.
   AssertNoContextChange ncc(it->isolate());
 
-  *found = true;
-
-  bool done = false;
-  for (; it->IsFound(); it->Next()) {
+  do {
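+    // Cases that reset *found defer the store back to the caller
+    // (Object::SetProperty or Object::SetSuperProperty).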
     switch (it->state()) {
       case LookupIterator::NOT_FOUND:
         UNREACHABLE();
@@ -4186,10 +4199,12 @@
           Maybe<PropertyAttributes> maybe_attributes =
               JSObject::GetPropertyAttributesWithInterceptor(it);
           if (!maybe_attributes.IsJust()) return Nothing<bool>();
-          done = maybe_attributes.FromJust() != ABSENT;
-          if (done && (maybe_attributes.FromJust() & READ_ONLY) != 0) {
+          if (maybe_attributes.FromJust() == ABSENT) break;
+          if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
             return WriteToReadOnlyProperty(it, value, should_throw);
           }
+          *found = false;
+          return Nothing<bool>();
         }
         break;
 
@@ -4201,13 +4216,13 @@
         if (accessors->IsAccessorInfo() &&
             !it->HolderIsReceiverOrHiddenPrototype() &&
             AccessorInfo::cast(*accessors)->is_special_data_property()) {
-          done = true;
-          break;
+          *found = false;
+          return Nothing<bool>();
         }
         return SetPropertyWithAccessor(it, value, should_throw);
       }
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
-        // TODO(verwaest): We should throw an exception.
+        // TODO(verwaest): We should throw an exception if holder is receiver.
         return Just(true);
 
       case LookupIterator::DATA:
@@ -4217,25 +4232,13 @@
         if (it->HolderIsReceiverOrHiddenPrototype()) {
           return SetDataProperty(it, value);
         }
-        done = true;
-        break;
-
+      // Fall through.
       case LookupIterator::TRANSITION:
-        done = true;
-        break;
+        *found = false;
+        return Nothing<bool>();
     }
-
-    if (done) break;
-  }
-
-  // If the receiver is the JSGlobalObject, the store was contextual. In case
-  // the property did not exist yet on the global object itself, we have to
-  // throw a reference error in strict mode.  In sloppy mode, we continue.
-  if (it->GetReceiver()->IsJSGlobalObject() && is_strict(language_mode)) {
-    it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
-        MessageTemplate::kNotDefined, it->name()));
-    return Nothing<bool>();
-  }
+    it->Next();
+  } while (it->IsFound());
 
   *found = false;
   return Nothing<bool>();
@@ -4245,10 +4248,23 @@
 Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
                                 LanguageMode language_mode,
                                 StoreFromKeyed store_mode) {
-  bool found = false;
-  Maybe<bool> result =
-      SetPropertyInternal(it, value, language_mode, store_mode, &found);
-  if (found) return result;
+  it->UpdateProtector();
+  if (it->IsFound()) {
+    bool found = true;
+    Maybe<bool> result =
+        SetPropertyInternal(it, value, language_mode, store_mode, &found);
+    if (found) return result;
+  }
+
+  // If the receiver is the JSGlobalObject, the store was contextual. In case
+  // the property did not exist yet on the global object itself, we have to
+  // throw a reference error in strict mode.  In sloppy mode, we continue.
+  if (is_strict(language_mode) && it->GetReceiver()->IsJSGlobalObject()) {
+    it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
+        MessageTemplate::kNotDefined, it->name()));
+    return Nothing<bool>();
+  }
+
   ShouldThrow should_throw =
       is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
   return AddDataProperty(it, value, NONE, should_throw, store_mode);
@@ -4260,10 +4276,13 @@
                                      StoreFromKeyed store_mode) {
   Isolate* isolate = it->isolate();
 
-  bool found = false;
-  Maybe<bool> result =
-      SetPropertyInternal(it, value, language_mode, store_mode, &found);
-  if (found) return result;
+  it->UpdateProtector();
+  if (it->IsFound()) {
+    bool found = true;
+    Maybe<bool> result =
+        SetPropertyInternal(it, value, language_mode, store_mode, &found);
+    if (found) return result;
+  }
 
   // The property either doesn't exist on the holder or exists there as a data
   // property.
@@ -4276,7 +4295,7 @@
   }
   Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
 
-  LookupIterator::Configuration c = LookupIterator::OWN;
+  LookupIterator::Configuration c = LookupIterator::HIDDEN;
   LookupIterator own_lookup =
       it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
                       : LookupIterator(receiver, it->name(), c);
@@ -4338,8 +4357,7 @@
     }
   }
 
-  return JSObject::AddDataProperty(&own_lookup, value, NONE, should_throw,
-                                   store_mode);
+  return AddDataProperty(&own_lookup, value, NONE, should_throw, store_mode);
 }
 
 MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it) {
@@ -4608,7 +4626,7 @@
   }
 
   // Replace descriptors by new_descriptors in all maps that share it.
-  map->GetHeap()->incremental_marking()->RecordWrites(*descriptors);
+  map->GetHeap()->incremental_marking()->IterateBlackObject(*descriptors);
 
   Map* current = *map;
   while (current->instance_descriptors() == *descriptors) {
@@ -4848,11 +4866,9 @@
   } else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
     // Reuse map transitions for JSArrays.
     DisallowHeapAllocation no_gc;
-    Strength strength = map->is_strong() ? Strength::STRONG : Strength::WEAK;
-    if (native_context->get(Context::ArrayMapIndex(from_kind, strength)) ==
-        *map) {
+    if (native_context->get(Context::ArrayMapIndex(from_kind)) == *map) {
       Object* maybe_transitioned_map =
-          native_context->get(Context::ArrayMapIndex(to_kind, strength));
+          native_context->get(Context::ArrayMapIndex(to_kind));
       if (maybe_transitioned_map->IsMap()) {
         return handle(Map::cast(maybe_transitioned_map), isolate);
       }
@@ -5180,11 +5196,9 @@
 
 
 Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
-  Isolate* isolate = it->isolate();
-  HandleScope scope(isolate);
   PropertyDescriptor desc;
   Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
-      isolate, it->GetHolder<JSProxy>(), it->GetName(), &desc);
+      it->isolate(), it->GetHolder<JSProxy>(), it->GetName(), &desc);
   MAYBE_RETURN(found, Nothing<PropertyAttributes>());
   if (!found.FromJust()) return Just(ABSENT);
   return Just(desc.ToAttributes());
@@ -5253,7 +5267,7 @@
 void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
                            Handle<Object> value,
                            PropertyAttributes attributes) {
-  LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
   CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
 #ifdef DEBUG
   uint32_t index;
@@ -5364,7 +5378,6 @@
                                             should_throw);
 
       case LookupIterator::DATA: {
-        Handle<Object> old_value = it->factory()->the_hole_value();
         // Regular property update if the attributes match.
         if (it->property_attributes() == attributes) {
           return SetDataProperty(it, value);
@@ -5378,6 +5391,7 @@
         }
 
         // Reconfigure the data property if the attributes mismatch.
+        Handle<Object> old_value = it->factory()->the_hole_value();
         if (is_observed) old_value = it->GetDataValue();
 
         it->ReconfigureDataProperty(value, attributes);
@@ -5404,7 +5418,7 @@
     Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
     PropertyAttributes attributes) {
   DCHECK(!value->IsTheHole());
-  LookupIterator it(object, name, LookupIterator::OWN);
+  LookupIterator it(object, name, object, LookupIterator::OWN);
   return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
 
@@ -5412,7 +5426,7 @@
     Handle<JSObject> object, uint32_t index, Handle<Object> value,
     PropertyAttributes attributes) {
   Isolate* isolate = object->GetIsolate();
-  LookupIterator it(isolate, object, index, LookupIterator::OWN);
+  LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
   return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
 
@@ -5420,8 +5434,8 @@
     Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
     PropertyAttributes attributes) {
   Isolate* isolate = object->GetIsolate();
-  LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
-                                                        LookupIterator::OWN);
+  LookupIterator it = LookupIterator::PropertyOrElement(
+      isolate, object, name, object, LookupIterator::OWN);
   return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
 
@@ -5440,17 +5454,20 @@
       !interceptor->can_intercept_symbols()) {
     return Just(ABSENT);
   }
-  PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder,
-                                 Object::DONT_THROW);
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, receiver),
+                                     Nothing<PropertyAttributes>());
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, Object::DONT_THROW);
   if (!interceptor->query()->IsUndefined()) {
-    v8::Local<v8::Integer> result;
+    Handle<Object> result;
     if (it->IsElement()) {
       uint32_t index = it->index();
       v8::IndexedPropertyQueryCallback query =
           v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
-      LOG(isolate,
-          ApiIndexedPropertyAccess("interceptor-indexed-has", *holder, index));
       result = args.Call(query, index);
     } else {
       Handle<Name> name = it->name();
@@ -5458,25 +5475,20 @@
       v8::GenericNamedPropertyQueryCallback query =
           v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
               interceptor->query());
-      LOG(isolate,
-          ApiNamedPropertyAccess("interceptor-named-has", *holder, *name));
-      result = args.Call(query, v8::Utils::ToLocal(name));
+      result = args.Call(query, name);
     }
-    if (!result.IsEmpty()) {
-      DCHECK(result->IsInt32());
-      return Just(static_cast<PropertyAttributes>(
-          result->Int32Value(reinterpret_cast<v8::Isolate*>(isolate)
-                                 ->GetCurrentContext()).FromJust()));
+    if (!result.is_null()) {
+      int32_t value;
+      CHECK(result->ToInt32(&value));
+      return Just(static_cast<PropertyAttributes>(value));
     }
   } else if (!interceptor->getter()->IsUndefined()) {
     // TODO(verwaest): Use GetPropertyWithInterceptor?
-    v8::Local<v8::Value> result;
+    Handle<Object> result;
     if (it->IsElement()) {
       uint32_t index = it->index();
       v8::IndexedPropertyGetterCallback getter =
           v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
-      LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-get-has",
-                                            *holder, index));
       result = args.Call(getter, index);
     } else {
       Handle<Name> name = it->name();
@@ -5484,11 +5496,9 @@
       v8::GenericNamedPropertyGetterCallback getter =
           v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
               interceptor->getter());
-      LOG(isolate,
-          ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
-      result = args.Call(getter, v8::Utils::ToLocal(name));
+      result = args.Call(getter, name);
     }
-    if (!result.IsEmpty()) return Just(DONT_ENUM);
+    if (!result.is_null()) return Just(DONT_ENUM);
   }
 
   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
@@ -5742,35 +5752,6 @@
 }
 
 
-static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
-    Handle<FixedArrayBase> array, int length,
-    Handle<SeededNumberDictionary> dictionary, bool used_as_prototype) {
-  Isolate* isolate = array->GetIsolate();
-  Factory* factory = isolate->factory();
-  bool has_double_elements = array->IsFixedDoubleArray();
-  for (int i = 0; i < length; i++) {
-    Handle<Object> value;
-    if (has_double_elements) {
-      Handle<FixedDoubleArray> double_array =
-          Handle<FixedDoubleArray>::cast(array);
-      if (double_array->is_the_hole(i)) {
-        value = factory->the_hole_value();
-      } else {
-        value = factory->NewHeapNumber(double_array->get_scalar(i));
-      }
-    } else {
-      value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
-    }
-    if (!value->IsTheHole()) {
-      PropertyDetails details = PropertyDetails::Empty();
-      dictionary = SeededNumberDictionary::AddNumberEntry(
-          dictionary, i, value, details, used_as_prototype);
-    }
-  }
-  return dictionary;
-}
-
-
 void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
   if (dictionary->requires_slow_elements()) return;
   dictionary->set_requires_slow_elements();
@@ -5781,40 +5762,23 @@
 }
 
 
-Handle<SeededNumberDictionary> JSObject::GetNormalizedElementDictionary(
-    Handle<JSObject> object, Handle<FixedArrayBase> elements) {
-  DCHECK(!object->HasDictionaryElements());
-  DCHECK(!object->HasSlowArgumentsElements());
-  Isolate* isolate = object->GetIsolate();
-  // Ensure that notifications fire if the array or object prototypes are
-  // normalizing.
-  isolate->UpdateArrayProtectorOnNormalizeElements(object);
-  int length = object->IsJSArray()
-                   ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
-                   : elements->length();
-  int used = object->GetFastElementsUsage();
-  Handle<SeededNumberDictionary> dictionary =
-      SeededNumberDictionary::New(isolate, used);
-  return CopyFastElementsToDictionary(elements, length, dictionary,
-                                      object->map()->is_prototype_map());
-}
-
-
 Handle<SeededNumberDictionary> JSObject::NormalizeElements(
     Handle<JSObject> object) {
   DCHECK(!object->HasFixedTypedArrayElements());
   Isolate* isolate = object->GetIsolate();
-
-  // Find the backing store.
-  Handle<FixedArrayBase> elements(object->elements(), isolate);
   bool is_arguments = object->HasSloppyArgumentsElements();
-  if (is_arguments) {
-    FixedArray* parameter_map = FixedArray::cast(*elements);
-    elements = handle(FixedArrayBase::cast(parameter_map->get(1)), isolate);
-  }
+  {
+    DisallowHeapAllocation no_gc;
+    FixedArrayBase* elements = object->elements();
 
-  if (elements->IsDictionary()) {
-    return Handle<SeededNumberDictionary>::cast(elements);
+    if (is_arguments) {
+      FixedArray* parameter_map = FixedArray::cast(elements);
+      elements = FixedArrayBase::cast(parameter_map->get(1));
+    }
+
+    if (elements->IsDictionary()) {
+      return handle(SeededNumberDictionary::cast(elements), isolate);
+    }
   }
 
   DCHECK(object->HasFastSmiOrObjectElements() ||
@@ -5823,7 +5787,7 @@
          object->HasFastStringWrapperElements());
 
   Handle<SeededNumberDictionary> dictionary =
-      GetNormalizedElementDictionary(object, elements);
+      object->GetElementsAccessor()->Normalize(object);
 
   // Switch to using the dictionary as the backing storage for elements.
   ElementsKind target_kind = is_arguments
@@ -5873,14 +5837,6 @@
 }
 
 
-void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) {
-  DCHECK(!object->IsJSGlobalProxy());
-  Isolate* isolate = object->GetIsolate();
-  Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
-  JSObject::AddProperty(object, hash_code_symbol, hash, NONE);
-}
-
-
 template<typename ProxyType>
 static Handle<Smi> GetOrCreateIdentityHashHelper(Handle<ProxyType> proxy) {
   Isolate* isolate = proxy->GetIsolate();
@@ -5893,40 +5849,42 @@
   return hash;
 }
 
-
-Object* JSObject::GetIdentityHash() {
-  DisallowHeapAllocation no_gc;
-  Isolate* isolate = GetIsolate();
-  if (IsJSGlobalProxy()) {
-    return JSGlobalProxy::cast(this)->hash();
+// static
+Handle<Object> JSObject::GetIdentityHash(Isolate* isolate,
+                                         Handle<JSObject> object) {
+  if (object->IsJSGlobalProxy()) {
+    return handle(JSGlobalProxy::cast(*object)->hash(), isolate);
   }
-  Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
-  Handle<Object> stored_value =
-      Object::GetPropertyOrElement(Handle<Object>(this, isolate),
-                                   hash_code_symbol).ToHandleChecked();
-  return stored_value->IsSmi() ? *stored_value
-                               : isolate->heap()->undefined_value();
+  Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
+  return JSReceiver::GetDataProperty(object, hash_code_symbol);
 }
 
-
+// static
 Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
   if (object->IsJSGlobalProxy()) {
     return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object));
   }
   Isolate* isolate = object->GetIsolate();
 
-  Handle<Object> maybe_hash(object->GetIdentityHash(), isolate);
-  if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
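+  // The identity hash lives in an own data property keyed by the private
+  // hash_code symbol; reuse an existing Smi value before minting a new one.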
+  Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
+  LookupIterator it(object, hash_code_symbol, object, LookupIterator::OWN);
+  if (it.IsFound()) {
+    DCHECK_EQ(LookupIterator::DATA, it.state());
+    Handle<Object> maybe_hash = it.GetDataValue();
+    if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
+  }
 
   Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
-  Handle<Name> hash_code_symbol(isolate->heap()->hash_code_symbol());
-  JSObject::AddProperty(object, hash_code_symbol, hash, NONE);
+  CHECK(AddDataProperty(&it, hash, NONE, THROW_ON_ERROR,
+                        CERTAINLY_NOT_STORE_FROM_KEYED)
+            .IsJust());
   return hash;
 }
 
-
-Object* JSProxy::GetIdentityHash() {
-  return this->hash();
+// static
+Handle<Object> JSProxy::GetIdentityHash(Isolate* isolate,
+                                        Handle<JSProxy> proxy) {
+  return handle(proxy->hash(), isolate);
 }
 
 
@@ -6018,7 +5976,7 @@
 bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
   Isolate* isolate = object->GetIsolate();
   Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
-  LookupIterator it(object, hidden);
+  LookupIterator it(object, hidden, object);
   Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
   // Cannot get an exception since the hidden_properties_symbol isn't exposed to
   // JS.
@@ -6053,8 +6011,10 @@
       return GetHeap()->undefined_value();
     }
   } else {
-    Handle<Symbol> hidden = GetIsolate()->factory()->hidden_properties_symbol();
-    LookupIterator it(handle(this), hidden);
+    Isolate* isolate = GetIsolate();
+    Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
+    Handle<JSObject> receiver(this, isolate);
+    LookupIterator it(receiver, hidden, receiver);
     // Access check is always skipped for the hidden string anyways.
     return *GetDataProperty(&it);
   }
@@ -6101,16 +6061,20 @@
   if (interceptor->deleter()->IsUndefined()) return Nothing<bool>();
 
   Handle<JSObject> holder = it->GetHolder<JSObject>();
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, receiver),
+                                     Nothing<bool>());
+  }
 
-  PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder, should_throw);
-  v8::Local<v8::Boolean> result;
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, should_throw);
+  Handle<Object> result;
   if (it->IsElement()) {
     uint32_t index = it->index();
     v8::IndexedPropertyDeleterCallback deleter =
         v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
-    LOG(isolate,
-        ApiIndexedPropertyAccess("interceptor-indexed-delete", *holder, index));
     result = args.Call(deleter, index);
   } else if (it->name()->IsSymbol() && !interceptor->can_intercept_symbols()) {
     return Nothing<bool>();
@@ -6120,19 +6084,15 @@
     v8::GenericNamedPropertyDeleterCallback deleter =
         v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
             interceptor->deleter());
-    LOG(isolate,
-        ApiNamedPropertyAccess("interceptor-named-delete", *holder, *name));
-    result = args.Call(deleter, v8::Utils::ToLocal(name));
+    result = args.Call(deleter, name);
   }
 
   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
-  if (result.IsEmpty()) return Nothing<bool>();
+  if (result.is_null()) return Nothing<bool>();
 
   DCHECK(result->IsBoolean());
-  Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
-  result_internal->VerifyApiCallResultType();
   // Rebox CustomArguments::kReturnValueOffset before returning.
-  return Just(result_internal->BooleanValue());
+  return Just(result->IsTrue());
 }
 
 
@@ -6222,15 +6182,12 @@
         }
       // Fall through.
       case LookupIterator::ACCESSOR: {
-        if (!it->IsConfigurable() || receiver->map()->is_strong()) {
-          // Fail if the property is not configurable, or on a strong object.
+        if (!it->IsConfigurable()) {
+          // Fail if the property is not configurable.
           if (is_strict(language_mode)) {
-            MessageTemplate::Template templ =
-                receiver->map()->is_strong()
-                    ? MessageTemplate::kStrongDeleteProperty
-                    : MessageTemplate::kStrictDeleteProperty;
             isolate->Throw(*isolate->factory()->NewTypeError(
-                templ, it->GetName(), receiver));
+                MessageTemplate::kStrictDeleteProperty, it->GetName(),
+                receiver));
             return Nothing<bool>();
           }
           return Just(false);
@@ -6256,7 +6213,7 @@
 
 Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
                                       LanguageMode language_mode) {
-  LookupIterator it(object->GetIsolate(), object, index,
+  LookupIterator it(object->GetIsolate(), object, index, object,
                     LookupIterator::HIDDEN);
   return DeleteProperty(&it, language_mode);
 }
@@ -6265,7 +6222,7 @@
 Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
                                        Handle<Name> name,
                                        LanguageMode language_mode) {
-  LookupIterator it(object, name, LookupIterator::HIDDEN);
+  LookupIterator it(object, name, object, LookupIterator::HIDDEN);
   return DeleteProperty(&it, language_mode);
 }
 
@@ -6274,7 +6231,7 @@
                                                 Handle<Name> name,
                                                 LanguageMode language_mode) {
   LookupIterator it = LookupIterator::PropertyOrElement(
-      name->GetIsolate(), object, name, LookupIterator::HIDDEN);
+      name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
   return DeleteProperty(&it, language_mode);
 }
 
@@ -6650,14 +6607,6 @@
   } else if (current_is_data_descriptor && desc_is_data_descriptor) {
     // 8a. If the [[Configurable]] field of current is false, then:
     if (!current->configurable()) {
-      // [Strong mode] Disallow changing writable -> readonly for
-      // non-configurable properties.
-      if (it != NULL && current->writable() && desc->has_writable() &&
-          !desc->writable() && object->map()->is_strong()) {
-        RETURN_FAILURE(isolate, should_throw,
-                       NewTypeError(MessageTemplate::kStrongRedefineDisallowed,
-                                    object, it->GetName()));
-      }
       // 8a i. Return false, if the [[Writable]] field of current is false and
       // the [[Writable]] field of Desc is true.
       if (!current->writable() && desc->has_writable() && desc->writable()) {
@@ -6778,7 +6727,7 @@
   Isolate* isolate = receiver->GetIsolate();
 
   if (receiver->IsJSObject()) {
-    return JSObject::CreateDataProperty(it, value);  // Shortcut.
+    return JSObject::CreateDataProperty(it, value, should_throw);  // Shortcut.
   }
 
   PropertyDescriptor new_desc;
@@ -6791,17 +6740,28 @@
                                        &new_desc, should_throw);
 }
 
-
 Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
-                                         Handle<Object> value) {
+                                         Handle<Object> value,
+                                         ShouldThrow should_throw) {
   DCHECK(it->GetReceiver()->IsJSObject());
   MAYBE_RETURN(JSReceiver::GetPropertyAttributes(it), Nothing<bool>());
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+  Isolate* isolate = receiver->GetIsolate();
 
   if (it->IsFound()) {
-    if (!it->IsConfigurable()) return Just(false);
+    Maybe<PropertyAttributes> attributes = GetPropertyAttributes(it);
+    MAYBE_RETURN(attributes, Nothing<bool>());
+    if ((attributes.FromJust() & DONT_DELETE) != 0) {
+      RETURN_FAILURE(
+          isolate, should_throw,
+          NewTypeError(MessageTemplate::kRedefineDisallowed, it->GetName()));
+    }
   } else {
-    if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver())))
-      return Just(false);
+    if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver()))) {
+      RETURN_FAILURE(
+          isolate, should_throw,
+          NewTypeError(MessageTemplate::kDefineDisallowed, it->GetName()));
+    }
   }
 
   RETURN_ON_EXCEPTION_VALUE(it->isolate(),
@@ -7149,7 +7109,7 @@
           ? desc->value()
           : Handle<Object>::cast(isolate->factory()->undefined_value());
 
-  LookupIterator it(proxy, private_name);
+  LookupIterator it(proxy, private_name, proxy);
 
   if (it.IsFound()) {
     DCHECK_EQ(LookupIterator::DATA, it.state());
@@ -7211,7 +7171,7 @@
   if (!is_accessor_pair) {
     // 5a. Set D.[[Value]] to the value of X's [[Value]] attribute.
     Handle<Object> value;
-    if (!JSObject::GetProperty(it).ToHandle(&value)) {
+    if (!Object::GetProperty(it).ToHandle(&value)) {
       DCHECK(isolate->has_pending_exception());
       return Nothing<bool>();
     }
@@ -7501,8 +7461,7 @@
   if (receiver->IsJSObject()) {
     Handle<JSObject> object = Handle<JSObject>::cast(receiver);
     if (!object->HasSloppyArgumentsElements() &&
-        !object->map()->is_observed() &&
-        (!object->map()->is_strong() || level == SEALED)) {  // Fast path.
+        !object->map()->is_observed()) {  // Fast path.
       if (level == SEALED) {
         return JSObject::PreventExtensionsWithTransition<SEALED>(object,
                                                                  should_throw);
@@ -7832,8 +7791,7 @@
             : object->elements()->length();
     new_element_dictionary =
         length == 0 ? isolate->factory()->empty_slow_element_dictionary()
-                    : GetNormalizedElementDictionary(
-                          object, handle(object->elements()));
+                    : object->GetElementsAccessor()->Normalize(object);
   }
 
   Handle<Symbol> transition_marker;
@@ -7955,9 +7913,7 @@
   return Object::WrapForRead(isolate, raw_value, representation);
 }
 
-enum class BoilerplateKind { kNormalBoilerplate, kApiBoilerplate };
-
-template <class ContextObject, BoilerplateKind boilerplate_kind>
+template <class ContextObject>
 class JSObjectWalkVisitor {
  public:
   JSObjectWalkVisitor(ContextObject* site_context, bool copying,
@@ -7989,9 +7945,9 @@
   const JSObject::DeepCopyHints hints_;
 };
 
-template <class ContextObject, BoilerplateKind boilerplate_kind>
-MaybeHandle<JSObject> JSObjectWalkVisitor<
-    ContextObject, boilerplate_kind>::StructureWalk(Handle<JSObject> object) {
+template <class ContextObject>
+MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
+    Handle<JSObject> object) {
   Isolate* isolate = this->isolate();
   bool copying = this->copying();
   bool shallow = hints_ == JSObject::kObjectIsShallow;
@@ -8011,26 +7967,8 @@
 
   Handle<JSObject> copy;
   if (copying) {
-    if (boilerplate_kind == BoilerplateKind::kApiBoilerplate) {
-      if (object->IsJSFunction()) {
-#ifdef DEBUG
-        // Ensure that it is an Api function and template_instantiations_cache
-        // contains an entry for function's FunctionTemplateInfo.
-        JSFunction* function = JSFunction::cast(*object);
-        CHECK(function->shared()->IsApiFunction());
-        FunctionTemplateInfo* data = function->shared()->get_api_func_data();
-        auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
-        CHECK(serial_number->value());
-        auto cache = isolate->template_instantiations_cache();
-        Object* element = cache->Lookup(serial_number);
-        CHECK_EQ(function, element);
-#endif
-        return object;
-      }
-    } else {
-      // JSFunction objects are not allowed to be in normal boilerplates at all.
-      DCHECK(!object->IsJSFunction());
-    }
+    // JSFunction objects are not allowed to be in normal boilerplates at all.
+    DCHECK(!object->IsJSFunction());
     Handle<AllocationSite> site_to_pass;
     if (site_context()->ShouldCreateMemento(object)) {
       site_to_pass = site_context()->current();
@@ -8099,7 +8037,7 @@
         DCHECK(names->get(i)->IsName());
         Handle<Name> name(Name::cast(names->get(i)));
         Handle<Object> value =
-            Object::GetProperty(copy, name).ToHandleChecked();
+            JSObject::GetProperty(copy, name).ToHandleChecked();
         if (value->IsJSObject()) {
           Handle<JSObject> result;
           ASSIGN_RETURN_ON_EXCEPTION(
@@ -8199,9 +8137,8 @@
 MaybeHandle<JSObject> JSObject::DeepWalk(
     Handle<JSObject> object,
     AllocationSiteCreationContext* site_context) {
-  JSObjectWalkVisitor<AllocationSiteCreationContext,
-                      BoilerplateKind::kNormalBoilerplate> v(site_context,
-                                                             false, kNoHints);
+  JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
+                                                       kNoHints);
   MaybeHandle<JSObject> result = v.StructureWalk(object);
   Handle<JSObject> for_assert;
   DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
@@ -8213,30 +8150,7 @@
     Handle<JSObject> object,
     AllocationSiteUsageContext* site_context,
     DeepCopyHints hints) {
-  JSObjectWalkVisitor<AllocationSiteUsageContext,
-                      BoilerplateKind::kNormalBoilerplate> v(site_context, true,
-                                                             hints);
-  MaybeHandle<JSObject> copy = v.StructureWalk(object);
-  Handle<JSObject> for_assert;
-  DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
-  return copy;
-}
-
-class DummyContextObject : public AllocationSiteContext {
- public:
-  explicit DummyContextObject(Isolate* isolate)
-      : AllocationSiteContext(isolate) {}
-
-  bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
-  Handle<AllocationSite> EnterNewScope() { return Handle<AllocationSite>(); }
-  void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object) {}
-};
-
-MaybeHandle<JSObject> JSObject::DeepCopyApiBoilerplate(
-    Handle<JSObject> object) {
-  DummyContextObject dummy_context_object(object->GetIsolate());
-  JSObjectWalkVisitor<DummyContextObject, BoilerplateKind::kApiBoilerplate> v(
-      &dummy_context_object, true, kNoHints);
+  JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
   MaybeHandle<JSObject> copy = v.StructureWalk(object);
   Handle<JSObject> for_assert;
   DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
@@ -8314,7 +8228,9 @@
 
 
 // TODO(cbruni/jkummerow): Consider moving this into elements.cc.
-bool HasEnumerableElements(JSObject* object) {
+bool JSObject::HasEnumerableElements() {
+  // TODO(cbruni): cleanup
+  JSObject* object = this;
   switch (object->GetElementsKind()) {
     case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
@@ -8379,7 +8295,6 @@
   return true;
 }
 
-
 // Tests for the fast common case for property enumeration:
 // - This object and all prototypes have an enum cache (which means that
 //   none of them is a proxy, has interceptors, or needs access checks).
@@ -8396,7 +8311,7 @@
     if (current->IsAccessCheckNeeded()) return false;
     DCHECK(!current->HasNamedInterceptor());
     DCHECK(!current->HasIndexedInterceptor());
-    if (HasEnumerableElements(current)) return false;
+    if (current->HasEnumerableElements()) return false;
     if (current != this && enum_length != 0) return false;
   }
   return true;
@@ -8456,15 +8371,13 @@
   // Wrapped string elements aren't explicitly stored in the elements backing
   // store, but are loaded indirectly from the underlying string.
   return !IsStringWrapperElementsKind(elements_kind()) &&
-         !is_access_check_needed() && !has_named_interceptor() &&
-         !has_indexed_interceptor() && !has_hidden_prototype() &&
-         !is_dictionary_map();
+         instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
+         !has_hidden_prototype() && !is_dictionary_map();
 }
 
-namespace {
-
-Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
-                                           Handle<JSObject> object) {
+// static
+Handle<FixedArray> JSObject::GetFastEnumPropertyKeys(Isolate* isolate,
+                                                     Handle<JSObject> object) {
   Handle<Map> map(object->map());
   bool cache_enum_length = map->OnlyHasSimpleProperties();
 
@@ -8515,8 +8428,9 @@
 
   for (int i = 0; i < size; i++) {
     PropertyDetails details = descs->GetDetails(i);
+    if (details.IsDontEnum()) continue;
     Object* key = descs->GetKey(i);
-    if (details.IsDontEnum() || key->IsSymbol()) continue;
+    if (key->IsSymbol()) continue;
     storage->set(index, key);
     if (!indices.is_null()) {
       if (details.type() != DATA) {
@@ -8538,7 +8452,6 @@
   return storage;
 }
 
-}  // namespace
 
 Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
   Isolate* isolate = object->GetIsolate();
@@ -8548,7 +8461,7 @@
     Handle<GlobalDictionary> dictionary(object->global_dictionary());
     int length = dictionary->NumberOfEnumElements();
     if (length == 0) {
-      return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+      return isolate->factory()->empty_fixed_array();
     }
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
     dictionary->CopyEnumKeysTo(*storage);
@@ -8557,7 +8470,7 @@
     Handle<NameDictionary> dictionary(object->property_dictionary());
     int length = dictionary->NumberOfEnumElements();
     if (length == 0) {
-      return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
+      return isolate->factory()->empty_fixed_array();
     }
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
     dictionary->CopyEnumKeysTo(*storage);
@@ -8590,7 +8503,7 @@
   }
   PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
                                  *object, Object::DONT_THROW);
-  v8::Local<v8::Object> result;
+  Handle<JSObject> result;
   if (!interceptor->enumerator()->IsUndefined()) {
     Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
     const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
@@ -8599,18 +8512,13 @@
     result = args.Call(enum_fun);
   }
   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
-  if (result.IsEmpty()) return Just(true);
-  DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
-         (v8::Utils::OpenHandle(*result)->IsJSObject() &&
-          Handle<JSObject>::cast(v8::Utils::OpenHandle(*result))
-              ->HasSloppyArgumentsElements()));
+  if (result.is_null()) return Just(true);
+  DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
   // The accumulator takes care of string/symbol filtering.
   if (type == kIndexed) {
-    accumulator->AddElementKeysFromInterceptor(
-        Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
+    accumulator->AddElementKeysFromInterceptor(result);
   } else {
-    accumulator->AddKeys(Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)),
-                         DO_NOT_CONVERT);
+    accumulator->AddKeys(result, DO_NOT_CONVERT);
   }
   return Just(true);
 }
@@ -8670,12 +8578,23 @@
                                     KeyCollectionType type,
                                     PropertyFilter filter,
                                     KeyAccumulator* accumulator) {
+  // Proxies have no hidden prototype and we should not trigger the
+  // [[GetPrototypeOf]] trap on the last iteration when using
+  // AdvanceFollowingProxies.
+  if (type == OWN_ONLY && object->IsJSProxy()) {
+    MAYBE_RETURN(JSProxy::OwnPropertyKeys(isolate, receiver,
+                                          Handle<JSProxy>::cast(object), filter,
+                                          accumulator),
+                 Nothing<bool>());
+    return Just(true);
+  }
+
   PrototypeIterator::WhereToEnd end = type == OWN_ONLY
                                           ? PrototypeIterator::END_AT_NON_HIDDEN
                                           : PrototypeIterator::END_AT_NULL;
   for (PrototypeIterator iter(isolate, object,
                               PrototypeIterator::START_AT_RECEIVER, end);
-       !iter.IsAtEnd(); iter.Advance()) {
+       !iter.IsAtEnd();) {
     Handle<JSReceiver> current =
         PrototypeIterator::GetCurrent<JSReceiver>(iter);
     Maybe<bool> result = Just(false);  // Dummy initialization.
@@ -8691,6 +8610,11 @@
     }
     MAYBE_RETURN(result, Nothing<bool>());
     if (!result.FromJust()) break;  // |false| means "stop iterating".
+    // Iterate through proxies but ignore access checks for the ALL_CAN_READ
+    // case on API objects for OWN_ONLY keys handled in GetKeysFromJSObject.
+    if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
+      return Nothing<bool>();
+    }
   }
   return Just(true);
 }
@@ -8789,7 +8713,7 @@
     return accumulator->AddKeysFromProxy(proxy, trap_result);
   }
   // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
-  Zone set_zone;
+  Zone set_zone(isolate->allocator());
   const int kPresent = 1;
   const int kGone = 0;
   IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
@@ -8849,14 +8773,15 @@
   return accumulator->AddKeysFromProxy(proxy, trap_result);
 }
 
-
 MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
                                             KeyCollectionType type,
                                             PropertyFilter filter,
-                                            GetKeysConversion keys_conversion) {
+                                            GetKeysConversion keys_conversion,
+                                            bool filter_proxy_keys) {
   USE(ContainsOnlyValidKeys);
   Isolate* isolate = object->GetIsolate();
   KeyAccumulator accumulator(isolate, type, filter);
+  accumulator.set_filter_proxy_keys(filter_proxy_keys);
   MAYBE_RETURN(
       GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
       MaybeHandle<FixedArray>());
@@ -8865,10 +8790,94 @@
   return keys;
 }
 
+MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
+    Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
+    Handle<FixedArray>* result) {
+  Handle<Map> map(JSReceiver::cast(*receiver)->map(), isolate);
+
+  if (!map->IsJSObjectMap()) return Just(false);
+  if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+  Handle<JSObject> object(JSObject::cast(*receiver));
+
+  Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+  int number_of_own_elements =
+      object->GetElementsAccessor()->GetCapacity(*object, object->elements());
+  Handle<FixedArray> values_or_entries = isolate->factory()->NewFixedArray(
+      number_of_own_descriptors + number_of_own_elements);
+  int count = 0;
+
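+  // Collect enumerable own elements first; string-keyed named properties
+  // are appended below.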
+  if (object->elements() != isolate->heap()->empty_fixed_array()) {
+    MAYBE_RETURN(object->GetElementsAccessor()->CollectValuesOrEntries(
+                     isolate, object, values_or_entries, get_entries, &count,
+                     ENUMERABLE_STRINGS),
+                 Nothing<bool>());
+  }
+
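+  // |stable| tracks whether the receiver still has its original map; running
+  // accessor getters below may change it and force the slow lookup path.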
+  bool stable = object->map() == *map;
+
+  for (int index = 0; index < number_of_own_descriptors; index++) {
+    Handle<Name> next_key(descriptors->GetKey(index), isolate);
+    if (!next_key->IsString()) continue;
+    Handle<Object> prop_value;
+
+    // Directly decode from the descriptor array if |object| did not change
+    // shape.
+    if (stable) {
+      PropertyDetails details = descriptors->GetDetails(index);
+      if (!details.IsEnumerable()) continue;
+      if (details.kind() == kData) {
+        if (details.location() == kDescriptor) {
+          prop_value = handle(descriptors->GetValue(index), isolate);
+        } else {
+          Representation representation = details.representation();
+          FieldIndex field_index = FieldIndex::ForDescriptor(*map, index);
+          prop_value =
+              JSObject::FastPropertyAt(object, representation, field_index);
+        }
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, prop_value, JSReceiver::GetProperty(object, next_key),
+            Nothing<bool>());
+        stable = object->map() == *map;
+      }
+    } else {
+      // If the map did change, do a slower lookup. We are still guaranteed that
+      // the object has a simple shape, and that the key is a name.
+      LookupIterator it(object, next_key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+      if (!it.IsFound()) continue;
+      DCHECK(it.state() == LookupIterator::DATA ||
+             it.state() == LookupIterator::ACCESSOR);
+      if (!it.IsEnumerable()) continue;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+    }
+
+    if (get_entries) {
+      prop_value = MakeEntryPair(isolate, next_key, prop_value);
+    }
+
+    values_or_entries->set(count, *prop_value);
+    count++;
+  }
+
+  if (count < values_or_entries->length()) values_or_entries->Shrink(count);
+  *result = values_or_entries;
+  return Just(true);
+}
+
 MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
                                               Handle<JSReceiver> object,
                                               PropertyFilter filter,
                                               bool get_entries) {
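+  // Try the fast path for simple fast-mode receivers first; Just(false)
+  // means falling through to the generic key-accumulator path below.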
+  Handle<FixedArray> values_or_entries;
+  if (filter == ENUMERABLE_STRINGS) {
+    Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
+        isolate, object, get_entries, &values_or_entries);
+    if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
+    if (fast_values_or_entries.FromJust()) return values_or_entries;
+  }
+
   PropertyFilter key_filter =
       static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
   KeyAccumulator accumulator(isolate, OWN_ONLY, key_filter);
@@ -8878,8 +8887,7 @@
   Handle<FixedArray> keys = accumulator.GetKeys(CONVERT_TO_STRING);
   DCHECK(ContainsOnlyValidKeys(keys));
 
-  Handle<FixedArray> values_or_entries =
-      isolate->factory()->NewFixedArray(keys->length());
+  values_or_entries = isolate->factory()->NewFixedArray(keys->length());
   int length = 0;
 
   for (int i = 0; i < keys->length(); ++i) {
@@ -8973,6 +8981,8 @@
                                              PropertyAttributes attributes) {
   Isolate* isolate = it->isolate();
 
+  it->UpdateProtector();
+
   if (it->state() == LookupIterator::ACCESS_CHECK) {
     if (!it->HasAccess()) {
       isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
@@ -8997,7 +9007,7 @@
     preexists = it->IsFound();
     if (preexists && (it->state() == LookupIterator::DATA ||
                       it->GetAccessors()->IsAccessorInfo())) {
-      old_value = GetProperty(it).ToHandleChecked();
+      old_value = Object::GetProperty(it).ToHandleChecked();
     }
   }
 
@@ -9274,16 +9284,14 @@
                                 int unused_property_fields) {
 #ifdef DEBUG
   Isolate* isolate = map->GetIsolate();
-  // Strict and strong function maps have Function as a constructor but the
+  // Strict function maps have Function as a constructor but the
   // Function's initial map is a sloppy function map. Same holds for
   // GeneratorFunction and its initial map.
   Object* constructor = map->GetConstructor();
   DCHECK(constructor->IsJSFunction());
   DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
          *map == *isolate->strict_function_map() ||
-         *map == *isolate->strong_function_map() ||
-         *map == *isolate->strict_generator_function_map() ||
-         *map == *isolate->strong_generator_function_map());
+         *map == *isolate->strict_generator_function_map());
 #endif
   // Initial maps must always own their descriptors, and their descriptor
   // arrays do not contain descriptors that do not belong to the map.
@@ -9589,9 +9597,8 @@
                                 LanguageMode language_mode, FunctionKind kind) {
   DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
   // Initial map for sloppy mode function is stored in the function
-  // constructor. Initial maps for strict and strong modes are cached as
-  // special transitions using |strict_function_transition_symbol| and
-  // |strong_function_transition_symbol| respectively as a key.
+  // constructor. Initial maps for strict mode are cached as special transitions
+  // using |strict_function_transition_symbol| as a key.
   if (language_mode == SLOPPY) return initial_map;
   Isolate* isolate = initial_map->GetIsolate();
   Factory* factory = isolate->factory();
@@ -9606,9 +9613,6 @@
     case STRICT:
       transition_symbol = factory->strict_function_transition_symbol();
       break;
-    case STRONG:
-      transition_symbol = factory->strong_function_transition_symbol();
-      break;
     default:
       UNREACHABLE();
       break;
@@ -9896,9 +9900,8 @@
   return new_map;
 }
 
-
 Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
-                                              Handle<Name> name,
+                                              Handle<Name> name, int descriptor,
                                               AccessorComponent component,
                                               Handle<Object> accessor,
                                               PropertyAttributes attributes) {
@@ -9941,7 +9944,6 @@
 
   Handle<AccessorPair> pair;
   DescriptorArray* old_descriptors = map->instance_descriptors();
-  int descriptor = old_descriptors->SearchWithCache(isolate, *name, *map);
   if (descriptor != DescriptorArray::kNotFound) {
     if (descriptor != map->LastAdded()) {
       return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
@@ -10187,6 +10189,7 @@
 
 void CodeCache::UpdateDefaultCache(
     Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
+  Isolate* isolate = code_cache->GetIsolate();
   // When updating the default code cache we disregard the type encoded in the
   // flags. This allows call constant stubs to overwrite call field
   // stubs, etc.
@@ -10199,19 +10202,23 @@
   {
     DisallowHeapAllocation no_alloc;
     int deleted_index = -1;
+    Object* null = isolate->heap()->null_value();
+    Object* undefined = isolate->heap()->undefined_value();
+    DCHECK(name->IsUniqueName());
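+    // Cache keys are unique names, so raw pointer comparison suffices below.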
     for (int i = 0; i < length; i += kCodeCacheEntrySize) {
       Object* key = cache->get(i);
-      if (key->IsNull()) {
+      if (key == null) {
         if (deleted_index < 0) deleted_index = i;
         continue;
       }
-      if (key->IsUndefined()) {
+      if (key == undefined) {
         if (deleted_index >= 0) i = deleted_index;
         cache->set(i + kCodeCacheEntryNameOffset, *name);
         cache->set(i + kCodeCacheEntryCodeOffset, *code);
         return;
       }
-      if (name->Equals(Name::cast(key))) {
+      DCHECK(key->IsUniqueName());
+      if (*name == key) {
         Code::Flags found =
             Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
         if (Code::RemoveTypeFromFlags(found) == flags) {
@@ -10232,7 +10239,6 @@
 
   // Extend the code cache with some new entries (at least one). Must be a
   // multiple of the entry size.
-  Isolate* isolate = cache->GetIsolate();
   int new_length = length + (length >> 1) + kCodeCacheEntrySize;
   new_length = new_length - new_length % kCodeCacheEntrySize;
   DCHECK((new_length % kCodeCacheEntrySize) == 0);
@@ -10267,13 +10273,18 @@
 
 Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) {
   FixedArray* cache = default_cache();
+  Heap* heap = GetHeap();
+  Object* null = heap->null_value();
+  Object* undefined = heap->undefined_value();
   int length = cache->length();
+  DCHECK(name->IsUniqueName());
   for (int i = 0; i < length; i += kCodeCacheEntrySize) {
     Object* key = cache->get(i + kCodeCacheEntryNameOffset);
     // Skip deleted elements.
-    if (key->IsNull()) continue;
-    if (key->IsUndefined()) return key;
-    if (name->Equals(Name::cast(key))) {
+    if (key == null) continue;
+    if (key == undefined) return key;
+    DCHECK(key->IsUniqueName());
+    if (name == key) {
       Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
       if (Code::RemoveTypeFromFlags(code->flags()) == flags) {
         return code;
@@ -10758,7 +10769,6 @@
   return array;
 }
 
-
 Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
                                  Handle<Object> obj2, AddMode mode) {
   int length = array->Length();
@@ -11586,6 +11596,10 @@
           // Right hand side is longer.  Recurse over left.
           if (from < boundary) {
             WriteToFlat(first, sink, from, boundary);
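+            // If both halves of the cons string are the same object, reuse
+            // the characters just written instead of recursing again.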
+            if (from == 0 && cons_string->second() == first) {
+              CopyChars(sink + boundary, sink, boundary);
+              return;
+            }
             sink += boundary - from;
             from = 0;
           } else {
@@ -12116,7 +12130,8 @@
   Heap* heap = string->GetHeap();
   // Sizes are pointer size aligned, so that we can use filler objects
   // that are a multiple of pointer size.
-  heap->CreateFillerObjectAt(start_of_string + new_size, delta);
+  heap->CreateFillerObjectAt(start_of_string + new_size, delta,
+                             ClearRecordedSlots::kNo);
   heap->AdjustLiveBytes(*string, -delta, Heap::CONCURRENT_TO_SWEEPER);
 
   // We are storing the new length using release store after creating a filler
@@ -12271,7 +12286,6 @@
          first->instance_type() == second->instance_type() &&
          first->bit_field() == second->bit_field() &&
          first->is_extensible() == second->is_extensible() &&
-         first->is_strong() == second->is_strong() &&
          first->new_target_is_base() == second->new_target_is_base() &&
          first->has_hidden_prototype() == second->has_hidden_prototype();
 }
@@ -12340,14 +12354,6 @@
     MarkForOptimization();
     return;
   }
-  if (isolate->concurrent_osr_enabled() &&
-      isolate->optimizing_compile_dispatcher()->IsQueuedForOSR(this)) {
-    // Do not attempt regular recompilation if we already queued this for OSR.
-    // TODO(yangguo): This is necessary so that we don't install optimized
-    // code on a function that is already optimized, since OSR and regular
-    // recompilation race.  This goes away as soon as OSR becomes one-shot.
-    return;
-  }
   DCHECK(!IsInOptimizationQueue());
   DCHECK(!IsOptimized());
   DCHECK(shared()->allows_lazy_compilation() ||
@@ -12380,17 +12386,15 @@
   }
 }
 
-
-void SharedFunctionInfo::AddToOptimizedCodeMapInternal(
+// static
+void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
-    Handle<HeapObject> code, Handle<LiteralsArray> literals,
+    MaybeHandle<Code> code, Handle<LiteralsArray> literals,
     BailoutId osr_ast_id) {
   Isolate* isolate = shared->GetIsolate();
   if (isolate->serializer_enabled()) return;
-  DCHECK(*code == isolate->heap()->undefined_value() ||
-         !shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
-  DCHECK(*code == isolate->heap()->undefined_value() ||
-         Code::cast(*code)->kind() == Code::OPTIMIZED_FUNCTION);
+  DCHECK(code.is_null() ||
+         code.ToHandleChecked()->kind() == Code::OPTIMIZED_FUNCTION);
   DCHECK(native_context->IsNativeContext());
   STATIC_ASSERT(kEntryLength == 4);
   Handle<FixedArray> new_code_map;
@@ -12405,15 +12409,10 @@
     Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
     entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
     if (entry > kSharedCodeIndex) {
-      // Found an existing context-specific entry. If the user provided valid
-      // code, it must not contain any code.
-      DCHECK(code->IsUndefined() ||
-             WeakCell::cast(old_code_map->get(entry + kCachedCodeOffset))
-                 ->cleared());
-
-      // Just set the code and literals to the entry.
-      if (!code->IsUndefined()) {
-        Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+      // Just set the code and literals of the entry.
+      if (!code.is_null()) {
+        Handle<WeakCell> code_cell =
+            isolate->factory()->NewWeakCell(code.ToHandleChecked());
         old_code_map->set(entry + kCachedCodeOffset, *code_cell);
       }
       Handle<WeakCell> literals_cell =
@@ -12446,9 +12445,9 @@
     }
   }
 
-  Handle<WeakCell> code_cell = code->IsUndefined()
-                                   ? isolate->factory()->empty_weak_cell()
-                                   : isolate->factory()->NewWeakCell(code);
+  Handle<WeakCell> code_cell =
+      code.is_null() ? isolate->factory()->empty_weak_cell()
+                     : isolate->factory()->NewWeakCell(code.ToHandleChecked());
   Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
   WeakCell* context_cell = native_context->self_weak_cell();
 
@@ -12866,12 +12865,10 @@
     Handle<Context> native_context, Handle<Map> initial_map) {
   // Replace all of the cached initial array maps in the native context with
   // the appropriate transitioned elements kind maps.
-  Strength strength =
-      initial_map->is_strong() ? Strength::STRONG : Strength::WEAK;
   Handle<Map> current_map = initial_map;
   ElementsKind kind = current_map->elements_kind();
   DCHECK_EQ(GetInitialFastElementsKind(), kind);
-  native_context->set(Context::ArrayMapIndex(kind, strength), *current_map);
+  native_context->set(Context::ArrayMapIndex(kind), *current_map);
   for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
        i < kFastElementsKindCount; ++i) {
     Handle<Map> new_map;
@@ -12883,7 +12880,7 @@
           current_map, next_kind, INSERT_TRANSITION);
     }
     DCHECK_EQ(next_kind, new_map->elements_kind());
-    native_context->set(Context::ArrayMapIndex(next_kind, strength), *new_map);
+    native_context->set(Context::ArrayMapIndex(next_kind), *new_map);
     current_map = new_map;
   }
   return initial_map;
@@ -12915,9 +12912,6 @@
       function->set_prototype_or_initial_map(*value);
     } else {
       Handle<Map> new_map = Map::Copy(initial_map, "SetInstancePrototype");
-      if (function->map()->is_strong()) {
-        new_map->set_is_strong();
-      }
       JSFunction::SetInitialMap(function, new_map, value);
 
       // If the function is used as the global Array function, cache the
@@ -12929,9 +12923,6 @@
       if (array_function->IsJSFunction() &&
           *function == JSFunction::cast(*array_function)) {
         CacheInitialJSArrayMaps(native_context, new_map);
-        Handle<Map> new_strong_map = Map::Copy(new_map, "SetInstancePrototype");
-        new_strong_map->set_is_strong();
-        CacheInitialJSArrayMaps(native_context, new_strong_map);
       }
     }
 
@@ -13100,7 +13091,7 @@
 
   // The constructor should be compiled for the optimization hints to be
   // available.
-  Compiler::Compile(function, CLEAR_EXCEPTION);
+  Compiler::Compile(function, Compiler::CLEAR_EXCEPTION);
 
   // First create a new map with the size and number of in-object properties
   // suggested by the function.
@@ -13116,9 +13107,6 @@
                                   &in_object_properties);
 
   Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
-  if (function->map()->is_strong()) {
-    map->set_is_strong();
-  }
 
   // Fetch or allocate prototype.
   Handle<Object> prototype;
@@ -13247,43 +13235,6 @@
 }
 
 
-// The filter is a pattern that matches function names in this way:
-//   "*"      all; the default
-//   "-"      all but the top-level function
-//   "-name"  all but the function "name"
-//   ""       only the top-level function
-//   "name"   only the function "name"
-//   "name*"  only functions starting with "name"
-//   "~"      none; the tilde is not an identifier
-bool JSFunction::PassesFilter(const char* raw_filter) {
-  if (*raw_filter == '*') return true;
-  String* name = shared()->DebugName();
-  Vector<const char> filter = CStrVector(raw_filter);
-  if (filter.length() == 0) return name->length() == 0;
-  if (filter[0] == '-') {
-    // Negative filter.
-    if (filter.length() == 1) {
-      return (name->length() != 0);
-    } else if (name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
-      return false;
-    }
-    if (filter[filter.length() - 1] == '*' &&
-        name->IsUtf8EqualTo(filter.SubVector(1, filter.length() - 1), true)) {
-      return false;
-    }
-    return true;
-
-  } else if (name->IsUtf8EqualTo(filter)) {
-    return true;
-  }
-  if (filter[filter.length() - 1] == '*' &&
-      name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
-    return true;
-  }
-  return false;
-}
-
-
 Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
   Isolate* isolate = function->GetIsolate();
   Handle<Object> name =
@@ -13345,6 +13296,18 @@
   return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
 }
 
+// static
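+// A bound function's name is the "bound " prefix followed by the target's
+// name; falls back to the bare prefix when the target has no string name.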
+MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
+                                             Handle<JSBoundFunction> function) {
+  Handle<String> prefix = isolate->factory()->bound__string();
+  if (!function->bound_target_function()->IsJSFunction()) return prefix;
+  Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
+                            isolate);
+  Handle<Object> target_name = JSFunction::GetName(target);
+  if (!target_name->IsString()) return prefix;
+  Factory* factory = isolate->factory();
+  return factory->NewConsString(prefix, Handle<String>::cast(target_name));
+}
 
 // static
 Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
@@ -13396,14 +13359,14 @@
   return builder.Finish().ToHandleChecked();
 }
 
-
 void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
                          const char* to_string, Handle<Object> to_number,
-                         const char* type_of, byte kind) {
+                         bool to_boolean, const char* type_of, byte kind) {
   Handle<String> internalized_to_string =
       isolate->factory()->InternalizeUtf8String(to_string);
   Handle<String> internalized_type_of =
       isolate->factory()->InternalizeUtf8String(type_of);
+  oddball->set_to_boolean(isolate->heap()->ToBoolean(to_boolean));
   oddball->set_to_number(*to_number);
   oddball->set_to_string(*internalized_to_string);
   oddball->set_type_of(*internalized_type_of);
@@ -13505,8 +13468,9 @@
       isolate->factory()->InternalizeOneByteString(
           STATIC_CHAR_VECTOR("nameOrSourceURL"));
   Handle<JSObject> script_wrapper = Script::GetWrapper(script);
-  Handle<Object> property = Object::GetProperty(
-      script_wrapper, name_or_source_url_key).ToHandleChecked();
+  Handle<Object> property =
+      JSReceiver::GetProperty(script_wrapper, name_or_source_url_key)
+          .ToHandleChecked();
   DCHECK(property->IsJSFunction());
   Handle<Object> result;
   // Do not check against pending exception, since this function may be called
@@ -13606,7 +13570,7 @@
   }
 
 #ifdef DEBUG
-  {
+  if (FLAG_enable_slow_asserts) {
     WeakFixedArray::Iterator iterator(*list);
     SharedFunctionInfo* next;
     while ((next = iterator.Next<SharedFunctionInfo>())) {
@@ -13648,6 +13612,41 @@
   return String::cast(n);
 }
 
+// The filter is a pattern that matches function names in this way:
+//   "*"      all; the default
+//   "-"      all but the top-level function
+//   "-name"  all but the function "name"
+//   ""       only the top-level function
+//   "name"   only the function "name"
+//   "name*"  only functions starting with "name"
+//   "~"      none; the tilde is not an identifier
+bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
+  if (*raw_filter == '*') return true;
+  String* name = DebugName();
+  Vector<const char> filter = CStrVector(raw_filter);
+  if (filter.length() == 0) return name->length() == 0;
+  if (filter[0] == '-') {
+    // Negative filter.
+    if (filter.length() == 1) {
+      return (name->length() != 0);
+    } else if (name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+      return false;
+    }
+    if (filter[filter.length() - 1] == '*' &&
+        name->IsUtf8EqualTo(filter.SubVector(1, filter.length() - 1), true)) {
+      return false;
+    }
+    return true;
+
+  } else if (name->IsUtf8EqualTo(filter)) {
+    return true;
+  }
+  if (filter[filter.length() - 1] == '*' &&
+      name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
+    return true;
+  }
+  return false;
+}
 
 bool SharedFunctionInfo::HasSourceCode() const {
   return !script()->IsUndefined() &&
@@ -13674,14 +13673,11 @@
   return end_position() - start_position();
 }
 
-
-namespace {
-
-void CalculateInstanceSizeHelper(InstanceType instance_type,
-                                 int requested_internal_fields,
-                                 int requested_in_object_properties,
-                                 int* instance_size,
-                                 int* in_object_properties) {
+void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
+                                             int requested_internal_fields,
+                                             int requested_in_object_properties,
+                                             int* instance_size,
+                                             int* in_object_properties) {
   int header_size = JSObject::GetHeaderSize(instance_type);
   DCHECK_LE(requested_internal_fields,
             (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2);
@@ -13694,8 +13690,6 @@
                           requested_internal_fields;
 }
 
-}  // namespace
-
 
 void JSFunction::CalculateInstanceSize(InstanceType instance_type,
                                        int requested_internal_fields,
@@ -13808,8 +13802,10 @@
   set_optimization_disabled(true);
   set_disable_optimization_reason(reason);
   // Code should be the lazy compilation stub or else unoptimized.
-  DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
-  PROFILE(GetIsolate(), CodeDisableOptEvent(code(), this));
+  DCHECK(abstract_code()->kind() == AbstractCode::FUNCTION ||
+         abstract_code()->kind() == AbstractCode::INTERPRETED_FUNCTION ||
+         abstract_code()->kind() == AbstractCode::BUILTIN);
+  PROFILE(GetIsolate(), CodeDisableOptEvent(abstract_code(), this));
   if (FLAG_trace_opt) {
     PrintF("[disabled optimization for ");
     ShortPrint();
@@ -14064,8 +14060,6 @@
 
 
 void Code::CopyFrom(const CodeDesc& desc) {
-  DCHECK(Marking::Color(this) == Marking::WHITE_OBJECT);
-
   // copy code
   CopyBytes(instruction_start(), desc.buffer,
             static_cast<size_t>(desc.instr_size));
@@ -14089,21 +14083,22 @@
     RelocInfo::Mode mode = it.rinfo()->rmode();
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
-      it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+      it.rinfo()->set_target_object(*p, UPDATE_WRITE_BARRIER,
+                                    SKIP_ICACHE_FLUSH);
     } else if (mode == RelocInfo::CELL) {
       Handle<Cell> cell  = it.rinfo()->target_cell_handle();
-      it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+      it.rinfo()->set_target_cell(*cell, UPDATE_WRITE_BARRIER,
+                                  SKIP_ICACHE_FLUSH);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       Code* code = Code::cast(*p);
       it.rinfo()->set_target_address(code->instruction_start(),
-                                     SKIP_WRITE_BARRIER,
-                                     SKIP_ICACHE_FLUSH);
+                                     UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
     } else if (RelocInfo::IsRuntimeEntry(mode)) {
       Address p = it.rinfo()->target_runtime_entry(origin);
-      it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
+      it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
                                            SKIP_ICACHE_FLUSH);
     } else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
       Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
@@ -14147,6 +14142,8 @@
     }
     it.next();
   }
+  DCHECK(kind() == FUNCTION || (is_optimized_code() && is_turbofanned()) ||
+         is_wasm_code() || position == RelocInfo::kNoPosition);
   return position;
 }
 
@@ -14438,6 +14435,12 @@
   if (sequence != NULL) MakeCodeAgeSequenceYoung(sequence, isolate);
 }
 
+void Code::PreAge(Isolate* isolate) {
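+  // Patch the code age sequence so the code starts out pre-aged and becomes
+  // a flushing candidate earlier than regular young code.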
+  byte* sequence = FindCodeAgeSequence();
+  if (sequence != NULL) {
+    PatchPlatformCodeAge(isolate, sequence, kPreAgedCodeAge, NO_MARKING_PARITY);
+  }
+}
 
 void Code::MarkToBeExecutedOnce(Isolate* isolate) {
   byte* sequence = FindCodeAgeSequence();
@@ -14447,7 +14450,6 @@
   }
 }
 
-
 void Code::MakeOlder(MarkingParity current_parity) {
   byte* sequence = FindCodeAgeSequence();
   if (sequence != NULL) {
@@ -14720,6 +14722,15 @@
           break;
         }
 
+        case Translation::TAIL_CALLER_FRAME: {
+          int shared_info_id = iterator.Next();
+          Object* shared_info = LiteralArray()->get(shared_info_id);
+          os << "{function="
+             << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+             << "}";
+          break;
+        }
+
         case Translation::GETTER_STUB_FRAME:
         case Translation::SETTER_STUB_FRAME: {
           int shared_info_id = iterator.Next();
@@ -14925,11 +14936,16 @@
       os << "compare_operation = " << Token::Name(stub.op()) << "\n";
     }
   }
-  if ((name != NULL) && (name[0] != '\0')) {
+  if ((name != nullptr) && (name[0] != '\0')) {
     os << "name = " << name << "\n";
   } else if (kind() == BUILTIN) {
     name = GetIsolate()->builtins()->Lookup(instruction_start());
-    if (name != NULL) {
+    if (name != nullptr) {
+      os << "name = " << name << "\n";
+    }
+  } else if (kind() == BYTECODE_HANDLER) {
+    name = GetIsolate()->interpreter()->LookupNameOfBytecodeHandler(this);
+    if (name != nullptr) {
       os << "name = " << name << "\n";
     }
   }
@@ -15030,7 +15046,6 @@
     }
 #ifdef OBJECT_PRINT
     if (!type_feedback_info()->IsUndefined()) {
-      OFStream os(stdout);
       TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(os);
       os << "\n";
     }
@@ -15057,7 +15072,8 @@
 
 int BytecodeArray::SourcePosition(int offset) {
   int last_position = 0;
-  for (interpreter::SourcePositionTableIterator iterator(this);
+  for (interpreter::SourcePositionTableIterator iterator(
+           source_position_table());
        !iterator.done() && iterator.bytecode_offset() <= offset;
        iterator.Advance()) {
     last_position = iterator.source_position();
@@ -15071,7 +15087,7 @@
   int position = SourcePosition(offset);
   // Now find the closest statement position before the position.
   int statement_position = 0;
-  interpreter::SourcePositionTableIterator iterator(this);
+  interpreter::SourcePositionTableIterator iterator(source_position_table());
   while (!iterator.done()) {
     if (iterator.is_statement()) {
       int p = iterator.source_position();
@@ -15089,49 +15105,30 @@
   os << "Frame size " << frame_size() << "\n";
   Vector<char> buf = Vector<char>::New(50);
 
-  const uint8_t* first_bytecode_address = GetFirstBytecodeAddress();
-  int bytecode_size = 0;
+  const uint8_t* base_address = GetFirstBytecodeAddress();
+  interpreter::SourcePositionTableIterator source_positions(
+      source_position_table());
 
-  interpreter::SourcePositionTableIterator source_positions(this);
-
-  for (int i = 0; i < this->length(); i += bytecode_size) {
-    const uint8_t* bytecode_start = &first_bytecode_address[i];
-    interpreter::Bytecode bytecode =
-        interpreter::Bytecodes::FromByte(bytecode_start[0]);
-    bytecode_size = interpreter::Bytecodes::Size(bytecode);
-
-    if (!source_positions.done() && i == source_positions.bytecode_offset()) {
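+  // Walk the bytecodes using BytecodeArrayIterator instead of decoding
+  // bytecode sizes by hand.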
+  interpreter::BytecodeArrayIterator iterator(handle(this));
+  while (!iterator.done()) {
+    if (!source_positions.done() &&
+        iterator.current_offset() == source_positions.bytecode_offset()) {
       os << std::setw(5) << source_positions.source_position();
       os << (source_positions.is_statement() ? " S> " : " E> ");
       source_positions.Advance();
     } else {
       os << "         ";
     }
-
-    SNPrintF(buf, "%p", bytecode_start);
+    const uint8_t* current_address = base_address + iterator.current_offset();
+    SNPrintF(buf, "%p", current_address);
     os << buf.start() << " : ";
-    interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
-
-    if (interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
-      DCHECK_EQ(bytecode_size, 3);
-      int index = static_cast<int>(ReadUnalignedUInt16(bytecode_start + 1));
-      int offset = Smi::cast(constant_pool()->get(index))->value();
-      SNPrintF(buf, " (%p)", bytecode_start + offset);
-      os << buf.start();
-    } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
-      DCHECK_EQ(bytecode_size, 2);
-      int index = static_cast<int>(bytecode_start[1]);
-      int offset = Smi::cast(constant_pool()->get(index))->value();
-      SNPrintF(buf, " (%p)", bytecode_start + offset);
-      os << buf.start();
-    } else if (interpreter::Bytecodes::IsJump(bytecode)) {
-      DCHECK_EQ(bytecode_size, 2);
-      int offset = static_cast<int8_t>(bytecode_start[1]);
-      SNPrintF(buf, " (%p)", bytecode_start + offset);
+    interpreter::Bytecodes::Decode(os, current_address, parameter_count());
+    if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
+      SNPrintF(buf, " (%p)", base_address + iterator.GetJumpTargetOffset());
       os << buf.start();
     }
-
     os << std::endl;
+    iterator.Advance();
   }
 
   if (constant_pool()->length() > 0) {
@@ -15170,7 +15167,7 @@
                         uint32_t index,
                         List<Handle<Object> >* old_values,
                         List<uint32_t>* indices) {
-  LookupIterator it(isolate, object, index, LookupIterator::HIDDEN);
+  LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
   CHECK(JSReceiver::GetPropertyAttributes(&it).IsJust());
   DCHECK(it.IsFound());
   if (!it.IsConfigurable()) return false;
@@ -15677,16 +15674,6 @@
                                    ShouldThrow should_throw) {
   Isolate* isolate = object->GetIsolate();
 
-  // Setting the prototype of an Array instance invalidates the species
-  // protector
-  // because it could change the constructor property of the instance, which
-  // could change the @@species constructor.
-  if (object->IsJSArray() && isolate->IsArraySpeciesLookupChainIntact()) {
-    isolate->CountUsage(
-        v8::Isolate::UseCounterFeature::kArrayInstanceProtoModified);
-    isolate->InvalidateArraySpeciesProtector();
-  }
-
   const bool observed = from_javascript && object->map()->is_observed();
   Handle<Object> old_value;
   if (observed) {
@@ -15739,12 +15726,6 @@
     DCHECK(!object->IsAccessCheckNeeded());
   }
 
-  // Strong objects may not have their prototype set via __proto__ or
-  // setPrototypeOf.
-  if (from_javascript && object->map()->is_strong()) {
-    RETURN_FAILURE(isolate, should_throw,
-                   NewTypeError(MessageTemplate::kStrongSetProto, object));
-  }
   Heap* heap = isolate->heap();
   // Silently ignore the change if value is not a JSObject or null.
   // SpiderMonkey behaves this way.
@@ -16235,10 +16216,18 @@
 
 
 bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
-  LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
+  Map* map = array->map();
+  // Fast path: "length" is the first fast property of arrays. Since it's not
+  // configurable, it's guaranteed to be the first in the descriptor array.
+  if (!map->is_dictionary_map()) {
+    DCHECK(map->instance_descriptors()->GetKey(0) ==
+           array->GetHeap()->length_string());
+    return map->instance_descriptors()->GetDetails(0).IsReadOnly();
+  }
+
+  Isolate* isolate = array->GetIsolate();
+  LookupIterator it(array, isolate->factory()->length_string(), array,
                     LookupIterator::OWN_SKIP_INTERCEPTOR);
-  CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
-  CHECK(it.IsFound());
   CHECK_EQ(LookupIterator::ACCESSOR, it.state());
   return it.IsReadOnly();
 }
@@ -16341,16 +16330,6 @@
 }
 
 
-InterceptorInfo* JSObject::GetNamedInterceptor() {
-  DCHECK(map()->has_named_interceptor());
-  JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
-  DCHECK(constructor->shared()->IsApiFunction());
-  Object* result =
-      constructor->shared()->get_api_func_data()->named_property_handler();
-  return InterceptorInfo::cast(result);
-}
-
-
 MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
                                                          bool* done) {
   *done = false;
@@ -16366,17 +16345,19 @@
   }
 
   Handle<JSObject> holder = it->GetHolder<JSObject>();
-  v8::Local<v8::Value> result;
-  PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder,
-                                 Object::DONT_THROW);
+  Handle<Object> result;
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSReceiver()) {
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder, Object::DONT_THROW);
 
   if (it->IsElement()) {
     uint32_t index = it->index();
     v8::IndexedPropertyGetterCallback getter =
         v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
-    LOG(isolate,
-        ApiIndexedPropertyAccess("interceptor-indexed-get", *holder, index));
     result = args.Call(getter, index);
   } else {
     Handle<Name> name = it->name();
@@ -16389,18 +16370,14 @@
     v8::GenericNamedPropertyGetterCallback getter =
         v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
             interceptor->getter());
-    LOG(isolate,
-        ApiNamedPropertyAccess("interceptor-named-get", *holder, *name));
-    result = args.Call(getter, v8::Utils::ToLocal(name));
+    result = args.Call(getter, name);
   }
 
   RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-  if (result.IsEmpty()) return isolate->factory()->undefined_value();
-  Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
-  result_internal->VerifyApiCallResultType();
+  if (result.is_null()) return isolate->factory()->undefined_value();
   *done = true;
   // Rebox handle before return
-  return handle(*result_internal, isolate);
+  return handle(*result, isolate);
 }
 
 
@@ -16415,7 +16392,7 @@
 Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
                                              uint32_t index) {
   Isolate* isolate = object->GetIsolate();
-  LookupIterator it(isolate, object, index,
+  LookupIterator it(isolate, object, index, object,
                     LookupIterator::OWN_SKIP_INTERCEPTOR);
   return HasProperty(&it);
 }
@@ -16543,7 +16520,6 @@
   }
 }
 
-
 void JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
                                        PropertyFilter filter) {
   if (HasFastProperties()) {
@@ -16584,7 +16560,6 @@
   return GetOwnElementKeys(NULL, filter);
 }
 
-
 void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
                                      KeyAccumulator* keys,
                                      PropertyFilter filter) {
@@ -16736,15 +16711,14 @@
       Object::ToObject(isolate, object).ToHandleChecked();
 
   Handle<String> tag;
-  if (FLAG_harmony_tostring) {
-    Handle<Object> to_string_tag;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, to_string_tag,
-        GetProperty(receiver, isolate->factory()->to_string_tag_symbol()),
-        String);
-    if (to_string_tag->IsString()) {
-      tag = Handle<String>::cast(to_string_tag);
-    }
+  Handle<Object> to_string_tag;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, to_string_tag,
+      JSReceiver::GetProperty(receiver,
+                              isolate->factory()->to_string_tag_symbol()),
+      String);
+  if (to_string_tag->IsString()) {
+    tag = Handle<String>::cast(to_string_tag);
   }
 
   if (tag.is_null()) {
@@ -16830,7 +16804,6 @@
       hash ^= String::cast(script->source())->Hash();
       STATIC_ASSERT(LANGUAGE_END == 3);
       if (is_strict(language_mode)) hash ^= 0x8000;
-      if (is_strong(language_mode)) hash ^= 0x10000;
       hash += scope_position;
     }
     return hash;
@@ -16899,7 +16872,6 @@
         flag = JSRegExp::kUnicode;
         break;
       case 'y':
-        if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
         flag = JSRegExp::kSticky;
         break;
       default:
@@ -17130,7 +17102,7 @@
 class InternalizedStringKey : public HashTableKey {
  public:
   explicit InternalizedStringKey(Handle<String> string)
-      : string_(string) { }
+      : string_(String::Flatten(string)) {}
 
   bool IsMatch(Object* string) override {
     return String::cast(string)->Equals(*string_);
@@ -17520,6 +17492,10 @@
 Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
            uint32_t>::DeleteProperty(Handle<SeededNumberDictionary>, int);
 
+template Handle<Object>
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
+           uint32_t>::DeleteProperty(Handle<UnseededNumberDictionary>, int);
+
 template Handle<NameDictionary>
 HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
     New(Isolate*, int, MinimumCapacity, PretenureFlag);
@@ -17532,6 +17508,10 @@
 HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
     Shrink(Handle<SeededNumberDictionary>, uint32_t);
 
+template Handle<UnseededNumberDictionary>
+    HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape,
+              uint32_t>::Shrink(Handle<UnseededNumberDictionary>, uint32_t);
+
 template Handle<NameDictionary>
 Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::Add(
     Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails);
@@ -17578,9 +17558,6 @@
 Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
     EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
 
-template bool Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
-                         uint32_t>::HasComplexElements();
-
 template int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape,
                        uint32_t>::FindEntry(uint32_t);
 
@@ -17994,7 +17971,7 @@
                                                    int expected) {
   Handle<StringTable> table = isolate->factory()->string_table();
   // We need a key instance for the virtual hash function.
-  InternalizedStringKey dummy_key(Handle<String>::null());
+  InternalizedStringKey dummy_key(isolate->factory()->empty_string());
   table = StringTable::EnsureCapacity(table, expected, &dummy_key);
   isolate->heap()->SetRootStringTable(*table);
 }
@@ -18002,8 +17979,20 @@
 
 Handle<String> StringTable::LookupString(Isolate* isolate,
                                          Handle<String> string) {
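+  // Fast path for flat cons strings: flattening such a string just returns
+  // its first part, which may already be internalized.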
+  if (string->IsConsString() && string->IsFlat()) {
+    string = String::Flatten(string);
+    if (string->IsInternalizedString()) return string;
+  }
+
   InternalizedStringKey key(string);
-  return LookupKey(isolate, &key);
+  Handle<String> result = LookupKey(isolate, &key);
+
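+  // If the lookup went through a cons string, cache the result in it:
+  // pointing first at the internalized string (and second at the empty
+  // string) lets later lookups take the fast path above.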
+  if (string->IsConsString()) {
+    Handle<ConsString> cons = Handle<ConsString>::cast(string);
+    cons->set_first(*result);
+    cons->set_second(isolate->heap()->empty_string());
+  }
+  return result;
 }
 
 
@@ -18042,6 +18031,25 @@
   return NULL;
 }
 
+Handle<StringSet> StringSet::New(Isolate* isolate) {
+  return HashTable::New(isolate, 0);
+}
+
+Handle<StringSet> StringSet::Add(Handle<StringSet> stringset,
+                                 Handle<String> name) {
+  if (!stringset->Has(name)) {
+    stringset = EnsureCapacity(stringset, 1, *name);
+    uint32_t hash = StringSetShape::Hash(*name);
+    int entry = stringset->FindInsertionEntry(hash);
+    stringset->set(EntryToIndex(entry), *name);
+    stringset->ElementAdded();
+  }
+  return stringset;
+}
+
+bool StringSet::Has(Handle<String> name) {
+  return FindEntry(*name) != kNotFound;
+}
 
 Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
                                              Handle<Context> context,
@@ -18199,40 +18207,6 @@
 }
 
 
-// StringsKey used for HashTable where key is array of internalized strings.
-class StringsKey : public HashTableKey {
- public:
-  explicit StringsKey(Handle<FixedArray> strings) : strings_(strings) { }
-
-  bool IsMatch(Object* strings) override {
-    FixedArray* o = FixedArray::cast(strings);
-    int len = strings_->length();
-    if (o->length() != len) return false;
-    for (int i = 0; i < len; i++) {
-      if (o->get(i) != strings_->get(i)) return false;
-    }
-    return true;
-  }
-
-  uint32_t Hash() override { return HashForObject(*strings_); }
-
-  uint32_t HashForObject(Object* obj) override {
-    FixedArray* strings = FixedArray::cast(obj);
-    int len = strings->length();
-    uint32_t hash = 0;
-    for (int i = 0; i < len; i++) {
-      hash ^= String::cast(strings->get(i))->Hash();
-    }
-    return hash;
-  }
-
-  Handle<Object> AsHandle(Isolate* isolate) override { return strings_; }
-
- private:
-  Handle<FixedArray> strings_;
-};
-
-
 template<typename Derived, typename Shape, typename Key>
 Handle<Derived> Dictionary<Derived, Shape, Key>::New(
     Isolate* isolate,
@@ -18409,6 +18383,21 @@
   dictionary->ElementAdded();
 }
 
+bool SeededNumberDictionary::HasComplexElements() {
+  if (!requires_slow_elements()) return false;
+  int capacity = this->Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = this->KeyAt(i);
+    if (this->IsKey(k)) {
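+      // IsKey() filters out the hole and undefined, so deleted entries are
+      // never considered here.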
+      DCHECK(!IsDeleted(i));
+      PropertyDetails details = this->DetailsAt(i);
+      if (details.type() == ACCESSOR_CONSTANT) return true;
+      PropertyAttributes attr = details.attributes();
+      if (attr & ALL_ATTRIBUTES_MASK) return true;
+    }
+  }
+  return false;
+}
 
 void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
                                                 bool used_as_prototype) {
@@ -18516,23 +18505,6 @@
 }
 
 
-template <typename Derived, typename Shape, typename Key>
-bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
-  int capacity = this->Capacity();
-  for (int i = 0; i < capacity; i++) {
-    Object* k = this->KeyAt(i);
-    if (this->IsKey(k) && !k->FilterKey(ALL_PROPERTIES)) {
-      if (this->IsDeleted(i)) continue;
-      PropertyDetails details = this->DetailsAt(i);
-      if (details.type() == ACCESSOR_CONSTANT) return true;
-      PropertyAttributes attr = details.attributes();
-      if (attr & ALL_ATTRIBUTES_MASK) return true;
-    }
-  }
-  return false;
-}
-
-
 template <typename Dictionary>
 struct EnumIndexComparator {
   explicit EnumIndexComparator(Dictionary* dict) : dict(dict) {}
@@ -18594,7 +18566,6 @@
   return index - start_index;
 }
 
-
 template <typename Derived, typename Shape, typename Key>
 void Dictionary<Derived, Shape, Key>::CollectKeysTo(
     Handle<Dictionary<Derived, Shape, Key> > dictionary, KeyAccumulator* keys,
diff --git a/src/objects.h b/src/objects.h
index 61c6e5e..cbc9c04 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -33,6 +33,8 @@
 #include "src/mips64/constants-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_PPC
 #include "src/ppc/constants-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/constants-s390.h"  // NOLINT
 #endif
 
 
@@ -78,6 +80,7 @@
 //         - HashTable
 //           - Dictionary
 //           - StringTable
+//           - StringSet
 //           - CompilationCacheTable
 //           - CodeCacheHashTable
 //           - MapCache
@@ -419,6 +422,7 @@
   V(JS_MODULE_TYPE)                                             \
   V(JS_GLOBAL_OBJECT_TYPE)                                      \
   V(JS_GLOBAL_PROXY_TYPE)                                       \
+  V(JS_SPECIAL_API_OBJECT_TYPE)                                 \
   V(JS_ARRAY_TYPE)                                              \
   V(JS_ARRAY_BUFFER_TYPE)                                       \
   V(JS_TYPED_ARRAY_TYPE)                                        \
@@ -438,7 +442,6 @@
   V(DEBUG_INFO_TYPE)                                            \
   V(BREAK_POINT_INFO_TYPE)
 
-
 // Since string types are not consecutive, this macro is used to
 // iterate over them.
 #define STRING_TYPE_LIST(V)                                                   \
@@ -591,7 +594,6 @@
   return ((type & kShortcutTypeMask) == kShortcutTypeTag);
 }
 
-
 enum InstanceType {
   // String types.
   INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
@@ -703,16 +705,18 @@
   // objects in the JS sense. The first and the last type in this range are
   // the two forms of function. This organization enables using the same
   // compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
-  JS_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE
-  JS_VALUE_TYPE,  // FIRST_JS_OBJECT_TYPE
+  JS_PROXY_TYPE,          // FIRST_JS_RECEIVER_TYPE
+  JS_GLOBAL_OBJECT_TYPE,  // FIRST_JS_OBJECT_TYPE
+  JS_GLOBAL_PROXY_TYPE,
+  // Like JS_OBJECT_TYPE, but requires access checks and/or has interceptors.
+  JS_SPECIAL_API_OBJECT_TYPE,  // LAST_SPECIAL_RECEIVER_TYPE
+  JS_VALUE_TYPE,               // LAST_CUSTOM_ELEMENTS_RECEIVER
   JS_MESSAGE_OBJECT_TYPE,
   JS_DATE_TYPE,
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GENERATOR_OBJECT_TYPE,
   JS_MODULE_TYPE,
-  JS_GLOBAL_OBJECT_TYPE,
-  JS_GLOBAL_PROXY_TYPE,
   JS_ARRAY_TYPE,
   JS_ARRAY_BUFFER_TYPE,
   JS_TYPED_ARRAY_TYPE,
@@ -753,8 +757,14 @@
   FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE,
   LAST_JS_RECEIVER_TYPE = LAST_TYPE,
   // Boundaries for testing the types represented as JSObject
-  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
+  FIRST_JS_OBJECT_TYPE = JS_GLOBAL_OBJECT_TYPE,
   LAST_JS_OBJECT_TYPE = LAST_TYPE,
+  // Boundary for testing JSReceivers that need special property lookup handling
+  LAST_SPECIAL_RECEIVER_TYPE = JS_SPECIAL_API_OBJECT_TYPE,
+  // Boundary case for testing JSReceivers that may have elements while having
+  // an empty fixed array as elements backing store. This is true for string
+  // wrappers.
+  LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
 };
 
 STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
@@ -967,6 +977,7 @@
   V(HashTable)                     \
   V(Dictionary)                    \
   V(StringTable)                   \
+  V(StringSet)                     \
   V(NormalizedMapCache)            \
   V(CompilationCacheTable)         \
   V(CodeCacheHashTable)            \
@@ -974,7 +985,7 @@
   V(MapCache)                      \
   V(JSGlobalObject)                \
   V(JSGlobalProxy)                 \
-  V(UndetectableObject)            \
+  V(Undetectable)                  \
   V(AccessCheckNeeded)             \
   V(Callable)                      \
   V(Function)                      \
@@ -999,7 +1010,8 @@
   V(Uninitialized)      \
   V(True)               \
   V(False)              \
-  V(ArgumentsMarker)
+  V(ArgumentsMarker)    \
+  V(OptimizedOut)
 
 // The element types selection for CreateListFromArrayLike.
 enum class ElementTypes { kAll, kStringAndSymbol };
@@ -1068,7 +1080,7 @@
   INLINE(bool IsNaN() const);
   INLINE(bool IsMinusZero() const);
   bool ToInt32(int32_t* value);
-  bool ToUint32(uint32_t* value);
+  inline bool ToUint32(uint32_t* value);
 
   inline Representation OptimalRepresentation();
 
@@ -1119,9 +1131,13 @@
   MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(
       Isolate* isolate, Handle<Object> object, Handle<Context> context);
 
+  // ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
+  MUST_USE_RESULT static MaybeHandle<JSReceiver> ConvertReceiver(
+      Isolate* isolate, Handle<Object> object);
+
   // ES6 section 7.1.14 ToPropertyKey
-  MUST_USE_RESULT static MaybeHandle<Name> ToName(Isolate* isolate,
-                                                  Handle<Object> input);
+  MUST_USE_RESULT static inline MaybeHandle<Name> ToName(Isolate* isolate,
+                                                         Handle<Object> input);
 
   // ES6 section 7.1.1 ToPrimitive
   MUST_USE_RESULT static inline MaybeHandle<Object> ToPrimitive(
@@ -1231,6 +1247,10 @@
       Handle<Object> object, Handle<Name> name, Handle<Object> value,
       LanguageMode language_mode,
       StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+  MUST_USE_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
+      Handle<Object> object, Handle<Name> name, Handle<Object> value,
+      LanguageMode language_mode,
+      StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
 
   MUST_USE_RESULT static Maybe<bool> SetSuperProperty(
       LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
@@ -1261,8 +1281,6 @@
   MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
       Handle<Object> receiver, Handle<Name> name, Handle<JSReceiver> holder);
   MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
-      Isolate* isolate, Handle<Object> object, const char* key);
-  MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
       Handle<Object> object, Handle<Name> name);
 
   MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
@@ -1371,6 +1389,9 @@
       LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
       StoreFromKeyed store_mode, bool* found);
 
+  MUST_USE_RESULT static MaybeHandle<Name> ConvertToName(Isolate* isolate,
+                                                         Handle<Object> input);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
 };
 
@@ -1812,6 +1833,13 @@
   MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
       Handle<JSReceiver> object, Handle<Name> name);
 
+  MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+      Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
+  MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+      Handle<JSReceiver> receiver, Handle<Name> name);
+  MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
+      Isolate* isolate, Handle<JSReceiver> receiver, uint32_t index);
+
   // Implementation of ES6 [[Delete]]
   MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
       Handle<JSReceiver> object, Handle<Name> name,
@@ -1919,15 +1947,15 @@
                                                   bool from_javascript,
                                                   ShouldThrow should_throw);
 
-
-  static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
-                                        Handle<Name> name);
+  inline static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
+                                               Handle<Name> name);
   static Handle<Object> GetDataProperty(LookupIterator* it);
 
 
   // Retrieves a permanent object identity hash code. The undefined value might
   // be returned in case no hash was created yet.
-  inline Object* GetIdentityHash();
+  static inline Handle<Object> GetIdentityHash(Isolate* isolate,
+                                               Handle<JSReceiver> object);
 
   // Retrieves a permanent object identity hash code. May create and store a
   // hash code if needed and none exists.
@@ -1944,7 +1972,8 @@
   // "for (n in object) { }".
   MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
       Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
-      GetKeysConversion keys_conversion = KEEP_NUMBERS);
+      GetKeysConversion keys_conversion = KEEP_NUMBERS,
+      bool filter_proxy_keys = true);
 
   MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
       Handle<JSReceiver> object, PropertyFilter filter);
@@ -2037,11 +2066,12 @@
   inline bool HasSlowArgumentsElements();
   inline bool HasFastStringWrapperElements();
   inline bool HasSlowStringWrapperElements();
+  bool HasEnumerableElements();
+
   inline SeededNumberDictionary* element_dictionary();  // Gets slow elements.
 
   // Requires: HasFastElements().
-  static Handle<FixedArray> EnsureWritableFastElements(
-      Handle<JSObject> object);
+  static void EnsureWritableFastElements(Handle<JSObject> object);
 
   // Collects elements starting at index 0.
   // Undefined values are placed after non-undefined values.
@@ -2089,8 +2119,9 @@
 
   // Adds or reconfigures a property to attributes NONE. It will fail when it
   // cannot.
-  MUST_USE_RESULT static Maybe<bool> CreateDataProperty(LookupIterator* it,
-                                                        Handle<Object> value);
+  MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
+      LookupIterator* it, Handle<Object> value,
+      ShouldThrow should_throw = DONT_THROW);
 
   static void AddProperty(Handle<JSObject> object, Handle<Name> name,
                           Handle<Object> value, PropertyAttributes attributes);
@@ -2144,7 +2175,7 @@
   };
 
   // Retrieve interceptors.
-  InterceptorInfo* GetNamedInterceptor();
+  inline InterceptorInfo* GetNamedInterceptor();
   inline InterceptorInfo* GetIndexedInterceptor();
 
   // Used from JSReceiver.
@@ -2207,8 +2238,6 @@
   // Returns true if the object has a property with the hidden string as name.
   static bool HasHiddenProperties(Handle<JSObject> object);
 
-  static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
-
   static void ValidateElements(Handle<JSObject> object);
 
   // Makes sure that this object can contain HeapObject as elements.
@@ -2289,6 +2318,9 @@
 
   static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
 
+  static Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
+                                                    Handle<JSObject> object);
+
   // Returns a new map with all transitions dropped from the object's current
   // map and the ElementsKind set.
   static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
@@ -2378,10 +2410,6 @@
       Handle<JSObject> object,
       AllocationSiteUsageContext* site_context,
       DeepCopyHints hints = kNoHints);
-  // Deep copies given object with special handling for JSFunctions which
-  // 1) must be Api functions and 2) are not copied but left as is.
-  MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopyApiBoilerplate(
-      Handle<JSObject> object);
   MUST_USE_RESULT static MaybeHandle<JSObject> DeepWalk(
       Handle<JSObject> object,
       AllocationSiteCreationContext* site_context);
@@ -2525,13 +2553,11 @@
       Handle<JSObject> object,
       Handle<Object> value);
 
-  MUST_USE_RESULT Object* GetIdentityHash();
+  static Handle<Object> GetIdentityHash(Isolate* isolate,
+                                        Handle<JSObject> object);
 
   static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object);
 
-  static Handle<SeededNumberDictionary> GetNormalizedElementDictionary(
-      Handle<JSObject> object, Handle<FixedArrayBase> elements);
-
   // Helper for fast versions of preventExtensions, seal, and freeze.
   // attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
   template <PropertyAttributes attrs>
@@ -3065,6 +3091,16 @@
     return ToKeyIndex(number_of_descriptors);
   }
 
+  static int ToDetailsIndex(int descriptor_number) {
+    return kFirstIndex + (descriptor_number * kDescriptorSize) +
+           kDescriptorDetails;
+  }
+
+  // Conversion from descriptor number to array indices.
+  static int ToKeyIndex(int descriptor_number) {
+    return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorKey;
+  }
+
  private:
   // An entry in a DescriptorArray, represented as an (array, index) pair.
   class Entry {
@@ -3080,19 +3116,6 @@
     int index_;
   };
 
-  // Conversion from descriptor number to array indices.
-  static int ToKeyIndex(int descriptor_number) {
-    return kFirstIndex +
-           (descriptor_number * kDescriptorSize) +
-           kDescriptorKey;
-  }
-
-  static int ToDetailsIndex(int descriptor_number) {
-    return kFirstIndex +
-           (descriptor_number * kDescriptorSize) +
-           kDescriptorDetails;
-  }
-
   static int ToValueIndex(int descriptor_number) {
     return kFirstIndex +
            (descriptor_number * kDescriptorSize) +
@@ -3196,6 +3219,7 @@
   // Tells whether k is a real key.  The hole and undefined are not allowed
   // as keys and can be used to indicate missing or deleted elements.
   inline bool IsKey(Object* k);
+  inline bool IsKey(Heap* heap, Object* k);
 
   // Compute the probe offset (quadratic probing).
   INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
@@ -3417,6 +3441,25 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
 };
 
+class StringSetShape : public BaseShape<String*> {
+ public:
+  static inline bool IsMatch(String* key, Object* value);
+  static inline uint32_t Hash(String* key);
+  static inline uint32_t HashForObject(String* key, Object* object);
+
+  static const int kPrefixSize = 0;
+  static const int kEntrySize = 1;
+};
+
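+// A hash set of strings: each entry holds only the string key (kEntrySize is
+// 1), with no associated value.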
+class StringSet : public HashTable<StringSet, StringSetShape, String*> {
+ public:
+  static Handle<StringSet> New(Isolate* isolate);
+  static Handle<StringSet> Add(Handle<StringSet> blacklist,
+                               Handle<String> name);
+  bool Has(Handle<String> name);
+
+  DECLARE_CAST(StringSet)
+};
 
 template <typename Derived, typename Shape, typename Key>
 class Dictionary: public HashTable<Derived, Shape, Key> {
@@ -3473,10 +3516,6 @@
     return NumberOfElementsFilterAttributes(ENUMERABLE_STRINGS);
   }
 
-  // Returns true if the dictionary contains any elements that are non-writable,
-  // non-configurable, non-enumerable, or have getters/setters.
-  bool HasComplexElements();
-
   enum SortMode { UNSORTED, SORTED };
 
   // Fill in details for properties into storage.
@@ -3709,6 +3748,10 @@
 
   void UpdateMaxNumberKey(uint32_t key, bool used_as_prototype);
 
+  // Returns true if the dictionary contains any elements that are non-writable,
+  // non-configurable, non-enumerable, or have getters/setters.
+  bool HasComplexElements();
+
   // If slow elements are required we will never go back to fast-case
   // for the elements kept in this dictionary.  We require slow
   // elements if an element has been added at an index larger than
@@ -4462,7 +4505,7 @@
 
   // Accessors for source position table containing mappings between byte code
   // offset and source position.
-  DECL_ACCESSORS(source_position_table, FixedArray)
+  DECL_ACCESSORS(source_position_table, ByteArray)
 
   DECLARE_CAST(BytecodeArray)
 
@@ -4870,11 +4913,14 @@
 #define NON_IC_KIND_LIST(V) \
   V(FUNCTION)               \
   V(OPTIMIZED_FUNCTION)     \
+  V(BYTECODE_HANDLER)       \
   V(STUB)                   \
   V(HANDLER)                \
   V(BUILTIN)                \
   V(REGEXP)                 \
-  V(WASM_FUNCTION)
+  V(WASM_FUNCTION)          \
+  V(WASM_TO_JS_FUNCTION)    \
+  V(JS_TO_WASM_FUNCTION)
 
 #define IC_KIND_LIST(V) \
   V(LOAD_IC)            \
@@ -4884,7 +4930,6 @@
   V(KEYED_STORE_IC)     \
   V(BINARY_OP_IC)       \
   V(COMPARE_IC)         \
-  V(COMPARE_NIL_IC)     \
   V(TO_BOOLEAN_IC)
 
 #define CODE_KIND_LIST(V) \
@@ -4998,10 +5043,10 @@
   inline bool is_call_stub();
   inline bool is_binary_op_stub();
   inline bool is_compare_ic_stub();
-  inline bool is_compare_nil_ic_stub();
   inline bool is_to_boolean_ic_stub();
   inline bool is_keyed_stub();
   inline bool is_optimized_code();
+  inline bool is_wasm_code();
   inline bool embeds_maps_weakly();
 
   inline bool IsCodeStubOrIC();
@@ -5244,6 +5289,7 @@
   static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
   static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
   void MakeYoung(Isolate* isolate);
+  void PreAge(Isolate* isolate);
   void MarkToBeExecutedOnce(Isolate* isolate);
   void MakeOlder(MarkingParity);
   static bool IsYoungSequence(Isolate* isolate, byte* sequence);
@@ -5301,8 +5347,9 @@
   // Note: We might be able to squeeze this into the flags above.
   static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
   static const int kConstantPoolOffset = kPrologueOffset + kIntSize;
-  static const int kHeaderPaddingStart =
+  static const int kBuiltinIndexOffset =
       kConstantPoolOffset + kConstantPoolSize;
+  static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;
 
   // Add padding to align the instruction start following right after
   // the Code object header.
@@ -5326,10 +5373,11 @@
   class TypeField : public BitField<StubType, 3, 1> {};
   class CacheHolderField : public BitField<CacheHolderFlag, 4, 2> {};
   class KindField : public BitField<Kind, 6, 5> {};
-  class ExtraICStateField: public BitField<ExtraICState, 11,
-      PlatformSmiTagging::kSmiValueSize - 11 + 1> {};  // NOLINT
+  class ExtraICStateField
+      : public BitField<ExtraICState, 11, PlatformSmiTagging::kSmiValueSize -
+                                              11 + 1> {};  // NOLINT
 
-  // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
+  // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
   static const int kStackSlotsFirstBit = 0;
   static const int kStackSlotsBitCount = 24;
   static const int kMarkedForDeoptimizationBit =
@@ -5404,11 +5452,37 @@
 
 class AbstractCode : public HeapObject {
  public:
+  // All code kinds and INTERPRETED_FUNCTION.
+  enum Kind {
+#define DEFINE_CODE_KIND_ENUM(name) name,
+    CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
+#undef DEFINE_CODE_KIND_ENUM
+        INTERPRETED_FUNCTION,
+  };
+
   int SourcePosition(int offset);
   int SourceStatementPosition(int offset);
 
+  // Returns the address of the first instruction.
+  inline Address instruction_start();
+
+  // Returns the address right after the last instruction.
+  inline Address instruction_end();
+
+  // Returns the size of the code instructions.
+  inline int instruction_size();
+
+  // Returns true if pc is inside this object's instructions.
+  inline bool contains(byte* pc);
+
+  // Returns the AbstractCode::Kind of the code.
+  inline Kind kind();
+
+  // Calculate the size of the code object to report for log events. This takes
+  // the layout of the code object into account.
+  inline int ExecutableSize();
+
   DECLARE_CAST(AbstractCode)
-  inline int Size();
   inline Code* GetCode();
   inline BytecodeArray* GetBytecodeArray();
 };
@@ -5565,6 +5639,10 @@
   static MaybeHandle<JSFunction> GetConstructorFunction(
       Handle<Map> map, Handle<Context> native_context);
 
+  // Retrieve interceptors.
+  inline InterceptorInfo* GetNamedInterceptor();
+  inline InterceptorInfo* GetIndexedInterceptor();
+
   // Instance type.
   inline InstanceType instance_type();
   inline void set_instance_type(InstanceType value);
@@ -5597,7 +5675,7 @@
   class Deprecated : public BitField<bool, 23, 1> {};
   class IsUnstable : public BitField<bool, 24, 1> {};
   class IsMigrationTarget : public BitField<bool, 25, 1> {};
-  class IsStrong : public BitField<bool, 26, 1> {};
+  // Bit 26 is free.
   class NewTargetIsBase : public BitField<bool, 27, 1> {};
   // Bit 28 is free.
 
@@ -5706,8 +5784,6 @@
   inline void set_is_callable();
   inline bool is_callable() const;
 
-  inline void set_is_strong();
-  inline bool is_strong();
   inline void set_new_target_is_base(bool value);
   inline bool new_target_is_base();
   inline void set_is_extensible(bool value);
@@ -5961,8 +6037,9 @@
                                               PropertyAttributes attributes,
                                               StoreFromKeyed store_mode);
   static Handle<Map> TransitionToAccessorProperty(
-      Handle<Map> map, Handle<Name> name, AccessorComponent component,
-      Handle<Object> accessor, PropertyAttributes attributes);
+      Handle<Map> map, Handle<Name> name, int descriptor,
+      AccessorComponent component, Handle<Object> accessor,
+      PropertyAttributes attributes);
   static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
                                                  int descriptor,
                                                  PropertyKind kind,
@@ -6535,41 +6612,43 @@
 //
 // Installation of ids for the selected builtin functions is handled
 // by the bootstrapper.
-#define FUNCTIONS_WITH_ID_LIST(V)                     \
-  V(Array.prototype, indexOf, ArrayIndexOf)           \
-  V(Array.prototype, lastIndexOf, ArrayLastIndexOf)   \
-  V(Array.prototype, push, ArrayPush)                 \
-  V(Array.prototype, pop, ArrayPop)                   \
-  V(Array.prototype, shift, ArrayShift)               \
-  V(Function.prototype, apply, FunctionApply)         \
-  V(Function.prototype, call, FunctionCall)           \
-  V(String.prototype, charCodeAt, StringCharCodeAt)   \
-  V(String.prototype, charAt, StringCharAt)           \
-  V(String.prototype, concat, StringConcat)           \
-  V(String.prototype, toLowerCase, StringToLowerCase) \
-  V(String.prototype, toUpperCase, StringToUpperCase) \
-  V(String, fromCharCode, StringFromCharCode)         \
-  V(Math, random, MathRandom)                         \
-  V(Math, floor, MathFloor)                           \
-  V(Math, round, MathRound)                           \
-  V(Math, ceil, MathCeil)                             \
-  V(Math, abs, MathAbs)                               \
-  V(Math, log, MathLog)                               \
-  V(Math, exp, MathExp)                               \
-  V(Math, sqrt, MathSqrt)                             \
-  V(Math, pow, MathPow)                               \
-  V(Math, max, MathMax)                               \
-  V(Math, min, MathMin)                               \
-  V(Math, cos, MathCos)                               \
-  V(Math, sin, MathSin)                               \
-  V(Math, tan, MathTan)                               \
-  V(Math, acos, MathAcos)                             \
-  V(Math, asin, MathAsin)                             \
-  V(Math, atan, MathAtan)                             \
-  V(Math, atan2, MathAtan2)                           \
-  V(Math, imul, MathImul)                             \
-  V(Math, clz32, MathClz32)                           \
-  V(Math, fround, MathFround)
+#define FUNCTIONS_WITH_ID_LIST(V)                           \
+  V(Array.prototype, indexOf, ArrayIndexOf)                 \
+  V(Array.prototype, lastIndexOf, ArrayLastIndexOf)         \
+  V(Array.prototype, push, ArrayPush)                       \
+  V(Array.prototype, pop, ArrayPop)                         \
+  V(Array.prototype, shift, ArrayShift)                     \
+  V(Function.prototype, apply, FunctionApply)               \
+  V(Function.prototype, call, FunctionCall)                 \
+  V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
+  V(String.prototype, charCodeAt, StringCharCodeAt)         \
+  V(String.prototype, charAt, StringCharAt)                 \
+  V(String.prototype, concat, StringConcat)                 \
+  V(String.prototype, toLowerCase, StringToLowerCase)       \
+  V(String.prototype, toUpperCase, StringToUpperCase)       \
+  V(String, fromCharCode, StringFromCharCode)               \
+  V(Math, random, MathRandom)                               \
+  V(Math, floor, MathFloor)                                 \
+  V(Math, round, MathRound)                                 \
+  V(Math, ceil, MathCeil)                                   \
+  V(Math, abs, MathAbs)                                     \
+  V(Math, log, MathLog)                                     \
+  V(Math, exp, MathExp)                                     \
+  V(Math, sqrt, MathSqrt)                                   \
+  V(Math, pow, MathPow)                                     \
+  V(Math, max, MathMax)                                     \
+  V(Math, min, MathMin)                                     \
+  V(Math, cos, MathCos)                                     \
+  V(Math, sin, MathSin)                                     \
+  V(Math, tan, MathTan)                                     \
+  V(Math, acos, MathAcos)                                   \
+  V(Math, asin, MathAsin)                                   \
+  V(Math, atan, MathAtan)                                   \
+  V(Math, atan2, MathAtan2)                                 \
+  V(Math, imul, MathImul)                                   \
+  V(Math, clz32, MathClz32)                                 \
+  V(Math, fround, MathFround)                               \
+  V(Math, trunc, MathTrunc)
 
 #define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
   V(Atomics, load, AtomicsLoad)          \
@@ -6577,6 +6656,9 @@
 
 enum BuiltinFunctionId {
   kArrayCode,
+  kGeneratorObjectNext,
+  kGeneratorObjectReturn,
+  kGeneratorObjectThrow,
 #define DECLARE_FUNCTION_ID(ignored1, ignore2, name)    \
   k##name,
   FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
@@ -6606,6 +6688,10 @@
   // [code]: Function code.
   DECL_ACCESSORS(code, Code)
 
+  // Get the abstract code associated with the function, which will either be
+  // a Code object or a BytecodeArray.
+  inline AbstractCode* abstract_code();
+
   inline void ReplaceCode(Code* code);
 
   // [optimized_code_map]: Map from native context to optimized code
@@ -6635,22 +6721,17 @@
   // Trims the optimized code map after entries have been removed.
   void TrimOptimizedCodeMap(int shrink_by);
 
-  // Add a new entry to the optimized code map for context-independent code.
+  // Add or update entry in the optimized code map for context-independent code.
   static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                               Handle<Code> code);
 
-  // Add a new entry to the optimized code map for context-dependent code.
-  inline static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
-                                           Handle<Context> native_context,
-                                           Handle<Code> code,
-                                           Handle<LiteralsArray> literals,
-                                           BailoutId osr_ast_id);
-
-  // We may already have cached the code, but want to store literals in the
-  // cache.
-  inline static void AddLiteralsToOptimizedCodeMap(
-      Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
-      Handle<LiteralsArray> literals);
+  // Add or update entry in the optimized code map for context-dependent code.
+  // If {code} is not given, then an existing entry's code won't be overwritten.
+  static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+                                    Handle<Context> native_context,
+                                    MaybeHandle<Code> code,
+                                    Handle<LiteralsArray> literals,
+                                    BailoutId osr_ast_id);
 
   // Set up the link between shared function info and the script. The shared
   // function info is added to the list on the script.
@@ -6721,19 +6802,34 @@
   // [function data]: This field holds some additional data for function.
   // Currently it has one of:
   //  - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
-  //  - a Smi identifying a builtin function [HasBuiltinFunctionId()].
   //  - a BytecodeArray for the interpreter [HasBytecodeArray()].
-  // In the long run we don't want all functions to have this field but
-  // we can fix that when we have a better model for storing hidden data
-  // on objects.
   DECL_ACCESSORS(function_data, Object)
 
   inline bool IsApiFunction();
   inline FunctionTemplateInfo* get_api_func_data();
-  inline bool HasBuiltinFunctionId();
-  inline BuiltinFunctionId builtin_function_id();
+  inline void set_api_func_data(FunctionTemplateInfo* data);
   inline bool HasBytecodeArray();
   inline BytecodeArray* bytecode_array();
+  inline void set_bytecode_array(BytecodeArray* bytecode);
+  inline void ClearBytecodeArray();
+
+  // [function identifier]: This field holds an additional identifier for the
+  // function.
+  //  - a Smi identifying a builtin function [HasBuiltinFunctionId()].
+  //  - a String identifying the function's inferred name [HasInferredName()].
+  // The inferred_name is inferred from variable or property
+  // assignment of this function. It is used to facilitate debugging and
+  // profiling of JavaScript code written in OO style, where almost
+  // all functions are anonymous but are assigned to object
+  // properties.
+  DECL_ACCESSORS(function_identifier, Object)
+
+  inline bool HasBuiltinFunctionId();
+  inline BuiltinFunctionId builtin_function_id();
+  inline void set_builtin_function_id(BuiltinFunctionId id);
+  inline bool HasInferredName();
+  inline String* inferred_name();
+  inline void set_inferred_name(String* inferred_name);
 
   // [script info]: Script from which the function originates.
   DECL_ACCESSORS(script, Object)
@@ -6760,16 +6856,12 @@
   // [debug info]: Debug information.
   DECL_ACCESSORS(debug_info, Object)
 
-  // [inferred name]: Name inferred from variable or property
-  // assignment of this function. Used to facilitate debugging and
-  // profiling of JavaScript code written in OO style, where almost
-  // all functions are anonymous but are assigned to object
-  // properties.
-  DECL_ACCESSORS(inferred_name, String)
-
   // The function's name if it is non-empty, otherwise the inferred name.
   String* DebugName();
 
+  // Used for flags such as --hydrogen-filter.
+  bool PassesFilter(const char* raw_filter);
+
   // Position of the 'function' token in the script source.
   inline int function_token_position() const;
   inline void set_function_token_position(int function_token_position);
@@ -7006,9 +7098,9 @@
       kInstanceClassNameOffset + kPointerSize;
   static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
   static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
-  static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
+  static const int kFunctionIdentifierOffset = kDebugInfoOffset + kPointerSize;
   static const int kFeedbackVectorOffset =
-      kInferredNameOffset + kPointerSize;
+      kFunctionIdentifierOffset + kPointerSize;
 #if TRACE_MAPS
   static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
   static const int kLastPointerFieldOffset = kUniqueIdOffset;
@@ -7139,9 +7231,9 @@
     kAllowLazyCompilation,
     kAllowLazyCompilationWithoutContext,
     kOptimizationDisabled,
+    kNeverCompiled,
     kNative,
     kStrictModeFunction,
-    kStrongModeFunction,
     kUsesArguments,
     kNeedsHomeObject,
     // byte 1
@@ -7165,7 +7257,6 @@
     kIsSetterFunction,
     // byte 3
     kDeserialized,
-    kNeverCompiled,
     kIsDeclaration,
     kCompilerHintsCount,  // Pseudo entry
   };
@@ -7215,8 +7306,6 @@
   // native tests when using integer-width instructions.
   static const int kStrictModeBit =
       kStrictModeFunction + kCompilerHintsSmiTagSize;
-  static const int kStrongModeBit =
-      kStrongModeFunction + kCompilerHintsSmiTagSize;
   static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
 
   static const int kClassConstructorBits =
@@ -7227,7 +7316,6 @@
   // native tests.
   // Allows to use byte-width instructions.
   static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
-  static const int kStrongModeBitWithinByte = kStrongModeBit % kBitsPerByte;
   static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
 
   static const int kClassConstructorBitsWithinByte =
@@ -7246,7 +7334,6 @@
 #error Unknown byte ordering
 #endif
   static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
-  static const int kStrongModeByteOffset = BYTE_OFFSET(kStrongModeFunction);
   static const int kNativeByteOffset = BYTE_OFFSET(kNative);
   static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
 #undef BYTE_OFFSET
@@ -7258,13 +7345,6 @@
   int SearchOptimizedCodeMapEntry(Context* native_context,
                                   BailoutId osr_ast_id);
 
-  // If code is undefined, then existing code won't be overwritten.
-  static void AddToOptimizedCodeMapInternal(Handle<SharedFunctionInfo> shared,
-                                            Handle<Context> native_context,
-                                            Handle<HeapObject> code,
-                                            Handle<LiteralsArray> literals,
-                                            BailoutId osr_ast_id);
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
 
@@ -7394,6 +7474,9 @@
   // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
   static Handle<String> ToString(Handle<JSBoundFunction> function);
 
+  static MaybeHandle<String> GetName(Isolate* isolate,
+                                     Handle<JSBoundFunction> function);
+
   // Layout description.
   static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
   static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
@@ -7438,6 +7521,10 @@
   inline void set_code_no_write_barrier(Code* code);
   inline void ReplaceCode(Code* code);
 
+  // Get the abstract code associated with the function, which will either be
+  // a Code object or a BytecodeArray.
+  inline AbstractCode* abstract_code();
+
   // Tells whether this function inlines the given shared function info.
   bool Inlines(SharedFunctionInfo* candidate);
 
@@ -7524,7 +7611,11 @@
                                             int requested_internal_fields,
                                             int* instance_size,
                                             int* in_object_properties);
-
+  static void CalculateInstanceSizeHelper(InstanceType instance_type,
+                                          int requested_internal_fields,
+                                          int requested_in_object_properties,
+                                          int* instance_size,
+                                          int* in_object_properties);
   // Visiting policy flags define whether the code entry or next function
   // should be visited or not.
   enum BodyVisitingPolicy {
@@ -7553,9 +7644,6 @@
   // Returns the number of allocated literals.
   inline int NumberOfLiterals();
 
-  // Used for flags such as --hydrogen-filter.
-  bool PassesFilter(const char* raw_filter);
-
   // The function's name if it is configured, otherwise shared function info
   // debug name.
   static Handle<String> GetName(Handle<JSFunction> function);
@@ -9507,6 +9595,9 @@
   // [to_number]: Cached to_number computed at startup.
   DECL_ACCESSORS(to_number, Object)
 
+  // [to_boolean]: Cached to_boolean computed at startup.
+  DECL_ACCESSORS(to_boolean, Oddball)
+
   // [typeof]: Cached type_of computed at startup.
   DECL_ACCESSORS(type_of, String)
 
@@ -9524,12 +9615,13 @@
   // Initialize the fields.
   static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
                          const char* to_string, Handle<Object> to_number,
-                         const char* type_of, byte kind);
+                         bool to_boolean, const char* type_of, byte kind);
 
   // Layout description.
   static const int kToStringOffset = HeapObject::kHeaderSize;
   static const int kToNumberOffset = kToStringOffset + kPointerSize;
-  static const int kTypeOfOffset = kToNumberOffset + kPointerSize;
+  static const int kToBooleanOffset = kToNumberOffset + kPointerSize;
+  static const int kTypeOfOffset = kToBooleanOffset + kPointerSize;
   static const int kKindOffset = kTypeOfOffset + kPointerSize;
   static const int kSize = kKindOffset + kPointerSize;
 
@@ -9543,6 +9635,7 @@
   static const byte kUninitialized = 6;
   static const byte kOther = 7;
   static const byte kException = 8;
+  static const byte kOptimizedOut = 9;
 
   typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
                               kSize> BodyDescriptor;
@@ -9731,7 +9824,7 @@
   // ES6 9.5.8
   MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
       Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
-      Handle<Object> receiver);
+      Handle<Object> receiver, bool* was_found);
 
   // ES6 9.5.9
   MUST_USE_RESULT static Maybe<bool> SetProperty(Handle<JSProxy> proxy,
@@ -9765,7 +9858,8 @@
   typedef FixedBodyDescriptor<JSReceiver::kPropertiesOffset, kSize, kSize>
       BodyDescriptor;
 
-  MUST_USE_RESULT Object* GetIdentityHash();
+  static Handle<Object> GetIdentityHash(Isolate* isolate,
+                                        Handle<JSProxy> receiver);
 
   static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
 
@@ -10232,6 +10326,12 @@
                                                     PropertyDescriptor* desc,
                                                     ShouldThrow should_throw);
 
+  // Checks whether the Array has the current realm's Array.prototype as its
+  // prototype. This function is best-effort and only gives a conservative
+  // approximation, erring on the side of false, in particular with respect
+  // to Proxies and objects with a hidden prototype.
+  inline bool HasArrayPrototype(Isolate* isolate);
+
   DECLARE_CAST(JSArray)
 
   // Dispatched behavior.
@@ -10312,6 +10412,9 @@
   inline bool is_special_data_property();
   inline void set_is_special_data_property(bool value);
 
+  inline bool is_sloppy();
+  inline void set_is_sloppy(bool value);
+
   inline PropertyAttributes property_attributes();
   inline void set_property_attributes(PropertyAttributes attributes);
 
@@ -10348,7 +10451,8 @@
   static const int kAllCanReadBit = 0;
   static const int kAllCanWriteBit = 1;
   static const int kSpecialDataProperty = 2;
-  class AttributesField : public BitField<PropertyAttributes, 3, 3> {};
+  static const int kIsSloppy = 3;
+  class AttributesField : public BitField<PropertyAttributes, 4, 3> {};
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
 };
diff --git a/src/optimizing-compile-dispatcher.cc b/src/optimizing-compile-dispatcher.cc
index 4836b9b..ed20224 100644
--- a/src/optimizing-compile-dispatcher.cc
+++ b/src/optimizing-compile-dispatcher.cc
@@ -20,17 +20,8 @@
   // The recompile job is allocated in the CompilationInfo's zone.
   CompilationInfo* info = job->info();
   if (restore_function_code) {
-    if (info->is_osr()) {
-      if (!job->IsWaitingForInstall()) {
-        // Remove stack check that guards OSR entry on original code.
-        Handle<Code> code = info->unoptimized_code();
-        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
-        BackEdgeTable::RemoveStackCheck(code, offset);
-      }
-    } else {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
-    }
+    Handle<JSFunction> function = info->closure();
+    function->ReplaceCode(function->shared()->code());
   }
   delete info;
 }
@@ -92,14 +83,6 @@
 #endif
   DCHECK_EQ(0, input_queue_length_);
   DeleteArray(input_queue_);
-  if (FLAG_concurrent_osr) {
-#ifdef DEBUG
-    for (int i = 0; i < osr_buffer_capacity_; i++) {
-      CHECK_NULL(osr_buffer_[i]);
-    }
-#endif
-    DeleteArray(osr_buffer_);
-  }
 }
 
 
@@ -159,16 +142,6 @@
 }
 
 
-void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
-  for (int i = 0; i < osr_buffer_capacity_; i++) {
-    if (osr_buffer_[i] != NULL) {
-      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
-      osr_buffer_[i] = NULL;
-    }
-  }
-}
-
-
 void OptimizingCompileDispatcher::Flush() {
   base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
   if (FLAG_block_concurrent_recompilation) Unblock();
@@ -178,7 +151,6 @@
     base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
   }
   FlushOutputQueue(true);
-  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Flushed concurrent recompilation queues.\n");
   }
@@ -202,13 +174,6 @@
   } else {
     FlushOutputQueue(false);
   }
-
-  if (FLAG_concurrent_osr) FlushOsrBuffer(false);
-
-  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
-      FLAG_concurrent_osr) {
-    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
-  }
 }
 
 
@@ -225,31 +190,15 @@
     }
     CompilationInfo* info = job->info();
     Handle<JSFunction> function(*info->closure());
-    if (info->is_osr()) {
-      if (FLAG_trace_osr) {
-        PrintF("[COSR - ");
+    if (function->IsOptimized()) {
+      if (FLAG_trace_concurrent_recompilation) {
+        PrintF("  ** Aborting compilation for ");
         function->ShortPrint();
-        PrintF(" is ready for install and entry at AST id %d]\n",
-               info->osr_ast_id().ToInt());
+        PrintF(" as it has already been optimized.\n");
       }
-      job->WaitForInstall();
-      // Remove stack check that guards OSR entry on original code.
-      Handle<Code> code = info->unoptimized_code();
-      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
-      BackEdgeTable::RemoveStackCheck(code, offset);
+      DisposeOptimizedCompileJob(job, false);
     } else {
-      if (function->IsOptimized()) {
-        if (FLAG_trace_concurrent_recompilation) {
-          PrintF("  ** Aborting compilation for ");
-          function->ShortPrint();
-          PrintF(" as it has already been optimized.\n");
-        }
-        DisposeOptimizedCompileJob(job, false);
-      } else {
-        MaybeHandle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
-        function->ReplaceCode(code.is_null() ? function->shared()->code()
-                                             : *code.ToHandleChecked());
-      }
+      Compiler::FinalizeOptimizedCompileJob(job);
     }
   }
 }
@@ -258,18 +207,7 @@
 void OptimizingCompileDispatcher::QueueForOptimization(
     OptimizedCompileJob* job) {
   DCHECK(IsQueueAvailable());
-  CompilationInfo* info = job->info();
-  if (info->is_osr()) {
-    osr_attempts_++;
-    AddToOsrBuffer(job);
-    // Add job to the front of the input queue.
-    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
-    DCHECK_LT(input_queue_length_, input_queue_capacity_);
-    // Move shift_ back by one.
-    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
-    input_queue_[InputQueueIndex(0)] = job;
-    input_queue_length_++;
-  } else {
+  {
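+    // Scope block: limits the lifetime of the lock guard below to the queue
+    // insertion.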
     // Add job to the back of the input queue.
     base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
     DCHECK_LT(input_queue_length_, input_queue_capacity_);
@@ -294,67 +232,5 @@
 }
 
 
-OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
-    Handle<JSFunction> function, BailoutId osr_ast_id) {
-  for (int i = 0; i < osr_buffer_capacity_; i++) {
-    OptimizedCompileJob* current = osr_buffer_[i];
-    if (current != NULL && current->IsWaitingForInstall() &&
-        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
-      osr_hits_++;
-      osr_buffer_[i] = NULL;
-      return current;
-    }
-  }
-  return NULL;
-}
-
-
-bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
-                                                 BailoutId osr_ast_id) {
-  for (int i = 0; i < osr_buffer_capacity_; i++) {
-    OptimizedCompileJob* current = osr_buffer_[i];
-    if (current != NULL &&
-        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
-      return !current->IsWaitingForInstall();
-    }
-  }
-  return false;
-}
-
-
-bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
-  for (int i = 0; i < osr_buffer_capacity_; i++) {
-    OptimizedCompileJob* current = osr_buffer_[i];
-    if (current != NULL && *current->info()->closure() == function) {
-      return !current->IsWaitingForInstall();
-    }
-  }
-  return false;
-}
-
-
-void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
-  // Find the next slot that is empty or has a stale job.
-  OptimizedCompileJob* stale = NULL;
-  while (true) {
-    stale = osr_buffer_[osr_buffer_cursor_];
-    if (stale == NULL || stale->IsWaitingForInstall()) break;
-    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
-  }
-
-  // Add to found slot and dispose the evicted job.
-  if (stale != NULL) {
-    DCHECK(stale->IsWaitingForInstall());
-    CompilationInfo* info = stale->info();
-    if (FLAG_trace_osr) {
-      PrintF("[COSR - Discarded ");
-      info->closure()->PrintName();
-      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
-    }
-    DisposeOptimizedCompileJob(stale, false);
-  }
-  osr_buffer_[osr_buffer_cursor_] = job;
-  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
-}
 }  // namespace internal
 }  // namespace v8
diff --git a/src/optimizing-compile-dispatcher.h b/src/optimizing-compile-dispatcher.h
index 9c4e4cb..e14e8aa 100644
--- a/src/optimizing-compile-dispatcher.h
+++ b/src/optimizing-compile-dispatcher.h
@@ -28,20 +28,11 @@
         input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
         input_queue_length_(0),
         input_queue_shift_(0),
-        osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
-        osr_buffer_cursor_(0),
-        osr_hits_(0),
-        osr_attempts_(0),
         blocked_jobs_(0),
         ref_count_(0),
         recompilation_delay_(FLAG_concurrent_recompilation_delay) {
     base::NoBarrier_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
     input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
-    if (FLAG_concurrent_osr) {
-      // Allocate and mark OSR buffer slots as empty.
-      osr_buffer_ = NewArray<OptimizedCompileJob*>(osr_buffer_capacity_);
-      for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
-    }
   }
 
   ~OptimizingCompileDispatcher();
@@ -52,24 +43,12 @@
   void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
   void Unblock();
   void InstallOptimizedFunctions();
-  OptimizedCompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
-                                             BailoutId osr_ast_id);
-  bool IsQueuedForOSR(Handle<JSFunction> function, BailoutId osr_ast_id);
-
-  bool IsQueuedForOSR(JSFunction* function);
 
   inline bool IsQueueAvailable() {
     base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
     return input_queue_length_ < input_queue_capacity_;
   }
 
-  inline void AgeBufferedOsrJobs() {
-    // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
-    // Dispose said OSR job in the latter case.  Calling this on every GC
-    // should make sure that we do not hold onto stale jobs indefinitely.
-    AddToOsrBuffer(NULL);
-  }
-
   static bool Enabled() { return FLAG_concurrent_recompilation; }
 
  private:
@@ -78,14 +57,9 @@
   enum ModeFlag { COMPILE, FLUSH };
 
   void FlushOutputQueue(bool restore_function_code);
-  void FlushOsrBuffer(bool restore_function_code);
   void CompileNext(OptimizedCompileJob* job);
   OptimizedCompileJob* NextInput(bool check_if_flushing = false);
 
-  // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
-  // Tasks evicted from the cyclic buffer are discarded.
-  void AddToOsrBuffer(OptimizedCompileJob* compiler);
-
   inline int InputQueueIndex(int i) {
     int result = (i + input_queue_shift_) % input_queue_capacity_;
     DCHECK_LE(0, result);
@@ -108,16 +82,8 @@
   // different threads.
   base::Mutex output_queue_mutex_;
 
-  // Cyclic buffer of recompilation tasks for OSR.
-  OptimizedCompileJob** osr_buffer_;
-  int osr_buffer_capacity_;
-  int osr_buffer_cursor_;
-
   volatile base::AtomicWord mode_;
 
-  int osr_hits_;
-  int osr_attempts_;
-
   int blocked_jobs_;
 
   int ref_count_;
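
For context on the concurrent-OSR machinery deleted above: AddToOsrBuffer kept OSR compile jobs in a fixed-size ring, advancing a cursor until it found an empty or stale (waiting-for-install) slot and disposing whatever it evicted. A minimal standalone sketch of that eviction pattern, using a hypothetical Job type rather than OptimizedCompileJob:

    #include <array>
    #include <cstdio>
    #include <memory>

    // Hypothetical stand-in for OptimizedCompileJob.
    struct Job {
      int id = 0;
      bool waiting_for_install = false;  // "stale": only waiting to be installed
    };

    class OsrRing {
     public:
      // Insert a job, evicting the first empty or stale slot found. Relies on
      // the same invariant the dispatcher had: some slot is always empty or stale.
      void Add(std::unique_ptr<Job> job) {
        while (slots_[cursor_] && !slots_[cursor_]->waiting_for_install) {
          cursor_ = (cursor_ + 1) % slots_.size();
        }
        if (slots_[cursor_]) {
          std::printf("discarding stale OSR job %d\n", slots_[cursor_]->id);
        }
        slots_[cursor_] = std::move(job);  // the evicted job is destroyed here
        cursor_ = (cursor_ + 1) % slots_.size();
      }

     private:
      std::array<std::unique_ptr<Job>, 4> slots_;
      std::size_t cursor_ = 0;
    };

    int main() {
      OsrRing ring;
      for (int i = 0; i < 6; ++i) {
        auto job = std::make_unique<Job>();
        job->id = i;
        job->waiting_for_install = (i % 2 == 0);  // pretend even jobs went stale
        ring.Add(std::move(job));
      }
    }
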
diff --git a/src/parsing/expression-classifier.h b/src/parsing/expression-classifier.h
index fa1a2f9..71fa3d3 100644
--- a/src/parsing/expression-classifier.h
+++ b/src/parsing/expression-classifier.h
@@ -36,18 +36,16 @@
     AssignmentPatternProduction = 1 << 3,
     DistinctFormalParametersProduction = 1 << 4,
     StrictModeFormalParametersProduction = 1 << 5,
-    StrongModeFormalParametersProduction = 1 << 6,
-    ArrowFormalParametersProduction = 1 << 7,
-    LetPatternProduction = 1 << 8,
-    CoverInitializedNameProduction = 1 << 9,
+    ArrowFormalParametersProduction = 1 << 6,
+    LetPatternProduction = 1 << 7,
+    CoverInitializedNameProduction = 1 << 8,
 
     ExpressionProductions =
         (ExpressionProduction | FormalParameterInitializerProduction),
     PatternProductions = (BindingPatternProduction |
                           AssignmentPatternProduction | LetPatternProduction),
     FormalParametersProductions = (DistinctFormalParametersProduction |
-                                   StrictModeFormalParametersProduction |
-                                   StrongModeFormalParametersProduction),
+                                   StrictModeFormalParametersProduction),
     StandardProductions = ExpressionProductions | PatternProductions,
     AllProductions =
         (StandardProductions | FormalParametersProductions |
@@ -110,12 +108,6 @@
     return is_valid(StrictModeFormalParametersProduction);
   }
 
-  // Note: callers should also check is_valid_strict_mode_formal_parameters()
-  // and is_valid_formal_parameter_list_without_duplicates().
-  bool is_valid_strong_mode_formal_parameters() const {
-    return is_valid(StrongModeFormalParametersProduction);
-  }
-
   bool is_valid_let_pattern() const { return is_valid(LetPatternProduction); }
 
   const Error& expression_error() const { return expression_error_; }
@@ -142,10 +134,6 @@
     return strict_mode_formal_parameter_error_;
   }
 
-  const Error& strong_mode_formal_parameter_error() const {
-    return strong_mode_formal_parameter_error_;
-  }
-
   const Error& let_pattern_error() const { return let_pattern_error_; }
 
   bool has_cover_initialized_name() const {
@@ -252,16 +240,6 @@
     strict_mode_formal_parameter_error_.arg = arg;
   }
 
-  void RecordStrongModeFormalParameterError(const Scanner::Location& loc,
-                                            MessageTemplate::Template message,
-                                            const char* arg = nullptr) {
-    if (!is_valid_strong_mode_formal_parameters()) return;
-    invalid_productions_ |= StrongModeFormalParametersProduction;
-    strong_mode_formal_parameter_error_.location = loc;
-    strong_mode_formal_parameter_error_.message = message;
-    strong_mode_formal_parameter_error_.arg = arg;
-  }
-
   void RecordLetPatternError(const Scanner::Location& loc,
                              MessageTemplate::Template message,
                              const char* arg = nullptr) {
@@ -323,9 +301,6 @@
       if (errors & StrictModeFormalParametersProduction)
         strict_mode_formal_parameter_error_ =
             inner->strict_mode_formal_parameter_error_;
-      if (errors & StrongModeFormalParametersProduction)
-        strong_mode_formal_parameter_error_ =
-            inner->strong_mode_formal_parameter_error_;
       if (errors & LetPatternProduction)
         let_pattern_error_ = inner->let_pattern_error_;
       if (errors & CoverInitializedNameProduction)
@@ -372,7 +347,6 @@
   Error arrow_formal_parameters_error_;
   Error duplicate_formal_parameter_error_;
   Error strict_mode_formal_parameter_error_;
-  Error strong_mode_formal_parameter_error_;
   Error let_pattern_error_;
   Error cover_initialized_name_error_;
   DuplicateFinder* duplicate_finder_;
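
The bit renumbering above is safe because the classifier only treats each production as a flag in invalid_productions_; removing the strong-mode production simply compacts the remaining bits. A rough sketch of that bit-flag error-tracking idiom, with hypothetical names rather than the real ExpressionClassifier:

    #include <cstdio>

    // Hypothetical miniature of the bit-flag idiom; not the real classifier.
    class MiniClassifier {
     public:
      enum TargetProduction {
        ExpressionProduction = 1 << 0,
        BindingPatternProduction = 1 << 1,
        ArrowFormalParametersProduction = 1 << 2
      };

      // A production stays "valid" until an error has been recorded against it.
      bool is_valid(unsigned productions) const {
        return (invalid_productions_ & productions) == 0;
      }

      // Only the first error per production is kept, like the Record* methods.
      void RecordBindingPatternError(const char* message) {
        if (!is_valid(BindingPatternProduction)) return;
        invalid_productions_ |= BindingPatternProduction;
        binding_pattern_error_ = message;
      }

      const char* binding_pattern_error() const { return binding_pattern_error_; }

     private:
      unsigned invalid_productions_ = 0;
      const char* binding_pattern_error_ = "";
    };

    int main() {
      MiniClassifier c;
      c.RecordBindingPatternError("Unexpected token");
      c.RecordBindingPatternError("a later error is ignored");
      std::printf("valid as binding pattern: %d (%s)\n",
                  c.is_valid(MiniClassifier::BindingPatternProduction),
                  c.binding_pattern_error());
    }
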
diff --git a/src/parsing/parameter-initializer-rewriter.cc b/src/parsing/parameter-initializer-rewriter.cc
index 003bbeb..3e3587b 100644
--- a/src/parsing/parameter-initializer-rewriter.cc
+++ b/src/parsing/parameter-initializer-rewriter.cc
@@ -62,7 +62,7 @@
 void Rewriter::VisitVariableProxy(VariableProxy* proxy) {
   if (proxy->is_resolved()) {
     Variable* var = proxy->var();
-    DCHECK_EQ(var->mode(), TEMPORARY);
+    if (var->mode() != TEMPORARY) return;
     if (old_scope_->RemoveTemporary(var)) {
       var->set_scope(new_scope_);
       new_scope_->AddTemporary(var);
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index 6be19b3..dde6b1d 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -24,6 +24,10 @@
   kFunctionNameValidityUnknown
 };
 
+enum AllowLabelledFunctionStatement {
+  kAllowLabelledFunctionStatement,
+  kDisallowLabelledFunctionStatement,
+};
 
 struct FormalParametersBase {
   explicit FormalParametersBase(Scope* scope) : scope(scope) {}
@@ -108,14 +112,11 @@
         stack_overflow_(false),
         allow_lazy_(false),
         allow_natives_(false),
+        allow_tailcalls_(false),
         allow_harmony_sloppy_(false),
         allow_harmony_sloppy_function_(false),
         allow_harmony_sloppy_let_(false),
-        allow_harmony_default_parameters_(false),
-        allow_harmony_destructuring_bind_(false),
-        allow_harmony_destructuring_assignment_(false),
-        allow_strong_mode_(false),
-        allow_legacy_const_(true),
+        allow_harmony_restrictive_declarations_(false),
         allow_harmony_do_expressions_(false),
         allow_harmony_function_name_(false),
         allow_harmony_function_sent_(false) {}
@@ -124,19 +125,25 @@
   bool allow_##name() const { return allow_##name##_; } \
   void set_allow_##name(bool allow) { allow_##name##_ = allow; }
 
+#define SCANNER_ACCESSORS(name)                                  \
+  bool allow_##name() const { return scanner_->allow_##name(); } \
+  void set_allow_##name(bool allow) {                            \
+    return scanner_->set_allow_##name(allow);                    \
+  }
+
   ALLOW_ACCESSORS(lazy);
   ALLOW_ACCESSORS(natives);
+  ALLOW_ACCESSORS(tailcalls);
   ALLOW_ACCESSORS(harmony_sloppy);
   ALLOW_ACCESSORS(harmony_sloppy_function);
   ALLOW_ACCESSORS(harmony_sloppy_let);
-  ALLOW_ACCESSORS(harmony_default_parameters);
-  ALLOW_ACCESSORS(harmony_destructuring_bind);
-  ALLOW_ACCESSORS(harmony_destructuring_assignment);
-  ALLOW_ACCESSORS(strong_mode);
-  ALLOW_ACCESSORS(legacy_const);
+  ALLOW_ACCESSORS(harmony_restrictive_declarations);
   ALLOW_ACCESSORS(harmony_do_expressions);
   ALLOW_ACCESSORS(harmony_function_name);
   ALLOW_ACCESSORS(harmony_function_sent);
+  SCANNER_ACCESSORS(harmony_exponentiation_operator);
+
+#undef SCANNER_ACCESSORS
 #undef ALLOW_ACCESSORS
 
   uintptr_t stack_limit() const { return stack_limit_; }
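
The new SCANNER_ACCESSORS macro mirrors ALLOW_ACCESSORS but forwards to the scanner, which has to know about the exponentiation operator while tokenizing ** and **=. A small self-contained sketch of that forwarding-accessor pattern, with hypothetical MiniScanner/MiniParser classes standing in for the real ones:

    #include <cstdio>

    // Stand-in for the component that actually owns the flag (the scanner).
    class MiniScanner {
     public:
      bool allow_exponentiation() const { return allow_exponentiation_; }
      void set_allow_exponentiation(bool allow) { allow_exponentiation_ = allow; }

     private:
      bool allow_exponentiation_ = false;
    };

    class MiniParser {
     public:
      explicit MiniParser(MiniScanner* scanner) : scanner_(scanner) {}

    // Accessors for flags stored directly on the parser.
    #define ALLOW_ACCESSORS(name)                            \
      bool allow_##name() const { return allow_##name##_; }  \
      void set_allow_##name(bool allow) { allow_##name##_ = allow; }

    // Accessors that merely forward to the scanner-owned flag.
    #define SCANNER_ACCESSORS(name)                                  \
      bool allow_##name() const { return scanner_->allow_##name(); } \
      void set_allow_##name(bool allow) { scanner_->set_allow_##name(allow); }

      ALLOW_ACCESSORS(natives);
      SCANNER_ACCESSORS(exponentiation);

    #undef SCANNER_ACCESSORS
    #undef ALLOW_ACCESSORS

     private:
      MiniScanner* scanner_;
      bool allow_natives_ = false;
    };

    int main() {
      MiniScanner scanner;
      MiniParser parser(&scanner);
      parser.set_allow_exponentiation(true);
      std::printf("scanner sees flag: %d\n", scanner.allow_exponentiation());
    }
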
@@ -368,7 +375,6 @@
 
   Scope* NewScope(Scope* parent, ScopeType scope_type, FunctionKind kind) {
     DCHECK(ast_value_factory());
-    DCHECK(scope_type != MODULE_SCOPE || FLAG_harmony_modules);
     Scope* result = new (zone())
         Scope(zone(), parent, scope_type, ast_value_factory(), kind);
     result->Initialize();
@@ -481,12 +487,7 @@
 
   bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode, bool* ok) {
     if (Check(Token::IN)) {
-      if (is_strong(language_mode())) {
-        ReportMessageAt(scanner()->location(), MessageTemplate::kStrongForIn);
-        *ok = false;
-      } else {
-        *visit_mode = ForEachStatement::ENUMERATE;
-      }
+      *visit_mode = ForEachStatement::ENUMERATE;
       return true;
     } else if (CheckContextualKeyword(CStrVector("of"))) {
       *visit_mode = ForEachStatement::ITERATE;
@@ -547,12 +548,6 @@
       *ok = false;
       return;
     }
-    if (is_strong(language_mode) && this->IsUndefined(function_name)) {
-      Traits::ReportMessageAt(function_name_loc,
-                              MessageTemplate::kStrongUndefined);
-      *ok = false;
-      return;
-    }
   }
 
   // Determine precedence of given token.
@@ -570,8 +565,7 @@
   bool is_generator() const { return function_state_->is_generator(); }
 
   bool allow_const() {
-    return is_strict(language_mode()) || allow_harmony_sloppy() ||
-           allow_legacy_const();
+    return is_strict(language_mode()) || allow_harmony_sloppy();
   }
 
   bool allow_let() {
@@ -593,7 +587,8 @@
   }
 
   void GetUnexpectedTokenMessage(
-      Token::Value token, MessageTemplate::Template* message, const char** arg,
+      Token::Value token, MessageTemplate::Template* message,
+      Scanner::Location* location, const char** arg,
       MessageTemplate::Template default_ = MessageTemplate::kUnexpectedToken);
 
   void ReportUnexpectedToken(Token::Value token);
@@ -657,10 +652,6 @@
                !classifier->is_valid_strict_mode_formal_parameters()) {
       ReportClassifierError(classifier->strict_mode_formal_parameter_error());
       *ok = false;
-    } else if (is_strong(language_mode) &&
-               !classifier->is_valid_strong_mode_formal_parameters()) {
-      ReportClassifierError(classifier->strong_mode_formal_parameter_error());
-      *ok = false;
     }
   }
 
@@ -698,33 +689,25 @@
   void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
-    GetUnexpectedTokenMessage(peek(), &message, &arg);
-    classifier->RecordExpressionError(scanner()->peek_location(), message, arg);
+    Scanner::Location location = scanner()->peek_location();
+    GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
+    classifier->RecordExpressionError(location, message, arg);
   }
 
   void BindingPatternUnexpectedToken(ExpressionClassifier* classifier) {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
-    GetUnexpectedTokenMessage(peek(), &message, &arg);
-    classifier->RecordBindingPatternError(scanner()->peek_location(), message,
-                                          arg);
+    Scanner::Location location = scanner()->peek_location();
+    GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
+    classifier->RecordBindingPatternError(location, message, arg);
   }
 
   void ArrowFormalParametersUnexpectedToken(ExpressionClassifier* classifier) {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
-    GetUnexpectedTokenMessage(peek(), &message, &arg);
-    classifier->RecordArrowFormalParametersError(scanner()->peek_location(),
-                                                 message, arg);
-  }
-
-  void FormalParameterInitializerUnexpectedToken(
-      ExpressionClassifier* classifier) {
-    MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
-    const char* arg;
-    GetUnexpectedTokenMessage(peek(), &message, &arg);
-    classifier->RecordFormalParameterInitializerError(
-        scanner()->peek_location(), message, arg);
+    Scanner::Location location = scanner()->peek_location();
+    GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
+    classifier->RecordArrowFormalParametersError(location, message, arg);
   }
 
   // Recursive descent functions:
@@ -804,10 +787,6 @@
   ExpressionT ParseSuperExpression(bool is_new,
                                    ExpressionClassifier* classifier, bool* ok);
   ExpressionT ParseNewTargetExpression(bool* ok);
-  ExpressionT ParseStrongInitializationExpression(
-      ExpressionClassifier* classifier, bool* ok);
-  ExpressionT ParseStrongSuperCallExpression(ExpressionClassifier* classifier,
-                                             bool* ok);
 
   void ParseFormalParameter(FormalParametersT* parameters,
                             ExpressionClassifier* classifier, bool* ok);
@@ -825,10 +804,6 @@
   ExpressionT CheckAndRewriteReferenceExpression(
       ExpressionT expression, int beg_pos, int end_pos,
       MessageTemplate::Template message, bool* ok);
-  ExpressionT ClassifyAndRewriteReferenceExpression(
-      ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
-      int end_pos, MessageTemplate::Template message,
-      ParseErrorType type = kSyntaxError);
   ExpressionT CheckAndRewriteReferenceExpression(
       ExpressionT expression, int beg_pos, int end_pos,
       MessageTemplate::Template message, ParseErrorType type, bool* ok);
@@ -841,10 +816,6 @@
         Traits::IsEvalOrArguments(Traits::AsIdentifier(expression))) {
       return false;
     }
-    if (is_strong(language_mode()) &&
-        Traits::IsUndefined(Traits::AsIdentifier(expression))) {
-      return false;
-    }
     return true;
   }
 
@@ -858,8 +829,12 @@
   void CheckPossibleEvalCall(ExpressionT expression, Scope* scope) {
     if (Traits::IsIdentifier(expression) &&
         Traits::IsEval(Traits::AsIdentifier(expression))) {
-      scope->DeclarationScope()->RecordEvalCall();
       scope->RecordEvalCall();
+      if (is_sloppy(scope->language_mode())) {
+        // For sloppy scopes we also have to record the call at function level,
+        // in case it includes declarations that will be hoisted.
+        scope->DeclarationScope()->RecordEvalCall();
+      }
     }
   }
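
The reordered RecordEvalCall logic above records a direct eval on the current scope and, only in sloppy mode, also on the enclosing declaration scope, because sloppy eval can introduce var bindings that hoist to function level. A simplified sketch of that propagation over a hypothetical scope chain:

    #include <cstdio>

    // Hypothetical, heavily simplified scope model.
    struct MiniScope {
      MiniScope* outer = nullptr;
      bool is_declaration_scope = false;  // function or script scope
      bool is_sloppy = true;
      bool calls_eval = false;

      MiniScope* DeclarationScope() {
        MiniScope* s = this;
        while (!s->is_declaration_scope) s = s->outer;
        return s;
      }

      void RecordEvalCall() { calls_eval = true; }
    };

    void OnPossibleDirectEval(MiniScope* scope) {
      scope->RecordEvalCall();
      if (scope->is_sloppy) {
        // Sloppy eval can introduce 'var' bindings that hoist to the function
        // scope, so that scope must also be treated as calling eval.
        scope->DeclarationScope()->RecordEvalCall();
      }
    }

    int main() {
      MiniScope function_scope;
      function_scope.is_declaration_scope = true;
      MiniScope block_scope;
      block_scope.outer = &function_scope;

      OnPossibleDirectEval(&block_scope);
      std::printf("block calls eval: %d, function calls eval: %d\n",
                  block_scope.calls_eval, function_scope.calls_eval);
    }
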
 
@@ -945,14 +920,11 @@
 
   bool allow_lazy_;
   bool allow_natives_;
+  bool allow_tailcalls_;
   bool allow_harmony_sloppy_;
   bool allow_harmony_sloppy_function_;
   bool allow_harmony_sloppy_let_;
-  bool allow_harmony_default_parameters_;
-  bool allow_harmony_destructuring_bind_;
-  bool allow_harmony_destructuring_assignment_;
-  bool allow_strong_mode_;
-  bool allow_legacy_const_;
+  bool allow_harmony_restrictive_declarations_;
   bool allow_harmony_do_expressions_;
   bool allow_harmony_function_name_;
   bool allow_harmony_function_sent_;
@@ -987,32 +959,28 @@
   *function_state_stack_ = outer_function_state_;
 }
 
-
 template <class Traits>
 void ParserBase<Traits>::GetUnexpectedTokenMessage(
-    Token::Value token, MessageTemplate::Template* message, const char** arg,
+    Token::Value token, MessageTemplate::Template* message,
+    Scanner::Location* location, const char** arg,
     MessageTemplate::Template default_) {
+  *arg = nullptr;
   switch (token) {
     case Token::EOS:
       *message = MessageTemplate::kUnexpectedEOS;
-      *arg = nullptr;
       break;
     case Token::SMI:
     case Token::NUMBER:
       *message = MessageTemplate::kUnexpectedTokenNumber;
-      *arg = nullptr;
       break;
     case Token::STRING:
       *message = MessageTemplate::kUnexpectedTokenString;
-      *arg = nullptr;
       break;
     case Token::IDENTIFIER:
       *message = MessageTemplate::kUnexpectedTokenIdentifier;
-      *arg = nullptr;
       break;
     case Token::FUTURE_RESERVED_WORD:
       *message = MessageTemplate::kUnexpectedReserved;
-      *arg = nullptr;
       break;
     case Token::LET:
     case Token::STATIC:
@@ -1021,17 +989,22 @@
       *message = is_strict(language_mode())
                      ? MessageTemplate::kUnexpectedStrictReserved
                      : MessageTemplate::kUnexpectedTokenIdentifier;
-      *arg = nullptr;
       break;
     case Token::TEMPLATE_SPAN:
     case Token::TEMPLATE_TAIL:
       *message = MessageTemplate::kUnexpectedTemplateString;
-      *arg = nullptr;
       break;
     case Token::ESCAPED_STRICT_RESERVED_WORD:
     case Token::ESCAPED_KEYWORD:
       *message = MessageTemplate::kInvalidEscapedReservedWord;
-      *arg = nullptr;
+      break;
+    case Token::ILLEGAL:
+      if (scanner()->has_error()) {
+        *message = scanner()->error();
+        *location = scanner()->error_location();
+      } else {
+        *message = MessageTemplate::kInvalidOrUnexpectedToken;
+      }
       break;
     default:
       const char* name = Token::String(token);
@@ -1053,7 +1026,7 @@
     Scanner::Location source_location, Token::Value token,
     MessageTemplate::Template message) {
   const char* arg;
-  GetUnexpectedTokenMessage(token, &message, &arg);
+  GetUnexpectedTokenMessage(token, &message, &source_location, &arg);
   Traits::ReportMessageAt(source_location, message, arg);
 }
 
@@ -1105,19 +1078,6 @@
         classifier->RecordBindingPatternError(
             scanner()->location(), MessageTemplate::kStrictEvalArguments);
       }
-      if (is_strong(language_mode())) {
-        classifier->RecordExpressionError(scanner()->location(),
-                                          MessageTemplate::kStrongArguments);
-      }
-    }
-    if (this->IsUndefined(name)) {
-      classifier->RecordStrongModeFormalParameterError(
-          scanner()->location(), MessageTemplate::kStrongUndefined);
-      if (is_strong(language_mode())) {
-        // TODO(dslomov): allow 'undefined' in nested patterns.
-        classifier->RecordPatternError(scanner()->location(),
-                                       MessageTemplate::kStrongUndefined);
-      }
     }
 
     if (classifier->duplicate_finder() != nullptr &&
@@ -1218,8 +1178,7 @@
   }
   int js_flags = flags.FromJust();
   Next();
-  return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index,
-                                     is_strong(language_mode()), pos);
+  return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
 }
 
 
@@ -1262,15 +1221,6 @@
     case Token::THIS: {
       BindingPatternUnexpectedToken(classifier);
       Consume(Token::THIS);
-      if (FLAG_strong_this && is_strong(language_mode())) {
-        // Constructors' usages of 'this' in strong mode are parsed separately.
-        // TODO(rossberg): this does not work with arrow functions yet.
-        if (IsClassConstructor(function_state_->kind())) {
-          ReportMessage(MessageTemplate::kStrongConstructorThis);
-          *ok = false;
-          return this->EmptyExpression();
-        }
-      }
       return this->ThisExpression(scope_, factory(), beg_pos);
     }
 
@@ -1313,15 +1263,9 @@
       return this->ParseRegExpLiteral(false, classifier, ok);
 
     case Token::LBRACK:
-      if (!allow_harmony_destructuring_bind()) {
-        BindingPatternUnexpectedToken(classifier);
-      }
       return this->ParseArrayLiteral(classifier, ok);
 
     case Token::LBRACE:
-      if (!allow_harmony_destructuring_bind()) {
-        BindingPatternUnexpectedToken(classifier);
-      }
       return this->ParseObjectLiteral(classifier, ok);
 
     case Token::LPAREN: {
@@ -1394,7 +1338,7 @@
                                                    CHECK_OK);
         class_name_location = scanner()->location();
       }
-      return this->ParseClassLiteral(name, class_name_location,
+      return this->ParseClassLiteral(classifier, name, class_name_location,
                                      is_strict_reserved_name,
                                      class_token_position, ok);
     }
@@ -1510,12 +1454,6 @@
   while (peek() != Token::RBRACK) {
     ExpressionT elem = this->EmptyExpression();
     if (peek() == Token::COMMA) {
-      if (is_strong(language_mode())) {
-        ReportMessageAt(scanner()->peek_location(),
-                        MessageTemplate::kStrongEllision);
-        *ok = false;
-        return this->EmptyExpression();
-      }
       elem = this->GetLiteralTheHole(peek_position(), factory());
     } else if (peek() == Token::ELLIPSIS) {
       int start_pos = peek_position();
@@ -1559,9 +1497,8 @@
   // Update the scope information before the pre-parsing bailout.
   int literal_index = function_state_->NextMaterializedLiteralIndex();
 
-  ExpressionT result =
-      factory()->NewArrayLiteral(values, first_spread_index, literal_index,
-                                 is_strong(language_mode()), pos);
+  ExpressionT result = factory()->NewArrayLiteral(values, first_spread_index,
+                                                  literal_index, pos);
   if (first_spread_index >= 0) {
     result = factory()->NewRewritableExpression(result);
     Traits::QueueNonPatternForRewriting(result);
@@ -1823,7 +1760,6 @@
   typename Traits::Type::PropertyList properties =
       this->NewPropertyList(4, zone_);
   int number_of_boilerplate_properties = 0;
-  bool has_function = false;
   bool has_computed_names = false;
   ObjectLiteralChecker checker(this);
 
@@ -1845,12 +1781,6 @@
       has_computed_names = true;
     }
 
-    // Mark top-level object literals that contain function literals and
-    // pretenure the literal so it can be added as a constant function
-    // property. (Parser only.)
-    this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, property,
-                                                          &has_function);
-
     // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
     if (!has_computed_names && this->IsBoilerplateProperty(property)) {
       number_of_boilerplate_properties++;
@@ -1876,8 +1806,6 @@
   return factory()->NewObjectLiteral(properties,
                                      literal_index,
                                      number_of_boilerplate_properties,
-                                     has_function,
-                                     is_strong(language_mode()),
                                      pos);
 }
 
@@ -1984,6 +1912,13 @@
                                    Token::String(Token::ARROW));
     ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
                                   parenthesized_formals, CHECK_OK);
+    // This reads strangely, but is correct: it checks whether any
+    // sub-expression of the parameter list failed to be a valid formal
+    // parameter initializer. Since YieldExpressions are banned anywhere
+    // in an arrow parameter list, this is correct.
+    // TODO(adamk): Rename "FormalParameterInitializerError" to refer to
+    // "YieldExpression", which is its only use.
+    ValidateFormalParameterInitializer(&arrow_formals_classifier, ok);
     Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
     Scope* scope =
         this->NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
@@ -2039,23 +1974,10 @@
   // Now pending non-pattern expressions must be discarded.
   arrow_formals_classifier.Discard();
 
-  if (!(allow_harmony_destructuring_bind() ||
-        allow_harmony_default_parameters())) {
-    BindingPatternUnexpectedToken(classifier);
-  }
-
-  if (allow_harmony_destructuring_assignment() && IsValidPattern(expression) &&
-      peek() == Token::ASSIGN) {
+  if (IsValidPattern(expression) && peek() == Token::ASSIGN) {
     classifier->ForgiveCoverInitializedNameError();
     ValidateAssignmentPattern(classifier, CHECK_OK);
     is_destructuring_assignment = true;
-  } else if (allow_harmony_default_parameters() &&
-             !allow_harmony_destructuring_assignment()) {
-    // TODO(adamk): This branch should be removed once the destructuring
-    // assignment and default parameter flags are removed.
-    expression = this->ClassifyAndRewriteReferenceExpression(
-        classifier, expression, lhs_beg_pos, scanner()->location().end_pos,
-        MessageTemplate::kInvalidLhsInAssignment);
   } else {
     expression = this->CheckAndRewriteReferenceExpression(
         expression, lhs_beg_pos, scanner()->location().end_pos,
@@ -2108,6 +2030,11 @@
     Traits::SetFunctionNameFromIdentifierRef(right, expression);
   }
 
+  if (op == Token::ASSIGN_EXP) {
+    DCHECK(!is_destructuring_assignment);
+    return Traits::RewriteAssignExponentiation(expression, right, pos);
+  }
+
   ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
 
   if (is_destructuring_assignment) {
@@ -2127,14 +2054,15 @@
   int pos = peek_position();
   classifier->RecordPatternError(scanner()->peek_location(),
                                  MessageTemplate::kInvalidDestructuringTarget);
-  FormalParameterInitializerUnexpectedToken(classifier);
+  classifier->RecordFormalParameterInitializerError(
+      scanner()->peek_location(), MessageTemplate::kYieldInParameter);
   Expect(Token::YIELD, CHECK_OK);
   ExpressionT generator_object =
       factory()->NewVariableProxy(function_state_->generator_object_variable());
   ExpressionT expression = Traits::EmptyExpression();
-  Yield::Kind kind = Yield::kSuspend;
+  bool delegating = false;  // yield*
   if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
-    if (Check(Token::MUL)) kind = Yield::kDelegating;
+    if (Check(Token::MUL)) delegating = true;
     switch (peek()) {
       case Token::EOS:
       case Token::SEMICOLON:
@@ -2146,10 +2074,8 @@
         // The above set of tokens is the complete set of tokens that can appear
         // after an AssignmentExpression, and none of them can start an
         // AssignmentExpression.  This allows us to avoid looking for an RHS for
-        // a Yield::kSuspend operation, given only one look-ahead token.
-        if (kind == Yield::kSuspend)
-          break;
-        DCHECK_EQ(Yield::kDelegating, kind);
+        // a regular yield, given only one look-ahead token.
+        if (!delegating) break;
         // Delegating yields require an RHS; fall through.
       default:
         expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
@@ -2157,13 +2083,16 @@
         break;
     }
   }
-  if (kind == Yield::kDelegating) {
+
+  if (delegating) {
     return Traits::RewriteYieldStar(generator_object, expression, pos);
   }
+
+  expression = Traits::BuildIteratorResult(expression, false);
   // Hackily disambiguate o from o.next and o [Symbol.iterator]().
   // TODO(verwaest): Come up with a better solution.
   typename Traits::Type::YieldExpression yield =
-      factory()->NewYield(generator_object, expression, kind, pos);
+      factory()->NewYield(generator_object, expression, pos);
   return yield;
 }
 
@@ -2215,10 +2144,12 @@
       BindingPatternUnexpectedToken(classifier);
       ArrowFormalParametersUnexpectedToken(classifier);
       Token::Value op = Next();
-      Scanner::Location op_location = scanner()->location();
       int pos = position();
+
+      const bool is_right_associative = op == Token::EXP;
+      const int next_prec = is_right_associative ? prec1 : prec1 + 1;
       ExpressionT y =
-          ParseBinaryExpression(prec1 + 1, accept_IN, classifier, CHECK_OK);
+          ParseBinaryExpression(next_prec, accept_IN, classifier, CHECK_OK);
       Traits::RewriteNonPattern(classifier, CHECK_OK);
 
       if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
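
The next_prec computation above is the usual way to make one operator right-associative inside a precedence-climbing loop: for Token::EXP the recursive call reuses the same precedence instead of prec1 + 1, so a ** b ** c groups as a ** (b ** c). A self-contained sketch of the idea over single-digit operands, with a hypothetical mini grammar rather than the V8 tokenizer:

    #include <cmath>
    #include <cstdio>
    #include <string>

    // Hypothetical mini grammar over single digits with +, *, and **.
    struct MiniExprParser {
      std::string s;
      size_t i = 0;

      char peek() const { return i < s.size() ? s[i] : '\0'; }

      double ParsePrimary() { return s[i++] - '0'; }

      double ParseBinary(int min_prec) {
        double x = ParsePrimary();
        while (true) {
          char op = peek();
          bool is_exp = op == '*' && i + 1 < s.size() && s[i + 1] == '*';
          int prec = is_exp ? 3 : (op == '*' ? 2 : (op == '+' ? 1 : 0));
          if (prec == 0 || prec < min_prec) return x;
          i += is_exp ? 2 : 1;
          // Right-associative '**' recurses with the same precedence; the
          // left-associative operators recurse with prec + 1.
          double y = ParseBinary(is_exp ? prec : prec + 1);
          if (is_exp) x = std::pow(x, y);
          else if (op == '*') x *= y;
          else x += y;
        }
      }
    };

    int main() {
      MiniExprParser p;
      p.s = "2**3**2";  // groups as 2 ** (3 ** 2) = 512, not (2 ** 3) ** 2 = 64
      std::printf("%g\n", p.ParseBinary(1));
    }
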
@@ -2237,11 +2168,7 @@
           case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
           default: break;
         }
-        if (cmp == Token::EQ && is_strong(language_mode())) {
-          ReportMessageAt(op_location, MessageTemplate::kStrongEqual);
-          *ok = false;
-          return this->EmptyExpression();
-        } else if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
+        if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
           x = Traits::RewriteInstanceof(x, y, pos);
         } else {
           x = factory()->NewCompareOperation(cmp, x, y, pos);
@@ -2250,6 +2177,9 @@
             x = factory()->NewUnaryOperation(Token::NOT, x, pos);
           }
         }
+
+      } else if (op == Token::EXP) {
+        x = Traits::RewriteExponentiation(x, y, pos);
       } else {
         // We have a "normal" binary operation.
         x = factory()->NewBinaryOperation(op, x, y, pos);
@@ -2287,11 +2217,7 @@
     Traits::RewriteNonPattern(classifier, CHECK_OK);
 
     if (op == Token::DELETE && is_strict(language_mode())) {
-      if (is_strong(language_mode())) {
-        ReportMessage(MessageTemplate::kStrongDelete);
-        *ok = false;
-        return this->EmptyExpression();
-      } else if (this->IsIdentifier(expression)) {
+      if (this->IsIdentifier(expression)) {
         // "delete identifier" is a syntax error in strict mode.
         ReportMessage(MessageTemplate::kStrictDelete);
         *ok = false;
@@ -2299,6 +2225,12 @@
       }
     }
 
+    if (peek() == Token::EXP) {
+      ReportUnexpectedToken(Next());
+      *ok = false;
+      return this->EmptyExpression();
+    }
+
     // Allow Traits to rewrite the expression.
     return this->BuildUnaryExpression(expression, op, pos, factory());
   } else if (Token::IsCountOp(op)) {
@@ -2386,12 +2318,6 @@
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
 
-        if (is_strong(language_mode()) && this->IsIdentifier(result) &&
-            this->IsEval(this->AsIdentifier(result))) {
-          ReportMessage(MessageTemplate::kStrongDirectEval);
-          *ok = false;
-          return this->EmptyExpression();
-        }
         int pos;
         if (scanner()->current_token() == Token::IDENTIFIER ||
             scanner()->current_token() == Token::SUPER) {
@@ -2609,148 +2535,6 @@
 
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseStrongInitializationExpression(
-    ExpressionClassifier* classifier, bool* ok) {
-  // InitializationExpression ::  (strong mode)
-  //  'this' '.' IdentifierName '=' AssignmentExpression
-  //  'this' '[' Expression ']' '=' AssignmentExpression
-
-  FuncNameInferrer::State fni_state(fni_);
-
-  Consume(Token::THIS);
-  int pos = position();
-  function_state_->set_this_location(scanner()->location());
-  ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
-
-  ExpressionT left = this->EmptyExpression();
-  switch (peek()) {
-    case Token::LBRACK: {
-      Consume(Token::LBRACK);
-      int pos = position();
-      ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
-      Traits::RewriteNonPattern(classifier, CHECK_OK);
-      left = factory()->NewProperty(this_expr, index, pos);
-      if (fni_ != NULL) {
-        this->PushPropertyName(fni_, index);
-      }
-      Expect(Token::RBRACK, CHECK_OK);
-      break;
-    }
-    case Token::PERIOD: {
-      Consume(Token::PERIOD);
-      int pos = position();
-      IdentifierT name = ParseIdentifierName(CHECK_OK);
-      left = factory()->NewProperty(
-          this_expr, factory()->NewStringLiteral(name, pos), pos);
-      if (fni_ != NULL) {
-        this->PushLiteralName(fni_, name);
-      }
-      break;
-    }
-    default:
-      ReportMessage(MessageTemplate::kStrongConstructorThis);
-      *ok = false;
-      return this->EmptyExpression();
-  }
-
-  if (peek() != Token::ASSIGN) {
-    ReportMessageAt(function_state_->this_location(),
-                    MessageTemplate::kStrongConstructorThis);
-    *ok = false;
-    return this->EmptyExpression();
-  }
-  Consume(Token::ASSIGN);
-  left = this->MarkExpressionAsAssigned(left);
-
-  ExpressionT right =
-      this->ParseAssignmentExpression(true, classifier, CHECK_OK);
-  Traits::RewriteNonPattern(classifier, CHECK_OK);
-  this->CheckAssigningFunctionLiteralToProperty(left, right);
-  function_state_->AddProperty();
-  if (fni_ != NULL) {
-    // Check if the right hand side is a call to avoid inferring a
-    // name if we're dealing with "this.a = function(){...}();"-like
-    // expression.
-    if (!right->IsCall() && !right->IsCallNew()) {
-      fni_->Infer();
-    } else {
-      fni_->RemoveLastFunction();
-    }
-  }
-
-  if (function_state_->return_location().IsValid()) {
-    ReportMessageAt(function_state_->return_location(),
-                    MessageTemplate::kStrongConstructorReturnMisplaced);
-    *ok = false;
-    return this->EmptyExpression();
-  }
-
-  return factory()->NewAssignment(Token::ASSIGN, left, right, pos);
-}
-
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseStrongSuperCallExpression(
-    ExpressionClassifier* classifier, bool* ok) {
-  // SuperCallExpression ::  (strong mode)
-  //  'super' '(' ExpressionList ')'
-  BindingPatternUnexpectedToken(classifier);
-
-  Consume(Token::SUPER);
-  int pos = position();
-  Scanner::Location super_loc = scanner()->location();
-  ExpressionT expr = this->SuperCallReference(scope_, factory(), pos);
-
-  if (peek() != Token::LPAREN) {
-    ReportMessage(MessageTemplate::kStrongConstructorSuper);
-    *ok = false;
-    return this->EmptyExpression();
-  }
-
-  Scanner::Location spread_pos;
-  typename Traits::Type::ExpressionList args =
-      ParseArguments(&spread_pos, classifier, CHECK_OK);
-
-  // TODO(rossberg): This doesn't work with arrow functions yet.
-  if (!IsSubclassConstructor(function_state_->kind())) {
-    ReportMessage(MessageTemplate::kUnexpectedSuper);
-    *ok = false;
-    return this->EmptyExpression();
-  } else if (function_state_->super_location().IsValid()) {
-    ReportMessageAt(scanner()->location(),
-                    MessageTemplate::kStrongSuperCallDuplicate);
-    *ok = false;
-    return this->EmptyExpression();
-  } else if (function_state_->this_location().IsValid()) {
-    ReportMessageAt(scanner()->location(),
-                    MessageTemplate::kStrongSuperCallMisplaced);
-    *ok = false;
-    return this->EmptyExpression();
-  } else if (function_state_->return_location().IsValid()) {
-    ReportMessageAt(function_state_->return_location(),
-                    MessageTemplate::kStrongConstructorReturnMisplaced);
-    *ok = false;
-    return this->EmptyExpression();
-  }
-
-  function_state_->set_super_location(super_loc);
-  if (spread_pos.IsValid()) {
-    args = Traits::PrepareSpreadArguments(args);
-    expr = Traits::SpreadCall(expr, args, pos);
-  } else {
-    expr = factory()->NewCall(expr, args, pos);
-  }
-
-  // Explicit calls to the super constructor using super() perform an implicit
-  // binding assignment to the 'this' variable.
-  ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
-  return factory()->NewAssignment(Token::INIT, this_expr, expr, pos);
-}
-
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseSuperExpression(bool is_new,
                                          ExpressionClassifier* classifier,
                                          bool* ok) {
@@ -2768,13 +2552,6 @@
     // new super() is never allowed.
     // super() is only allowed in derived constructor
     if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
-      if (is_strong(language_mode())) {
-        // Super calls in strong mode are parsed separately.
-        ReportMessageAt(scanner()->location(),
-                        MessageTemplate::kStrongConstructorSuper);
-        *ok = false;
-        return this->EmptyExpression();
-      }
       // TODO(rossberg): This might not be the correct FunctionState for the
       // method here.
       function_state_->set_super_location(scanner()->location());
@@ -2878,6 +2655,11 @@
             ParseTemplateLiteral(expression, pos, classifier, CHECK_OK);
         break;
       }
+      case Token::ILLEGAL: {
+        ReportUnexpectedTokenAt(scanner()->peek_location(), Token::ILLEGAL);
+        *ok = false;
+        return this->EmptyExpression();
+      }
       default:
         return expression;
     }
@@ -2894,7 +2676,6 @@
   //   BindingElement[?Yield, ?GeneratorParameter]
   bool is_rest = parameters->has_rest;
 
-  Token::Value next = peek();
   ExpressionT pattern = ParsePrimaryExpression(classifier, ok);
   if (!*ok) return;
 
@@ -2902,11 +2683,6 @@
   if (!*ok) return;
 
   if (!Traits::IsIdentifier(pattern)) {
-    if (!allow_harmony_destructuring_bind()) {
-      ReportUnexpectedToken(next);
-      *ok = false;
-      return;
-    }
     parameters->is_simple = false;
     ValidateFormalParameterInitializer(classifier, ok);
     if (!*ok) return;
@@ -2914,7 +2690,7 @@
   }
 
   ExpressionT initializer = Traits::EmptyExpression();
-  if (!is_rest && allow_harmony_default_parameters() && Check(Token::ASSIGN)) {
+  if (!is_rest && Check(Token::ASSIGN)) {
     ExpressionClassifier init_classifier(this);
     initializer = ParseAssignmentExpression(true, &init_classifier, ok);
     if (!*ok) return;
@@ -3099,6 +2875,10 @@
       body->Add(factory()->NewReturnStatement(expression, pos), zone());
       materialized_literal_count = function_state.materialized_literal_count();
       expected_property_count = function_state.expected_property_count();
+      // ES6 14.6.1 Static Semantics: IsInTailPosition
+      if (allow_tailcalls() && !is_sloppy(language_mode())) {
+        this->MarkTailPosition(expression);
+      }
     }
     super_loc = function_state.super_location();
 
@@ -3253,47 +3033,25 @@
 ParserBase<Traits>::CheckAndRewriteReferenceExpression(
     ExpressionT expression, int beg_pos, int end_pos,
     MessageTemplate::Template message, ParseErrorType type, bool* ok) {
-  ExpressionClassifier classifier(this);
-  ExpressionT result = ClassifyAndRewriteReferenceExpression(
-      &classifier, expression, beg_pos, end_pos, message, type);
-  ValidateExpression(&classifier, ok);
-  if (!*ok) return this->EmptyExpression();
-  return result;
-}
-
-
-template <typename Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ClassifyAndRewriteReferenceExpression(
-    ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
-    int end_pos, MessageTemplate::Template message, ParseErrorType type) {
-  Scanner::Location location(beg_pos, end_pos);
-  if (this->IsIdentifier(expression)) {
-    if (is_strict(language_mode()) &&
-        this->IsEvalOrArguments(this->AsIdentifier(expression))) {
-      classifier->RecordExpressionError(
-          location, MessageTemplate::kStrictEvalArguments, kSyntaxError);
-      return expression;
-    }
-    if (is_strong(language_mode()) &&
-        this->IsUndefined(this->AsIdentifier(expression))) {
-      classifier->RecordExpressionError(
-          location, MessageTemplate::kStrongUndefined, kSyntaxError);
-      return expression;
-    }
+  if (this->IsIdentifier(expression) && is_strict(language_mode()) &&
+      this->IsEvalOrArguments(this->AsIdentifier(expression))) {
+    ReportMessageAt(Scanner::Location(beg_pos, end_pos),
+                    MessageTemplate::kStrictEvalArguments, kSyntaxError);
+    *ok = false;
+    return this->EmptyExpression();
   }
   if (expression->IsValidReferenceExpression()) {
     return expression;
-  } else if (expression->IsCall()) {
+  }
+  if (expression->IsCall()) {
     // If it is a call, make it a runtime error for legacy web compatibility.
     // Rewrite `expr' to `expr[throw ReferenceError]'.
-    int pos = location.beg_pos;
-    ExpressionT error = this->NewThrowReferenceError(message, pos);
-    return factory()->NewProperty(expression, error, pos);
-  } else {
-    classifier->RecordExpressionError(location, message, type);
-    return expression;
+    ExpressionT error = this->NewThrowReferenceError(message, beg_pos);
+    return factory()->NewProperty(expression, error, beg_pos);
   }
+  ReportMessageAt(Scanner::Location(beg_pos, end_pos), message, type);
+  *ok = false;
+  return this->EmptyExpression();
 }
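
After dropping the classifier-based variant, CheckAndRewriteReferenceExpression has exactly three outcomes: a valid reference is returned unchanged, a call is rewritten into the equivalent of expr[throw ReferenceError] so it only fails at runtime (legacy web compatibility), and anything else is an immediate syntax error. A compact sketch of that decision tree over hypothetical, heavily simplified AST kinds:

    #include <cstdio>
    #include <string>

    // Hypothetical expression kinds; not the V8 AST.
    enum class Kind { kIdentifier, kProperty, kCall, kLiteral };

    struct Expr {
      Kind kind;
      std::string text;
    };

    // Mirrors the three outcomes: keep a valid reference, rewrite a call into
    // a runtime error, or reject everything else with a syntax error.
    bool CheckAndRewriteReference(Expr* expr, bool strict, std::string* error) {
      if (expr->kind == Kind::kIdentifier && strict &&
          (expr->text == "eval" || expr->text == "arguments")) {
        *error = "unexpected eval or arguments in strict mode";
        return false;
      }
      if (expr->kind == Kind::kIdentifier || expr->kind == Kind::kProperty) {
        return true;  // already a valid reference; returned unchanged
      }
      if (expr->kind == Kind::kCall) {
        // Legacy web compatibility: keep parsing, but make the assignment throw
        // when evaluated, as if written expr[throw ReferenceError].
        expr->text += "[throw ReferenceError]";
        return true;
      }
      *error = "invalid assignment target";
      return false;
    }

    int main() {
      Expr call{Kind::kCall, "f()"};
      std::string error;
      CheckAndRewriteReference(&call, /*strict=*/true, &error);
      std::printf("call rewritten to: %s\n", call.text.c_str());

      Expr literal{Kind::kLiteral, "42"};
      bool ok = CheckAndRewriteReference(&literal, /*strict=*/true, &error);
      std::printf("literal ok: %d, error: %s\n", ok, error.c_str());
    }
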
 
 
diff --git a/src/parsing/parser.cc b/src/parsing/parser.cc
index 968e8ed..fa2893b 100644
--- a/src/parsing/parser.cc
+++ b/src/parsing/parser.cc
@@ -439,6 +439,14 @@
         *x = factory->NewNumberLiteral(value, pos, has_dot);
         return true;
       }
+      case Token::EXP: {
+        double value = Pow(x_val, y_val);
+        int int_value = static_cast<int>(value);
+        *x = factory->NewNumberLiteral(
+            int_value == value && value != -0.0 ? int_value : value, pos,
+            has_dot);
+        return true;
+      }
       default:
         break;
     }
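
The new Token::EXP case folds literal operands at parse time and, like the other folded operators, stores the result as an integer literal when it round-trips exactly; the value != -0.0 comparison also excludes zero (0.0 and -0.0 compare equal), so a negative zero never gets flattened to integer 0. A quick sketch of that canonicalization check, using std::pow in place of V8's Pow helper:

    #include <cmath>
    #include <cstdio>

    // Mirrors the literal-folding check: store as an int only when the value
    // round-trips exactly; `value != -0.0` also rules out zero entirely.
    bool StoreAsIntLiteral(double value) {
      int int_value = static_cast<int>(value);
      return int_value == value && value != -0.0;
    }

    int main() {
      std::printf("2**10  -> int? %d\n", StoreAsIntLiteral(std::pow(2.0, 10.0)));
      std::printf("2**-1  -> int? %d\n", StoreAsIntLiteral(std::pow(2.0, -1.0)));
      std::printf("(-0.0) -> int? %d\n", StoreAsIntLiteral(-0.0));
    }
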
@@ -491,6 +499,20 @@
   return factory->NewUnaryOperation(op, expression, pos);
 }
 
+Expression* ParserTraits::BuildIteratorResult(Expression* value, bool done) {
+  int pos = RelocInfo::kNoPosition;
+  AstNodeFactory* factory = parser_->factory();
+  Zone* zone = parser_->zone();
+
+  if (value == nullptr) value = factory->NewUndefinedLiteral(pos);
+
+  auto args = new (zone) ZoneList<Expression*>(2, zone);
+  args->Add(value, zone);
+  args->Add(factory->NewBooleanLiteral(done, pos), zone);
+
+  return factory->NewCallRuntime(Runtime::kInlineCreateIterResultObject, args,
+                                 pos);
+}
 
 Expression* ParserTraits::NewThrowReferenceError(
     MessageTemplate::Template message, int pos) {
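
BuildIteratorResult above is what the simplified yield lowering calls: instead of a dedicated Yield::kSuspend kind, a non-delegating yield now wraps its operand in a {value, done} iterator-result object via an inlined runtime call. A hand-desugared sketch of the shape being produced, with hypothetical types:

    #include <cstdio>

    // The record produced for each step of a generator: `yield expr` yields
    // {value: expr, done: false}; finishing the generator yields done: true.
    struct IterResult {
      int value;
      bool done;
    };

    struct CountdownGenerator {
      int current;
      IterResult next() {
        if (current == 0) return {0, true};  // generator finished
        return {current--, false};           // corresponds to `yield current`
      }
    };

    int main() {
      CountdownGenerator gen{3};
      for (IterResult r = gen.next(); !r.done; r = gen.next())
        std::printf("{value: %d, done: false}\n", r.value);
    }
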
@@ -734,14 +756,17 @@
       function_token_position, type, language_mode, ok);
 }
 
-
 ClassLiteral* ParserTraits::ParseClassLiteral(
-    const AstRawString* name, Scanner::Location class_name_location,
-    bool name_is_strict_reserved, int pos, bool* ok) {
-  return parser_->ParseClassLiteral(name, class_name_location,
+    Type::ExpressionClassifier* classifier, const AstRawString* name,
+    Scanner::Location class_name_location, bool name_is_strict_reserved,
+    int pos, bool* ok) {
+  return parser_->ParseClassLiteral(classifier, name, class_name_location,
                                     name_is_strict_reserved, pos, ok);
 }
 
+void ParserTraits::MarkTailPosition(Expression* expression) {
+  expression->MarkTail();
+}
 
 Parser::Parser(ParseInfo* info)
     : ParserBase<ParserTraits>(info->zone(), &scanner_, info->stack_limit(),
@@ -762,18 +787,18 @@
   DCHECK(!info->script().is_null() || info->source_stream() != NULL);
   set_allow_lazy(info->allow_lazy_parsing());
   set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
+  set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
+                      info->isolate()->is_tail_call_elimination_enabled());
   set_allow_harmony_sloppy(FLAG_harmony_sloppy);
   set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
   set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
-  set_allow_harmony_default_parameters(FLAG_harmony_default_parameters);
-  set_allow_harmony_destructuring_bind(FLAG_harmony_destructuring_bind);
-  set_allow_harmony_destructuring_assignment(
-      FLAG_harmony_destructuring_assignment);
-  set_allow_strong_mode(FLAG_strong_mode);
-  set_allow_legacy_const(FLAG_legacy_const);
   set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
   set_allow_harmony_function_name(FLAG_harmony_function_name);
   set_allow_harmony_function_sent(FLAG_harmony_function_sent);
+  set_allow_harmony_restrictive_declarations(
+      FLAG_harmony_restrictive_declarations);
+  set_allow_harmony_exponentiation_operator(
+      FLAG_harmony_exponentiation_operator);
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
     use_counts_[feature] = 0;
@@ -901,7 +926,7 @@
                                  kNormalFunction, &function_factory);
 
     // Don't count the mode in the use counters--give the program a chance
-    // to enable script/module-wide strict/strong mode below.
+    // to enable script/module-wide strict mode below.
     scope_->SetLanguageMode(info->language_mode());
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
     bool ok = true;
@@ -926,8 +951,7 @@
       // unchanged if the property already exists.
       InsertSloppyBlockFunctionVarBindings(scope, &ok);
     }
-    if (ok && (is_strict(language_mode()) || allow_harmony_sloppy() ||
-               allow_harmony_destructuring_bind())) {
+    if (ok) {
       CheckConflictingVarDeclarations(scope_, &ok);
     }
 
@@ -1154,28 +1178,7 @@
     }
 
     Scanner::Location token_loc = scanner()->peek_location();
-    Scanner::Location old_this_loc = function_state_->this_location();
-    Scanner::Location old_super_loc = function_state_->super_location();
     Statement* stat = ParseStatementListItem(CHECK_OK);
-
-    if (is_strong(language_mode()) && scope_->is_function_scope() &&
-        IsClassConstructor(function_state_->kind())) {
-      Scanner::Location this_loc = function_state_->this_location();
-      Scanner::Location super_loc = function_state_->super_location();
-      if (this_loc.beg_pos != old_this_loc.beg_pos &&
-          this_loc.beg_pos != token_loc.beg_pos) {
-        ReportMessageAt(this_loc, MessageTemplate::kStrongConstructorThis);
-        *ok = false;
-        return nullptr;
-      }
-      if (super_loc.beg_pos != old_super_loc.beg_pos &&
-          super_loc.beg_pos != token_loc.beg_pos) {
-        ReportMessageAt(super_loc, MessageTemplate::kStrongConstructorSuper);
-        *ok = false;
-        return nullptr;
-      }
-    }
-
     if (stat == NULL || stat->IsEmpty()) {
       directive_prologue = false;   // End of directive prologue.
       continue;
@@ -1189,43 +1192,21 @@
       if ((e_stat = stat->AsExpressionStatement()) != NULL &&
           (literal = e_stat->expression()->AsLiteral()) != NULL &&
           literal->raw_value()->IsString()) {
-        // Check "use strict" directive (ES5 14.1), "use asm" directive, and
-        // "use strong" directive (experimental).
+        // Check "use strict" directive (ES5 14.1), "use asm" directive.
         bool use_strict_found =
             literal->raw_value()->AsString() ==
                 ast_value_factory()->use_strict_string() &&
             token_loc.end_pos - token_loc.beg_pos ==
                 ast_value_factory()->use_strict_string()->length() + 2;
-        bool use_strong_found =
-            allow_strong_mode() &&
-            literal->raw_value()->AsString() ==
-                ast_value_factory()->use_strong_string() &&
-            token_loc.end_pos - token_loc.beg_pos ==
-                ast_value_factory()->use_strong_string()->length() + 2;
-        if (use_strict_found || use_strong_found) {
-          // Strong mode implies strict mode. If there are several "use strict"
-          // / "use strong" directives, do the strict mode changes only once.
+        if (use_strict_found) {
           if (is_sloppy(scope_->language_mode())) {
             RaiseLanguageMode(STRICT);
           }
 
-          if (use_strong_found) {
-            RaiseLanguageMode(STRONG);
-            if (IsClassConstructor(function_state_->kind())) {
-              // "use strong" cannot occur in a class constructor body, to avoid
-              // unintuitive strong class object semantics.
-              ParserTraits::ReportMessageAt(
-                  token_loc, MessageTemplate::kStrongConstructorDirective);
-              *ok = false;
-              return nullptr;
-            }
-          }
           if (!scope_->HasSimpleParameters()) {
             // TC39 deemed "use strict" directives to be an error when occurring
             // in the body of a function with non-simple parameter list, on
             // 29/7/2015. https://goo.gl/ueA7Ln
-            //
-            // In V8, this also applies to "use strong " directives.
             const AstRawString* string = literal->raw_value()->AsString();
             ParserTraits::ReportMessageAt(
                 token_loc, MessageTemplate::kIllegalLanguageModeDirective,
@@ -1294,7 +1275,7 @@
     default:
       break;
   }
-  return ParseStatement(NULL, ok);
+  return ParseStatement(NULL, kAllowLabelledFunctionStatement, ok);
 }
 
 
@@ -1445,10 +1426,6 @@
       *ok = false;
       ReportMessage(MessageTemplate::kStrictEvalArguments);
       return NULL;
-    } else if (is_strong(language_mode()) && IsUndefined(local_name)) {
-      *ok = false;
-      ReportMessage(MessageTemplate::kStrongUndefined);
-      return NULL;
     }
     VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
     ImportDeclaration* declaration =
@@ -1594,9 +1571,9 @@
       if (peek() == Token::EXTENDS || peek() == Token::LBRACE) {
         // ClassDeclaration[+Default] ::
         //   'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
-        default_export =
-            ParseClassLiteral(default_string, Scanner::Location::invalid(),
-                              false, position(), CHECK_OK);
+        default_export = ParseClassLiteral(nullptr, default_string,
+                                           Scanner::Location::invalid(), false,
+                                           position(), CHECK_OK);
         result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
       } else {
         result = ParseClassDeclaration(&names, CHECK_OK);
@@ -1748,8 +1725,8 @@
   return result;
 }
 
-
 Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
+                                  AllowLabelledFunctionStatement allow_function,
                                   bool* ok) {
   // Statement ::
   //   EmptyStatement
@@ -1759,12 +1736,12 @@
     Next();
     return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
   }
-  return ParseSubStatement(labels, ok);
+  return ParseSubStatement(labels, allow_function, ok);
 }
 
-
-Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
-                                     bool* ok) {
+Statement* Parser::ParseSubStatement(
+    ZoneList<const AstRawString*>* labels,
+    AllowLabelledFunctionStatement allow_function, bool* ok) {
   // Statement ::
   //   Block
   //   VariableStatement
@@ -1793,12 +1770,6 @@
       return ParseBlock(labels, ok);
 
     case Token::SEMICOLON:
-      if (is_strong(language_mode())) {
-        ReportMessageAt(scanner()->peek_location(),
-                        MessageTemplate::kStrongEmpty);
-        *ok = false;
-        return NULL;
-      }
       Next();
       return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
 
@@ -1839,26 +1810,18 @@
     case Token::SWITCH:
       return ParseSwitchStatement(labels, ok);
 
-    case Token::FUNCTION: {
-      // FunctionDeclaration is only allowed in the context of SourceElements
-      // (Ecma 262 5th Edition, clause 14):
-      // SourceElement:
-      //    Statement
-      //    FunctionDeclaration
-      // Common language extension is to allow function declaration in place
-      // of any statement. This language extension is disabled in strict mode.
-      //
-      // In Harmony mode, this case also handles the extension:
-      // Statement:
-      //    GeneratorDeclaration
-      if (is_strict(language_mode())) {
-        ReportMessageAt(scanner()->peek_location(),
-                        MessageTemplate::kStrictFunction);
-        *ok = false;
-        return NULL;
-      }
-      return ParseFunctionDeclaration(NULL, ok);
-    }
+    case Token::FUNCTION:
+      // FunctionDeclaration only allowed as a StatementListItem, not in
+      // an arbitrary Statement position. Exceptions such as
+      // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+      // are handled by calling ParseScopedStatement rather than
+      // ParseSubStatement directly.
+      ReportMessageAt(scanner()->peek_location(),
+                      is_strict(language_mode())
+                          ? MessageTemplate::kStrictFunction
+                          : MessageTemplate::kSloppyFunction);
+      *ok = false;
+      return nullptr;
 
     case Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
@@ -1866,17 +1829,8 @@
     case Token::VAR:
       return ParseVariableStatement(kStatement, NULL, ok);
 
-    case Token::CONST:
-      // In ES6 CONST is not allowed as a Statement, only as a
-      // LexicalDeclaration, however we continue to allow it in sloppy mode for
-      // backwards compatibility.
-      if (is_sloppy(language_mode()) && allow_legacy_const()) {
-        return ParseVariableStatement(kStatement, NULL, ok);
-      }
-
-    // Fall through.
     default:
-      return ParseExpressionOrLabelledStatement(labels, ok);
+      return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
   }
 }
 
@@ -1958,13 +1912,6 @@
       }
       var = declaration_scope->DeclareLocal(
           name, mode, declaration->initialization(), kind, kNotAssigned);
-    } else if ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
-               !declaration_scope->is_script_scope()) {
-      // Duplicate legacy const definitions throw at runtime.
-      DCHECK(is_sloppy(language_mode()));
-      Expression* expression = NewThrowSyntaxError(
-          MessageTemplate::kVarRedeclaration, name, declaration->position());
-      declaration_scope->SetIllegalRedeclaration(expression);
     } else if ((IsLexicalVariableMode(mode) ||
                 IsLexicalVariableMode(var->mode())) &&
                // Lexical bindings may appear for some parameters in sloppy
@@ -2160,12 +2107,10 @@
   // In ES6, a function behaves as a lexical binding, except in
   // a script scope, or the initial scope of eval or another function.
   VariableMode mode =
-      is_strong(language_mode())
-          ? CONST
-          : (is_strict(language_mode()) || allow_harmony_sloppy_function()) &&
-                    !scope_->is_declaration_scope()
-                ? LET
-                : VAR;
+      (is_strict(language_mode()) || allow_harmony_sloppy_function()) &&
+      !scope_->is_declaration_scope()
+          ? LET
+          : VAR;
   VariableProxy* proxy = NewUnresolved(name, mode);
   Declaration* declaration =
       factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
@@ -2211,13 +2156,12 @@
   bool is_strict_reserved = false;
   const AstRawString* name =
       ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
-  ClassLiteral* value = ParseClassLiteral(name, scanner()->location(),
+  ClassLiteral* value = ParseClassLiteral(nullptr, name, scanner()->location(),
                                           is_strict_reserved, pos, CHECK_OK);
 
-  VariableMode mode = is_strong(language_mode()) ? CONST : LET;
-  VariableProxy* proxy = NewUnresolved(name, mode);
+  VariableProxy* proxy = NewUnresolved(name, LET);
   Declaration* declaration =
-      factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
+      factory()->NewVariableDeclaration(proxy, LET, scope_, pos);
   Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
   proxy->var()->set_initializer_position(position());
   Assignment* assignment =
@@ -2337,23 +2281,12 @@
   }
 
   if (peek() == Token::VAR) {
-    if (is_strong(language_mode())) {
-      Scanner::Location location = scanner()->peek_location();
-      ReportMessageAt(location, MessageTemplate::kStrongVar);
-      *ok = false;
-      return nullptr;
-    }
     Consume(Token::VAR);
   } else if (peek() == Token::CONST && allow_const()) {
     Consume(Token::CONST);
-    if (is_sloppy(language_mode()) && allow_legacy_const()) {
-      parsing_result->descriptor.mode = CONST_LEGACY;
-      ++use_counts_[v8::Isolate::kLegacyConst];
-    } else {
-      DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
-      DCHECK(var_context != kStatement);
-      parsing_result->descriptor.mode = CONST;
-    }
+    DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
+    DCHECK(var_context != kStatement);
+    parsing_result->descriptor.mode = CONST;
   } else if (peek() == Token::LET && allow_let()) {
     Consume(Token::LET);
     DCHECK(var_context != kStatement);
@@ -2378,17 +2311,11 @@
     int decl_pos = peek_position();
     {
       ExpressionClassifier pattern_classifier(this);
-      Token::Value next = peek();
       pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
       ValidateBindingPattern(&pattern_classifier, CHECK_OK);
       if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
         ValidateLetPattern(&pattern_classifier, CHECK_OK);
       }
-      if (!allow_harmony_destructuring_bind() && !pattern->IsVariableProxy()) {
-        ReportUnexpectedToken(next);
-        *ok = false;
-        return nullptr;
-      }
     }
 
     Scanner::Location variable_loc = scanner()->location();
@@ -2492,9 +2419,9 @@
   return false;
 }
 
-
 Statement* Parser::ParseExpressionOrLabelledStatement(
-    ZoneList<const AstRawString*>* labels, bool* ok) {
+    ZoneList<const AstRawString*>* labels,
+    AllowLabelledFunctionStatement allow_function, bool* ok) {
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
   //   Identifier ':' Statement
@@ -2513,42 +2440,6 @@
       *ok = false;
       return nullptr;
 
-    case Token::THIS:
-      if (!FLAG_strong_this) break;
-      // Fall through.
-    case Token::SUPER:
-      if (is_strong(language_mode()) &&
-          IsClassConstructor(function_state_->kind())) {
-        bool is_this = peek() == Token::THIS;
-        Expression* expr;
-        ExpressionClassifier classifier(this);
-        if (is_this) {
-          expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
-        } else {
-          expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
-        }
-        RewriteNonPattern(&classifier, CHECK_OK);
-        switch (peek()) {
-          case Token::SEMICOLON:
-            Consume(Token::SEMICOLON);
-            break;
-          case Token::RBRACE:
-          case Token::EOS:
-            break;
-          default:
-            if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
-              ReportMessageAt(function_state_->this_location(),
-                              is_this
-                                  ? MessageTemplate::kStrongConstructorThis
-                                  : MessageTemplate::kStrongConstructorSuper);
-              *ok = false;
-              return nullptr;
-            }
-        }
-        return factory()->NewExpressionStatement(expr, pos);
-      }
-      break;
-
     default:
       break;
   }
@@ -2581,7 +2472,15 @@
     // during the scope processing.
     scope_->RemoveUnresolved(var);
     Expect(Token::COLON, CHECK_OK);
-    return ParseStatement(labels, ok);
+    // ES#sec-labelled-function-declarations Labelled Function Declarations
+    if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
+      if (allow_function == kAllowLabelledFunctionStatement) {
+        return ParseFunctionDeclaration(labels, ok);
+      } else {
+        return ParseScopedStatement(labels, true, ok);
+      }
+    }
+    return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
   }
 
   // If we have an extension, we allow a native function declaration.
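[Editor's note, not part of the patch: the labelled-function path added above corresponds to the following sloppy-mode JavaScript; the identifiers are invented for illustration.]

  // Accepted in sloppy mode at statement level (labelled function declaration):
  foo: function f() { return 1; }
  console.log(typeof f);   // "function"
  // In strict mode, or where only a single statement is allowed
  // (e.g. `if (x) bar: function g() {}`), the same source is a SyntaxError.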
@@ -2621,11 +2520,11 @@
   Expect(Token::LPAREN, CHECK_OK);
   Expression* condition = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
-  Statement* then_statement = ParseSubStatement(labels, CHECK_OK);
+  Statement* then_statement = ParseScopedStatement(labels, false, CHECK_OK);
   Statement* else_statement = NULL;
   if (peek() == Token::ELSE) {
     Next();
-    else_statement = ParseSubStatement(labels, CHECK_OK);
+    else_statement = ParseScopedStatement(labels, false, CHECK_OK);
   } else {
     else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
   }
@@ -2724,15 +2623,6 @@
       return_value = GetLiteralUndefined(position());
     }
   } else {
-    if (is_strong(language_mode()) &&
-        IsClassConstructor(function_state_->kind())) {
-      int pos = peek_position();
-      ReportMessageAt(Scanner::Location(pos, pos + 1),
-                      MessageTemplate::kStrongConstructorReturnValue);
-      *ok = false;
-      return NULL;
-    }
-
     int pos = peek_position();
     return_value = ParseExpression(true, CHECK_OK);
 
@@ -2778,22 +2668,18 @@
     }
 
     // ES6 14.6.1 Static Semantics: IsInTailPosition
-    if (FLAG_harmony_tailcalls && !is_sloppy(language_mode())) {
+    if (allow_tailcalls() && !is_sloppy(language_mode())) {
       function_state_->AddExpressionInTailPosition(return_value);
     }
   }
   ExpectSemicolon(CHECK_OK);
 
   if (is_generator()) {
-    Expression* generator = factory()->NewVariableProxy(
-        function_state_->generator_object_variable());
-    Expression* yield = factory()->NewYield(
-        generator, return_value, Yield::kFinal, loc.beg_pos);
-    result = factory()->NewExpressionStatement(yield, loc.beg_pos);
-  } else {
-    result = factory()->NewReturnStatement(return_value, loc.beg_pos);
+    return_value = BuildIteratorResult(return_value, true);
   }
 
+  result = factory()->NewReturnStatement(return_value, loc.beg_pos);
+
   Scope* decl_scope = scope_->DeclarationScope();
   if (decl_scope->is_script_scope() || decl_scope->is_eval_scope()) {
     ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
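[Editor's note, not part of the patch: the observable effect of wrapping a generator's return value in a "done" iterator result is, roughly, the following.]

  function* g() {
    yield 1;
    return 42;               // now rewritten into return {value: 42, done: true}
  }
  const it = g();
  console.log(it.next());    // { value: 1, done: false }
  console.log(it.next());    // { value: 42, done: true }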
@@ -2822,27 +2708,11 @@
   Expression* expr = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
 
-  scope_->DeclarationScope()->RecordWithStatement();
   Scope* with_scope = NewScope(scope_, WITH_SCOPE);
-  Block* body;
+  Statement* body;
   { BlockState block_state(&scope_, with_scope);
     with_scope->set_start_position(scanner()->peek_location().beg_pos);
-
-    // The body of the with statement must be enclosed in an additional
-    // lexical scope in case the body is a FunctionDeclaration.
-    body = factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
-    Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
-    block_scope->set_start_position(scanner()->location().beg_pos);
-    {
-      BlockState block_state(&scope_, block_scope);
-      Target target(&this->target_stack_, body);
-      Statement* stmt = ParseSubStatement(labels, CHECK_OK);
-      body->statements()->Add(stmt, zone());
-      block_scope->set_end_position(scanner()->location().end_pos);
-      block_scope = block_scope->FinalizeBlockScope();
-      body->set_scope(block_scope);
-    }
-
+    body = ParseScopedStatement(labels, true, CHECK_OK);
     with_scope->set_end_position(scanner()->location().end_pos);
   }
   return factory()->NewWithStatement(with_scope, expr, body, pos);
@@ -2878,13 +2748,6 @@
     stat = ParseStatementListItem(CHECK_OK);
     statements->Add(stat, zone());
   }
-  if (is_strong(language_mode()) && stat != NULL && !stat->IsJump() &&
-      peek() != Token::RBRACE) {
-    ReportMessageAt(scanner()->location(),
-                    MessageTemplate::kStrongSwitchFallthrough);
-    *ok = false;
-    return NULL;
-  }
   return factory()->NewCaseClause(label, statements, pos);
 }
 
@@ -3108,8 +2971,11 @@
               pattern, pattern->position(),
               factory()->NewVariableProxy(catch_variable));
 
+          Block* init_block =
+              factory()->NewBlock(nullptr, 8, true, RelocInfo::kNoPosition);
           PatternRewriter::DeclareAndInitializeVariables(
-              catch_block, &descriptor, &decl, nullptr, CHECK_OK);
+              init_block, &descriptor, &decl, nullptr, CHECK_OK);
+          catch_block->statements()->Add(init_block, zone());
         }
 
         Expect(Token::LBRACE, CHECK_OK);
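[Editor's note, illustration only: the separate init_block above supports destructuring catch parameters such as the following; the thrown object is made up.]

  try {
    throw { code: 404, reason: "not found" };
  } catch ({ code, reason }) {        // the pattern is rewritten into the init block
    console.log(code, reason);        // 404 "not found"
  }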
@@ -3183,7 +3049,7 @@
   Target target(&this->target_stack_, loop);
 
   Expect(Token::DO, CHECK_OK);
-  Statement* body = ParseSubStatement(NULL, CHECK_OK);
+  Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
   Expect(Token::WHILE, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
 
@@ -3213,7 +3079,7 @@
   Expect(Token::LPAREN, CHECK_OK);
   Expression* cond = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
-  Statement* body = ParseSubStatement(NULL, CHECK_OK);
+  Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
 
   if (loop != NULL) loop->Initialize(cond, body);
   return loop;
@@ -3257,76 +3123,15 @@
       throw_call, pos);
 }
 
-
 void Parser::InitializeForEachStatement(ForEachStatement* stmt,
                                         Expression* each, Expression* subject,
-                                        Statement* body,
-                                        bool is_destructuring) {
-  DCHECK(!is_destructuring || allow_harmony_destructuring_assignment());
+                                        Statement* body) {
   ForOfStatement* for_of = stmt->AsForOfStatement();
-
   if (for_of != NULL) {
-    Variable* iterator = scope_->NewTemporary(
-        ast_value_factory()->dot_iterator_string());
-    Variable* result = scope_->NewTemporary(
-        ast_value_factory()->dot_result_string());
-
-    Expression* assign_iterator;
-    Expression* next_result;
-    Expression* result_done;
-    Expression* assign_each;
-
-    // iterator = subject[Symbol.iterator]()
-    // Hackily disambiguate o from o.next and o [Symbol.iterator]().
-    // TODO(verwaest): Come up with a better solution.
-    assign_iterator = factory()->NewAssignment(
-        Token::ASSIGN, factory()->NewVariableProxy(iterator),
-        GetIterator(subject, factory(), subject->position() - 2),
-        subject->position());
-
-    // !%_IsJSReceiver(result = iterator.next()) &&
-    //     %ThrowIteratorResultNotAnObject(result)
-    {
-      // result = iterator.next()
-      Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
-      // Hackily disambiguate o from o.next and o [Symbol.iterator]().
-      // TODO(verwaest): Come up with a better solution.
-      next_result = BuildIteratorNextResult(iterator_proxy, result,
-                                            subject->position() - 1);
-    }
-
-    // result.done
-    {
-      Expression* done_literal = factory()->NewStringLiteral(
-          ast_value_factory()->done_string(), RelocInfo::kNoPosition);
-      Expression* result_proxy = factory()->NewVariableProxy(result);
-      result_done = factory()->NewProperty(
-          result_proxy, done_literal, RelocInfo::kNoPosition);
-    }
-
-    // each = result.value
-    {
-      Expression* value_literal = factory()->NewStringLiteral(
-          ast_value_factory()->value_string(), RelocInfo::kNoPosition);
-      Expression* result_proxy = factory()->NewVariableProxy(result);
-      Expression* result_value = factory()->NewProperty(
-          result_proxy, value_literal, RelocInfo::kNoPosition);
-      assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
-                                             RelocInfo::kNoPosition);
-      if (is_destructuring) {
-        assign_each = PatternRewriter::RewriteDestructuringAssignment(
-            this, assign_each->AsAssignment(), scope_);
-      }
-    }
-
-    for_of->Initialize(each, subject, body,
-                       iterator,
-                       assign_iterator,
-                       next_result,
-                       result_done,
-                       assign_each);
+    InitializeForOfStatement(for_of, each, subject, body,
+                             RelocInfo::kNoPosition);
   } else {
-    if (is_destructuring) {
+    if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
       Variable* temp =
           scope_->NewTemporary(ast_value_factory()->empty_string());
       VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
@@ -3347,6 +3152,70 @@
   }
 }
 
+void Parser::InitializeForOfStatement(ForOfStatement* for_of, Expression* each,
+                                      Expression* iterable, Statement* body,
+                                      int iterable_pos) {
+  Variable* iterator =
+      scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
+  Variable* result =
+      scope_->NewTemporary(ast_value_factory()->dot_result_string());
+
+  Expression* assign_iterator;
+  Expression* next_result;
+  Expression* result_done;
+  Expression* assign_each;
+
+  // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+  // TODO(verwaest): Come up with a better solution.
+  int get_iterator_pos = iterable_pos != RelocInfo::kNoPosition
+                             ? iterable_pos
+                             : iterable->position() - 2;
+  int next_result_pos = iterable_pos != RelocInfo::kNoPosition
+                            ? iterable_pos
+                            : iterable->position() - 1;
+
+  // iterator = iterable[Symbol.iterator]()
+  assign_iterator = factory()->NewAssignment(
+      Token::ASSIGN, factory()->NewVariableProxy(iterator),
+      GetIterator(iterable, factory(), get_iterator_pos), iterable->position());
+
+  // !%_IsJSReceiver(result = iterator.next()) &&
+  //     %ThrowIteratorResultNotAnObject(result)
+  {
+    // result = iterator.next()
+    Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+    next_result =
+        BuildIteratorNextResult(iterator_proxy, result, next_result_pos);
+  }
+
+  // result.done
+  {
+    Expression* done_literal = factory()->NewStringLiteral(
+        ast_value_factory()->done_string(), RelocInfo::kNoPosition);
+    Expression* result_proxy = factory()->NewVariableProxy(result);
+    result_done = factory()->NewProperty(result_proxy, done_literal,
+                                         RelocInfo::kNoPosition);
+  }
+
+  // each = result.value
+  {
+    Expression* value_literal = factory()->NewStringLiteral(
+        ast_value_factory()->value_string(), RelocInfo::kNoPosition);
+    Expression* result_proxy = factory()->NewVariableProxy(result);
+    Expression* result_value = factory()->NewProperty(
+        result_proxy, value_literal, RelocInfo::kNoPosition);
+    assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
+                                           RelocInfo::kNoPosition);
+    if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
+      assign_each = PatternRewriter::RewriteDestructuringAssignment(
+          this, assign_each->AsAssignment(), scope_);
+    }
+  }
+
+  for_of->Initialize(each, iterable, body, iterator, assign_iterator,
+                     next_result, result_done, assign_each);
+}
+
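[Editor's note, not part of the patch: a rough JavaScript-level sketch of the desugaring InitializeForOfStatement builds. The internal temporaries (.iterator/.result) have no source-level names; the concrete iterable below is an assumption for illustration.]

  const iterable = [10, 20, 30];
  const iterator = iterable[Symbol.iterator]();   // assign_iterator
  let each;
  for (;;) {
    const result = iterator.next();               // next_result
    if (Object(result) !== result) {              // ~ %_IsJSReceiver check
      throw new TypeError("iterator result is not an object");
    }
    if (result.done) break;                       // result_done
    each = result.value;                          // assign_each
    console.log(each);                            // loop body
  }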
 Statement* Parser::DesugarLexicalBindingsInForStatement(
     Scope* inner_scope, VariableMode mode, ZoneList<const AstRawString*>* names,
     ForStatement* loop, Statement* init, Expression* cond, Statement* next,
@@ -3595,6 +3464,28 @@
   return outer_block;
 }
 
+Statement* Parser::ParseScopedStatement(ZoneList<const AstRawString*>* labels,
+                                        bool legacy, bool* ok) {
+  if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
+      (legacy && allow_harmony_restrictive_declarations())) {
+    return ParseSubStatement(labels, kDisallowLabelledFunctionStatement, ok);
+  } else {
+    if (legacy) {
+      ++use_counts_[v8::Isolate::kLegacyFunctionDeclaration];
+    }
+    // Make a block around the statement in case a lexical binding
+    // is introduced by a FunctionDeclaration.
+    Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
+    BlockState block_state(&scope_, body_scope);
+    Block* block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
+    Statement* body = ParseFunctionDeclaration(NULL, CHECK_OK);
+    block->statements()->Add(body, zone());
+    body_scope->set_end_position(scanner()->location().end_pos);
+    body_scope = body_scope->FinalizeBlockScope();
+    block->set_scope(body_scope);
+    return block;
+  }
+}
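[Editor's note, orientation only: the legacy construct that ParseScopedStatement wraps in a block looks like this in sloppy-mode JavaScript; the exact binding behaviour follows Annex B web-compatibility semantics.]

  // Sloppy mode only: a bare function declaration as an `if` body is
  // legacy (Annex B) syntax, now parsed via the block-wrapping path above.
  if (true) function f() { return 1; }
  console.log(typeof f);   // "function" under Annex B semantics; SyntaxError in strict mode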
 
 Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
                                      bool* ok) {
@@ -3617,7 +3508,7 @@
       ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
                                 CHECK_OK);
 
-      ForEachStatement::VisitMode mode;
+      ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
       int each_beg_pos = scanner()->location().beg_pos;
       int each_end_pos = scanner()->location().end_pos;
 
@@ -3706,9 +3597,11 @@
             factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
 
         {
+          DontCollectExpressionsInTailPositionScope no_tail_calls(
+              function_state_);
           BlockState block_state(&scope_, body_scope);
 
-          Statement* body = ParseSubStatement(NULL, CHECK_OK);
+          Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
 
           auto each_initialization_block =
               factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
@@ -3729,8 +3622,7 @@
           body_block->statements()->Add(body, zone());
           VariableProxy* temp_proxy =
               factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
-          InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
-                                     false);
+          InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
         }
         body_scope->set_end_position(scanner()->location().end_pos);
         body_scope = body_scope->FinalizeBlockScope();
@@ -3785,7 +3677,7 @@
       ExpressionClassifier classifier(this);
       Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
       int lhs_end_pos = scanner()->location().end_pos;
-      ForEachStatement::VisitMode mode;
+      ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
       is_let_identifier_expression =
           expression->IsVariableProxy() &&
           expression->AsVariableProxy()->raw_name() ==
@@ -3793,9 +3685,8 @@
 
       bool is_for_each = CheckInOrOf(&mode, ok);
       if (!*ok) return nullptr;
-      bool is_destructuring =
-          is_for_each && allow_harmony_destructuring_assignment() &&
-          (expression->IsArrayLiteral() || expression->IsObjectLiteral());
+      bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
+                                              expression->IsObjectLiteral());
 
       if (is_destructuring) {
         ValidateAssignmentPattern(&classifier, CHECK_OK);
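[Editor's note, illustration only: the now-unflagged destructuring-assignment case recognized here covers loops such as the following.]

  let a, b;
  for ([a, b] of [[1, 2], [3, 4]]) {   // array literal as the loop target
    console.log(a + b);                // 3, then 7
  }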
@@ -3825,25 +3716,10 @@
 
         Expect(Token::RPAREN, CHECK_OK);
 
-        // Make a block around the statement in case a lexical binding
-        // is introduced, e.g. by a FunctionDeclaration.
-        // This block must not use for_scope as its scope because if a
-        // lexical binding is introduced which overlaps with the for-in/of,
-        // expressions in head of the loop should actually have variables
-        // resolved in the outer scope.
-        Scope* body_scope = NewScope(for_scope, BLOCK_SCOPE);
-        {
-          BlockState block_state(&scope_, body_scope);
-          Block* block =
-              factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
-          Statement* body = ParseSubStatement(NULL, CHECK_OK);
-          block->statements()->Add(body, zone());
-          InitializeForEachStatement(loop, expression, enumerable, block,
-                                     is_destructuring);
-          body_scope->set_end_position(scanner()->location().end_pos);
-          body_scope = body_scope->FinalizeBlockScope();
-          block->set_scope(body_scope);
-        }
+        // For legacy compatibility reasons, give for loops similar treatment
+        // to if statements in allowing a function declaration as the body.
+        Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
+        InitializeForEachStatement(loop, expression, enumerable, body);
 
         Statement* final_loop = loop->IsForOfStatement()
             ? FinalizeForOfStatement(
@@ -3900,7 +3776,7 @@
     }
     Expect(Token::RPAREN, CHECK_OK);
 
-    body = ParseSubStatement(NULL, CHECK_OK);
+    body = ParseScopedStatement(NULL, true, CHECK_OK);
   }
 
   Statement* result = NULL;
@@ -4064,7 +3940,6 @@
     parser_->scope_->RemoveUnresolved(expr->AsVariableProxy());
   } else if (expr->IsAssignment()) {
     Assignment* assignment = expr->AsAssignment();
-    DCHECK(parser_->allow_harmony_default_parameters());
     DCHECK(!assignment->is_compound());
     initializer = assignment->value();
     expr = assignment->target();
@@ -4227,7 +4102,8 @@
     if (is_generator) {
       // For generators, allocating variables in contexts is currently a win
       // because it minimizes the work needed to suspend and resume an
-      // activation.
+      // activation.  The machine code produced for generators (by full-codegen)
+      // relies on this forced context allocation, but not in an essential way.
       scope_->ForceContextAllocation();
 
       // Calling a generator returns a generator object.  That object is stored
@@ -4347,7 +4223,7 @@
       // temp_zone is deallocated. These objects are instead allocated in a
       // parser-persistent zone (see parser_zone_ in AstNodeFactory).
       {
-        Zone temp_zone;
+        Zone temp_zone(zone()->allocator());
         AstNodeFactory::BodyScope inner(factory(), &temp_zone, use_temp_zone);
 
         body = ParseEagerFunctionBody(function_name, pos, formals, kind,
@@ -4365,16 +4241,6 @@
     // Parsing the body may change the language mode in our scope.
     language_mode = scope->language_mode();
 
-    if (is_strong(language_mode) && IsSubclassConstructor(kind)) {
-      if (!function_state.super_location().IsValid()) {
-        ReportMessageAt(function_name_location,
-                        MessageTemplate::kStrongSuperCallMissing,
-                        kReferenceError);
-        *ok = false;
-        return nullptr;
-      }
-    }
-
     // Validate name and parameter names. We can do this only after parsing the
     // function, since the function can declare itself strict.
     CheckFunctionName(language_mode, function_name, function_name_validity,
@@ -4391,10 +4257,7 @@
     if (is_sloppy(language_mode) && allow_harmony_sloppy_function()) {
       InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
     }
-    if (is_strict(language_mode) || allow_harmony_sloppy() ||
-        allow_harmony_destructuring_bind()) {
-      CheckConflictingVarDeclarations(scope, CHECK_OK);
-    }
+    CheckConflictingVarDeclarations(scope, CHECK_OK);
 
     if (body) {
       // If body can be inspected, rewrite queued destructuring assignments
@@ -4677,15 +4540,12 @@
     if (IsGeneratorFunction(kind)) {
       // We produce:
       //
-      // try { InitialYield; ...body...; FinalYield }
+      // try { InitialYield; ...body...; return {value: undefined, done: true} }
       // finally { %GeneratorClose(generator) }
       //
       // - InitialYield yields the actual generator object.
-      // - FinalYield yields {value: foo, done: true} where foo is the
-      //   completion value of body.  (This is needed here in case the body
-      //   falls through without an explicit return.)
-      // - Any return statement inside the body will be converted into a similar
-      //   FinalYield.
+      // - Any return statement inside the body will have its argument wrapped
+      //   in a "done" iterator result object.
       // - If the generator terminates for whatever reason, we must close it.
       //   Hence the finally clause.
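[Editor's note, not part of the diff: a small sketch of the fall-through behaviour the implicit final return provides.]

  function* g() {
    yield 1;
    // falls through: the implicit `return {value: undefined, done: true}` runs
  }
  const it = g();
  console.log(it.next());   // { value: 1, done: false }
  console.log(it.next());   // { value: undefined, done: true }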
 
@@ -4703,8 +4563,8 @@
             Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
         VariableProxy* get_proxy = factory()->NewVariableProxy(
             function_state_->generator_object_variable());
-        Yield* yield = factory()->NewYield(
-            get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
+        Yield* yield =
+            factory()->NewYield(get_proxy, assignment, RelocInfo::kNoPosition);
         try_block->statements()->Add(
             factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
             zone());
@@ -4712,15 +4572,9 @@
 
       ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
 
-      VariableProxy* get_proxy = factory()->NewVariableProxy(
-          function_state_->generator_object_variable());
-      Expression* undefined =
-          factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
-      Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
-                                         RelocInfo::kNoPosition);
-      try_block->statements()->Add(
-          factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
-          zone());
+      Statement* final_return = factory()->NewReturnStatement(
+          BuildIteratorResult(nullptr, true), RelocInfo::kNoPosition);
+      try_block->statements()->Add(final_return, zone());
 
       Block* finally_block =
           factory()->NewBlock(nullptr, 1, false, RelocInfo::kNoPosition);
@@ -4801,7 +4655,7 @@
   const List<Expression*>& expressions_in_tail_position =
       function_state_->expressions_in_tail_position();
   for (int i = 0; i < expressions_in_tail_position.length(); ++i) {
-    expressions_in_tail_position[i]->MarkTail();
+    MarkTailPosition(expressions_in_tail_position[i]);
   }
   return result;
 }
@@ -4825,14 +4679,13 @@
 #define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
     SET_ALLOW(natives);
     SET_ALLOW(harmony_sloppy);
+    SET_ALLOW(harmony_sloppy_function);
     SET_ALLOW(harmony_sloppy_let);
-    SET_ALLOW(harmony_default_parameters);
-    SET_ALLOW(harmony_destructuring_bind);
-    SET_ALLOW(harmony_destructuring_assignment);
-    SET_ALLOW(strong_mode);
     SET_ALLOW(harmony_do_expressions);
     SET_ALLOW(harmony_function_name);
     SET_ALLOW(harmony_function_sent);
+    SET_ALLOW(harmony_exponentiation_operator);
+    SET_ALLOW(harmony_restrictive_declarations);
 #undef SET_ALLOW
   }
   PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
@@ -4844,8 +4697,8 @@
   return result;
 }
 
-
-ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
+ClassLiteral* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
+                                        const AstRawString* name,
                                         Scanner::Location class_name_location,
                                         bool name_is_strict_reserved, int pos,
                                         bool* ok) {
@@ -4861,11 +4714,6 @@
     *ok = false;
     return NULL;
   }
-  if (is_strong(language_mode()) && IsUndefined(name)) {
-    ReportMessageAt(class_name_location, MessageTemplate::kStrongUndefined);
-    *ok = false;
-    return NULL;
-  }
 
   Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
   BlockState block_state(&scope_, block_scope);
@@ -4883,9 +4731,13 @@
   Expression* extends = NULL;
   if (Check(Token::EXTENDS)) {
     block_scope->set_start_position(scanner()->location().end_pos);
-    ExpressionClassifier classifier(this);
-    extends = ParseLeftHandSideExpression(&classifier, CHECK_OK);
-    RewriteNonPattern(&classifier, CHECK_OK);
+    ExpressionClassifier extends_classifier(this);
+    extends = ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+    RewriteNonPattern(&extends_classifier, CHECK_OK);
+    if (classifier != nullptr) {
+      classifier->Accumulate(&extends_classifier,
+                             ExpressionClassifier::ExpressionProductions);
+    }
   } else {
     block_scope->set_start_position(scanner()->location().end_pos);
   }
@@ -4906,12 +4758,16 @@
     const bool is_static = false;
     bool is_computed_name = false;  // Classes do not care about computed
                                     // property names here.
-    ExpressionClassifier classifier(this);
+    ExpressionClassifier property_classifier(this);
     const AstRawString* property_name = nullptr;
     ObjectLiteral::Property* property = ParsePropertyDefinition(
         &checker, in_class, has_extends, is_static, &is_computed_name,
-        &has_seen_constructor, &classifier, &property_name, CHECK_OK);
-    RewriteNonPattern(&classifier, CHECK_OK);
+        &has_seen_constructor, &property_classifier, &property_name, CHECK_OK);
+    RewriteNonPattern(&property_classifier, CHECK_OK);
+    if (classifier != nullptr) {
+      classifier->Accumulate(&property_classifier,
+                             ExpressionClassifier::ExpressionProductions);
+    }
 
     if (has_seen_constructor && constructor == NULL) {
       constructor = GetPropertyValue(property)->AsFunctionLiteral();
@@ -4938,8 +4794,8 @@
                                      end_pos, block_scope->language_mode());
   }
 
-  // Note that we do not finalize this block scope because strong
-  // mode uses it as a sentinel value indicating an anonymous class.
+  // Note that we do not finalize this block scope because it is
+  // used as a sentinel value indicating an anonymous class.
   block_scope->set_end_position(end_pos);
 
   if (name != NULL) {
@@ -4997,7 +4853,7 @@
 
     // Check that the expected number of arguments are being passed.
     if (function->nargs != -1 && function->nargs != args->length()) {
-      ReportMessage(MessageTemplate::kIllegalAccess);
+      ReportMessage(MessageTemplate::kRuntimeWrongNumArgs);
       *ok = false;
       return NULL;
     }
@@ -5331,12 +5187,11 @@
     ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(4, zone());
     args->Add(factory()->NewArrayLiteral(
                   const_cast<ZoneList<Expression*>*>(cooked_strings),
-                  cooked_idx, is_strong(language_mode()), pos),
+                  cooked_idx, pos),
               zone());
     args->Add(
         factory()->NewArrayLiteral(
-            const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx,
-            is_strong(language_mode()), pos),
+            const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
         zone());
 
     // Ensure hash is suitable as a Smi value
@@ -5425,7 +5280,6 @@
         }
         int literal_index = function_state_->NextMaterializedLiteralIndex();
         args->Add(factory()->NewArrayLiteral(unspread, literal_index,
-                                             is_strong(language_mode()),
                                              RelocInfo::kNoPosition),
                   zone());
 
@@ -5511,8 +5365,6 @@
   v8::Isolate::UseCounterFeature feature;
   if (is_sloppy(mode))
     feature = v8::Isolate::kSloppyMode;
-  else if (is_strong(mode))
-    feature = v8::Isolate::kStrongMode;
   else if (is_strict(mode))
     feature = v8::Isolate::kStrictMode;
   else
@@ -5523,8 +5375,8 @@
 
 
 void Parser::RaiseLanguageMode(LanguageMode mode) {
-  SetLanguageMode(scope_,
-                  static_cast<LanguageMode>(scope_->language_mode() | mode));
+  LanguageMode old = scope_->language_mode();
+  SetLanguageMode(scope_, old > mode ? old : mode);
 }
 
 
@@ -5532,6 +5384,16 @@
   parser_->RewriteDestructuringAssignments();
 }
 
+Expression* ParserTraits::RewriteExponentiation(Expression* left,
+                                                Expression* right, int pos) {
+  return parser_->RewriteExponentiation(left, right, pos);
+}
+
+Expression* ParserTraits::RewriteAssignExponentiation(Expression* left,
+                                                      Expression* right,
+                                                      int pos) {
+  return parser_->RewriteAssignExponentiation(left, right, pos);
+}
 
 void ParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
                                      bool* ok) {
@@ -5605,7 +5467,6 @@
 
 
 void Parser::RewriteDestructuringAssignments() {
-  if (!allow_harmony_destructuring_assignment()) return;
   const auto& assignments =
       function_state_->destructuring_assignments_to_rewrite();
   for (int i = assignments.length() - 1; i >= 0; --i) {
@@ -5622,6 +5483,60 @@
   }
 }
 
+Expression* Parser::RewriteExponentiation(Expression* left, Expression* right,
+                                          int pos) {
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+  args->Add(left, zone());
+  args->Add(right, zone());
+  return factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+}
+
+Expression* Parser::RewriteAssignExponentiation(Expression* left,
+                                                Expression* right, int pos) {
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+  if (left->IsVariableProxy()) {
+    VariableProxy* lhs = left->AsVariableProxy();
+
+    Expression* result;
+    DCHECK_NOT_NULL(lhs->raw_name());
+    result =
+        this->ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
+                                       lhs->end_position(), scope_, factory());
+    args->Add(left, zone());
+    args->Add(right, zone());
+    Expression* call =
+        factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+    return factory()->NewAssignment(Token::ASSIGN, result, call, pos);
+  } else if (left->IsProperty()) {
+    Property* prop = left->AsProperty();
+    auto temp_obj = scope_->NewTemporary(ast_value_factory()->empty_string());
+    auto temp_key = scope_->NewTemporary(ast_value_factory()->empty_string());
+    Expression* assign_obj = factory()->NewAssignment(
+        Token::ASSIGN, factory()->NewVariableProxy(temp_obj), prop->obj(),
+        RelocInfo::kNoPosition);
+    Expression* assign_key = factory()->NewAssignment(
+        Token::ASSIGN, factory()->NewVariableProxy(temp_key), prop->key(),
+        RelocInfo::kNoPosition);
+    args->Add(factory()->NewProperty(factory()->NewVariableProxy(temp_obj),
+                                     factory()->NewVariableProxy(temp_key),
+                                     left->position()),
+              zone());
+    args->Add(right, zone());
+    Expression* call =
+        factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+    Expression* target = factory()->NewProperty(
+        factory()->NewVariableProxy(temp_obj),
+        factory()->NewVariableProxy(temp_key), RelocInfo::kNoPosition);
+    Expression* assign =
+        factory()->NewAssignment(Token::ASSIGN, target, call, pos);
+    return factory()->NewBinaryOperation(
+        Token::COMMA, assign_obj,
+        factory()->NewBinaryOperation(Token::COMMA, assign_key, assign, pos),
+        pos);
+  }
+  UNREACHABLE();
+  return nullptr;
+}
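[Editor's note, a hedged sketch of the semantics the Math.pow-based rewrite has to preserve; obj/key are invented names. Evaluating the property reference exactly once is what the temporaries above exist for.]

  let keyCalls = 0;
  const obj = { p: 2 };
  function key() { keyCalls++; return "p"; }
  obj[key()] **= 10;                  // rewritten to a %math_pow-style call
  console.log(obj.p, keyCalls);       // 1024 1  (object and key evaluated once)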
 
 Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
   // Array literals containing spreads are rewritten using do expressions, e.g.
@@ -5673,45 +5588,6 @@
       Variable* each =
           scope_->NewTemporary(ast_value_factory()->dot_for_string());
       Expression* subject = spread->expression();
-      Variable* iterator =
-          scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
-      Variable* element =
-          scope_->NewTemporary(ast_value_factory()->dot_result_string());
-      // iterator = subject[Symbol.iterator]()
-      Expression* assign_iterator = factory()->NewAssignment(
-          Token::ASSIGN, factory()->NewVariableProxy(iterator),
-          GetIterator(subject, factory(), spread->expression_position()),
-          subject->position());
-      // !%_IsJSReceiver(element = iterator.next()) &&
-      //     %ThrowIteratorResultNotAnObject(element)
-      Expression* next_element;
-      {
-        // element = iterator.next()
-        Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
-        next_element = BuildIteratorNextResult(iterator_proxy, element,
-                                               spread->expression_position());
-      }
-      // element.done
-      Expression* element_done;
-      {
-        Expression* done_literal = factory()->NewStringLiteral(
-            ast_value_factory()->done_string(), RelocInfo::kNoPosition);
-        Expression* element_proxy = factory()->NewVariableProxy(element);
-        element_done = factory()->NewProperty(element_proxy, done_literal,
-                                              RelocInfo::kNoPosition);
-      }
-      // each = element.value
-      Expression* assign_each;
-      {
-        Expression* value_literal = factory()->NewStringLiteral(
-            ast_value_factory()->value_string(), RelocInfo::kNoPosition);
-        Expression* element_proxy = factory()->NewVariableProxy(element);
-        Expression* element_value = factory()->NewProperty(
-            element_proxy, value_literal, RelocInfo::kNoPosition);
-        assign_each = factory()->NewAssignment(
-            Token::ASSIGN, factory()->NewVariableProxy(each), element_value,
-            RelocInfo::kNoPosition);
-      }
       // %AppendElement($R, each)
       Statement* append_body;
       {
@@ -5728,11 +5604,10 @@
       // for (each of spread) %AppendElement($R, each)
       ForEachStatement* loop = factory()->NewForEachStatement(
           ForEachStatement::ITERATE, nullptr, RelocInfo::kNoPosition);
-      ForOfStatement* for_of = loop->AsForOfStatement();
-      for_of->Initialize(factory()->NewVariableProxy(each), subject,
-                         append_body, iterator, assign_iterator, next_element,
-                         element_done, assign_each);
-      do_block->statements()->Add(for_of, zone());
+      InitializeForOfStatement(loop->AsForOfStatement(),
+                               factory()->NewVariableProxy(each), subject,
+                               append_body, spread->expression_position());
+      do_block->statements()->Add(loop, zone());
     }
   }
   // Now, rewind the original array literal to truncate everything from the
@@ -6055,9 +5930,8 @@
 
     Block* then = factory->NewBlock(nullptr, 4+1, false, nopos);
     Variable* var_tmp = scope->NewTemporary(avfactory->empty_string());
-    BuildIteratorClose(
-        then->statements(), var_iterator, factory->NewUndefinedLiteral(nopos),
-        var_tmp);
+    BuildIteratorClose(then->statements(), var_iterator, Nothing<Variable*>(),
+                       var_tmp);
     then->statements()->Add(throw_call, zone);
     check_throw = factory->NewIfStatement(
         condition, then, factory->NewEmptyStatement(nopos), nopos);
@@ -6128,13 +6002,11 @@
     set_mode_return = factory->NewExpressionStatement(assignment, nopos);
   }
 
-
-  // RawYield(output);
+  // Yield(output);
   Statement* yield_output;
   {
     Expression* output_proxy = factory->NewVariableProxy(var_output);
-    Yield* yield = factory->NewYield(
-        generator, output_proxy, Yield::kInitial, nopos);
+    Yield* yield = factory->NewYield(generator, output_proxy, nopos);
     yield_output = factory->NewExpressionStatement(yield, nopos);
   }
 
@@ -6232,8 +6104,7 @@
     case_next->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
 
     auto case_return = new (zone) ZoneList<Statement*>(5, zone);
-    BuildIteratorClose(case_return, var_iterator,
-                       factory->NewVariableProxy(var_input, nopos), var_output);
+    BuildIteratorClose(case_return, var_iterator, Just(var_input), var_output);
     case_return->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
 
     auto case_throw = new (zone) ZoneList<Statement*>(5, zone);
@@ -6311,7 +6182,8 @@
 //       if (!IS_CALLABLE(C)) {
 //         throw MakeTypeError(kCalledNonCallableInstanceOf);
 //       }
-//       handler_result = %ordinary_has_instance(C, O);
+//       handler_result = %_GetOrdinaryHasInstance()
+//       handler_result = %_Call(handler_result, C, O);
 //     } else {
 //       handler_result = !!(%_Call(handler_result, C, O));
 //     }
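[Editor's note, context only: the two branches sketched in the comment above correspond to the following observable JavaScript behaviour.]

  class Even {
    static [Symbol.hasInstance](x) { return typeof x === "number" && x % 2 === 0; }
  }
  console.log(4 instanceof Even);     // true  -- custom @@hasInstance handler branch
  console.log([] instanceof Array);   // true  -- ordinary has-instance (prototype walk)
  // ({}) instanceof {}               // TypeError: right-hand side is not callable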
@@ -6356,8 +6228,8 @@
         factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
     Expression* call =
         NewThrowTypeError(MessageTemplate::kNonObjectInInstanceOfCheck,
-                          avfactory->empty_string(), nopos);
-    Statement* throw_call = factory->NewExpressionStatement(call, nopos);
+                          avfactory->empty_string(), pos);
+    Statement* throw_call = factory->NewExpressionStatement(call, pos);
 
     validate_C =
         factory->NewIfStatement(is_receiver_call,
@@ -6384,7 +6256,8 @@
   //   if (!IS_CALLABLE(C)) {
   //     throw MakeTypeError(kCalledNonCallableInstanceOf);
   //   }
-  //   result = %ordinary_has_instance(C, O);
+  //   handler_result = %_GetOrdinaryHasInstance()
+  //   handler_result = %_Call(handler_result, C, O);
   // } else {
   //   handler_result = !!%_Call(handler_result, C, O);
   // }
@@ -6394,17 +6267,29 @@
         Token::EQ_STRICT, factory->NewVariableProxy(var_handler_result),
         factory->NewUndefinedLiteral(nopos), nopos);
 
-    Block* then_side = factory->NewBlock(nullptr, 2, false, nopos);
+    Block* then_side = factory->NewBlock(nullptr, 3, false, nopos);
     {
       Expression* throw_expr =
           NewThrowTypeError(MessageTemplate::kCalledNonCallableInstanceOf,
-                            avfactory->empty_string(), nopos);
-      Statement* validate_C = CheckCallable(var_C, throw_expr);
-      ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
+                            avfactory->empty_string(), pos);
+      Statement* validate_C = CheckCallable(var_C, throw_expr, pos);
+
+      ZoneList<Expression*>* empty_args =
+          new (zone) ZoneList<Expression*>(0, zone);
+      Expression* ordinary_has_instance = factory->NewCallRuntime(
+          Runtime::kInlineGetOrdinaryHasInstance, empty_args, pos);
+      Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
+      Expression* assignment_handler = factory->NewAssignment(
+          Token::ASSIGN, handler_proxy, ordinary_has_instance, nopos);
+      Statement* assignment_get_handler =
+          factory->NewExpressionStatement(assignment_handler, nopos);
+
+      ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(3, zone);
+      args->Add(factory->NewVariableProxy(var_handler_result), zone);
       args->Add(factory->NewVariableProxy(var_C), zone);
       args->Add(factory->NewVariableProxy(var_O), zone);
-      CallRuntime* call = factory->NewCallRuntime(
-          Context::ORDINARY_HAS_INSTANCE_INDEX, args, pos);
+      Expression* call =
+          factory->NewCallRuntime(Runtime::kInlineCall, args, pos);
       Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
       Expression* assignment =
           factory->NewAssignment(Token::ASSIGN, result_proxy, call, nopos);
@@ -6412,6 +6297,7 @@
           factory->NewExpressionStatement(assignment, nopos);
 
       then_side->statements()->Add(validate_C, zone);
+      then_side->statements()->Add(assignment_get_handler, zone);
       then_side->statements()->Add(assignment_return, zone);
     }
 
@@ -6455,7 +6341,8 @@
   return instanceof;
 }
 
-Statement* ParserTraits::CheckCallable(Variable* var, Expression* error) {
+Statement* ParserTraits::CheckCallable(Variable* var, Expression* error,
+                                       int pos) {
   auto factory = parser_->factory();
   auto avfactory = parser_->ast_value_factory();
   const int nopos = RelocInfo::kNoPosition;
@@ -6468,7 +6355,7 @@
     Expression* condition = factory->NewCompareOperation(
         Token::EQ_STRICT, type_of, function_literal, nopos);
 
-    Statement* throw_call = factory->NewExpressionStatement(error, nopos);
+    Statement* throw_call = factory->NewExpressionStatement(error, pos);
 
     validate_var = factory->NewIfStatement(
         condition, factory->NewEmptyStatement(nopos), throw_call, nopos);
@@ -6478,17 +6365,21 @@
 
 void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
                                       Variable* iterator,
-                                      Expression* input,
+                                      Maybe<Variable*> input,
                                       Variable* var_output) {
   //
   // This function adds four statements to [statements], corresponding to the
   // following code:
   //
   //   let iteratorReturn = iterator.return;
-  //   if (IS_NULL_OR_UNDEFINED(iteratorReturn) return input;
-  //   output = %_Call(iteratorReturn, iterator);
+  //   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return |input|;
+  //   output = %_Call(iteratorReturn, iterator|, input|);
   //   if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
   //
+  // Here, |...| denotes optional parts, depending on the presence of the
+  // input variable.  The reason for allowing input is that BuildIteratorClose
+  // can then be reused to handle the return case in yield*.
+  //
 
   const int nopos = RelocInfo::kNoPosition;
   auto factory = parser_->factory();
@@ -6510,25 +6401,33 @@
     get_return = factory->NewExpressionStatement(assignment, nopos);
   }
 
-  // if (IS_NULL_OR_UNDEFINED(iteratorReturn) return input;
+  // if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return |input|;
   Statement* check_return;
   {
     Expression* condition = factory->NewCompareOperation(
         Token::EQ, factory->NewVariableProxy(var_return),
         factory->NewNullLiteral(nopos), nopos);
 
-    Statement* return_input = factory->NewReturnStatement(input, nopos);
+    Expression* value = input.IsJust()
+                            ? static_cast<Expression*>(
+                                  factory->NewVariableProxy(input.FromJust()))
+                            : factory->NewUndefinedLiteral(nopos);
+
+    Statement* return_input = factory->NewReturnStatement(value, nopos);
 
     check_return = factory->NewIfStatement(
         condition, return_input, factory->NewEmptyStatement(nopos), nopos);
   }
 
-  // output = %_Call(iteratorReturn, iterator);
+  // output = %_Call(iteratorReturn, iterator, |input|);
   Statement* call_return;
   {
     auto args = new (zone) ZoneList<Expression*>(3, zone);
     args->Add(factory->NewVariableProxy(var_return), zone);
     args->Add(factory->NewVariableProxy(iterator), zone);
+    if (input.IsJust()) {
+      args->Add(factory->NewVariableProxy(input.FromJust()), zone);
+    }
 
     Expression* call =
         factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
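[Editor's note, not part of the patch: the optional |input| argument exists for the yield* return case; a rough usage sketch with invented identifiers.]

  function* inner() {
    try { yield 1; yield 2; } finally { console.log("inner closed"); }
  }
  function* outer() { yield* inner(); }
  const it = outer();
  it.next();                       // { value: 1, done: false }
  console.log(it.return(42));      // logs "inner closed", then { value: 42, done: true }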
@@ -6568,9 +6467,124 @@
   statements->Add(validate_output, zone);
 }
 
+void ParserTraits::FinalizeIteratorUse(Variable* completion,
+                                       Expression* condition, Variable* iter,
+                                       Block* iterator_use, Block* target) {
+  if (!FLAG_harmony_iterator_close) return;
 
-// Runtime encoding of different completion modes.
-enum ForOfLoopBodyCompletion { BODY_COMPLETED, BODY_ABORTED, BODY_THREW };
+  //
+  // This function adds two statements to [target], corresponding to the
+  // following code:
+  //
+  //   completion = kNormalCompletion;
+  //   try {
+  //     try {
+  //       iterator_use
+  //     } catch(e) {
+  //       if (completion === kAbruptCompletion) completion = kThrowCompletion;
+  //       %ReThrow(e);
+  //     }
+  //   } finally {
+  //     if (condition) {
+  //       #BuildIteratorCloseForCompletion(iter, completion)
+  //     }
+  //   }
+  //
+
+  const int nopos = RelocInfo::kNoPosition;
+  auto factory = parser_->factory();
+  auto avfactory = parser_->ast_value_factory();
+  auto scope = parser_->scope_;
+  auto zone = parser_->zone();
+
+  // completion = kNormalCompletion;
+  Statement* initialize_completion;
+  {
+    Expression* proxy = factory->NewVariableProxy(completion);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, proxy,
+        factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
+    initialize_completion = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // if (completion === kAbruptCompletion) completion = kThrowCompletion;
+  Statement* set_completion_throw;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ_STRICT, factory->NewVariableProxy(completion),
+        factory->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
+
+    Expression* proxy = factory->NewVariableProxy(completion);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, proxy,
+        factory->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
+    Statement* statement = factory->NewExpressionStatement(assignment, nopos);
+    set_completion_throw = factory->NewIfStatement(
+        condition, statement, factory->NewEmptyStatement(nopos), nopos);
+  }
+
+  // if (condition) {
+  //   #BuildIteratorCloseForCompletion(iter, completion)
+  // }
+  Block* maybe_close;
+  {
+    Block* block = factory->NewBlock(nullptr, 2, true, nopos);
+    parser_->BuildIteratorCloseForCompletion(block->statements(), iter,
+                                             completion);
+    DCHECK(block->statements()->length() == 2);
+
+    maybe_close = factory->NewBlock(nullptr, 1, true, nopos);
+    maybe_close->statements()->Add(
+        factory->NewIfStatement(condition, block,
+                                factory->NewEmptyStatement(nopos), nopos),
+        zone);
+  }
+
+  // try { #try_block }
+  // catch(e) {
+  //   #set_completion_throw;
+  //   %ReThrow(e);
+  // }
+  Statement* try_catch;
+  {
+    Scope* catch_scope = parser_->NewScope(scope, CATCH_SCOPE);
+    Variable* catch_variable =
+        catch_scope->DeclareLocal(avfactory->dot_catch_string(), VAR,
+                                  kCreatedInitialized, Variable::NORMAL);
+
+    Statement* rethrow;
+    // We use %ReThrow rather than the ordinary throw because we want to
+    // preserve the original exception message.  This is also why we create a
+    // TryCatchStatementForReThrow below (which does not clear the pending
+    // message), rather than a TryCatchStatement.
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(catch_variable), zone);
+      rethrow = factory->NewExpressionStatement(
+          factory->NewCallRuntime(Runtime::kReThrow, args, nopos), nopos);
+    }
+
+    Block* catch_block = factory->NewBlock(nullptr, 2, false, nopos);
+    catch_block->statements()->Add(set_completion_throw, zone);
+    catch_block->statements()->Add(rethrow, zone);
+
+    try_catch = factory->NewTryCatchStatementForReThrow(
+        iterator_use, catch_scope, catch_variable, catch_block, nopos);
+  }
+
+  // try { #try_catch } finally { #maybe_close }
+  Statement* try_finally;
+  {
+    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+    try_block->statements()->Add(try_catch, zone);
+
+    try_finally =
+        factory->NewTryFinallyStatement(try_block, maybe_close, nopos);
+  }
+
+  target->statements()->Add(initialize_completion, zone);
+  target->statements()->Add(try_finally, zone);
+}
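[Editor's note, a sketch of what the try/finally wrapping guarantees at the language level, assuming the harmony_iterator_close flag is enabled; the counting iterable is invented for illustration.]

  let closed = 0;
  const counting = {
    [Symbol.iterator]() {
      let i = 0;
      return {
        next() { return { value: i, done: i++ >= 3 }; },
        return(v) { closed++; return { value: v, done: true }; }
      };
    }
  };
  for (const x of counting) {
    if (x === 1) break;             // abrupt completion
  }
  console.log(closed);              // 1 -- the finally clause closed the iterator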
 
 void ParserTraits::BuildIteratorCloseForCompletion(
     ZoneList<Statement*>* statements, Variable* iterator,
@@ -6581,16 +6595,17 @@
   //
   //   let iteratorReturn = iterator.return;
   //   if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
-  //     let output;
-  //     if (completion === BODY_THREW) {
+  //     if (completion === kThrowCompletion) {
   //       if (!IS_CALLABLE(iteratorReturn)) {
   //         throw MakeTypeError(kReturnMethodNotCallable);
   //       }
-  //       try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+  //       try { %_Call(iteratorReturn, iterator) } catch (_) { }
   //     } else {
-  //       output = %_Call(iteratorReturn, iterator);
+  //       let output = %_Call(iteratorReturn, iterator);
+  //       if (!IS_RECEIVER(output)) {
+  //         %ThrowIterResultNotAnObject(output);
+  //       }
   //     }
-  //     if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
   //   }
   //
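[Editor's note, a sketch of the kThrowCompletion branch, again assuming the iterator-close flag; the throwing return() is contrived. When the loop body throws, return() is still invoked but its own exception is dropped so the original error propagates.]

  const flaky = {
    [Symbol.iterator]() {
      return {
        next() { return { value: 1, done: false }; },
        return() { throw new Error("from return()"); }   // swallowed below
      };
    }
  };
  try {
    for (const x of flaky) throw new Error("from body");
  } catch (e) {
    console.log(e.message);   // "from body" -- the body's error wins
  }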
 
@@ -6600,11 +6615,9 @@
   auto scope = parser_->scope_;
   auto zone = parser_->zone();
 
-  // let output;
-  Variable* var_output = scope->NewTemporary(avfactory->empty_string());
 
   // let iteratorReturn = iterator.return;
-  Variable* var_return = var_output;  // Reusing the output variable.
+  Variable* var_return = scope->NewTemporary(avfactory->empty_string());
   Statement* get_return;
   {
     Expression* iterator_proxy = factory->NewVariableProxy(iterator);
@@ -6626,25 +6639,10 @@
     Expression* throw_expr = NewThrowTypeError(
         MessageTemplate::kReturnMethodNotCallable,
         avfactory->empty_string(), nopos);
-    check_return_callable = CheckCallable(var_return, throw_expr);
+    check_return_callable = CheckCallable(var_return, throw_expr, nopos);
   }
 
-  // output = %_Call(iteratorReturn, iterator);
-  Statement* call_return;
-  {
-    auto args = new (zone) ZoneList<Expression*>(2, zone);
-    args->Add(factory->NewVariableProxy(var_return), zone);
-    args->Add(factory->NewVariableProxy(iterator), zone);
-    Expression* call =
-        factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
-
-    Expression* output_proxy = factory->NewVariableProxy(var_output);
-    Expression* assignment = factory->NewAssignment(
-        Token::ASSIGN, output_proxy, call, nopos);
-    call_return = factory->NewExpressionStatement(assignment, nopos);
-  }
-
-  // try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+  // try { %_Call(iteratorReturn, iterator) } catch (_) { }
   Statement* try_call_return;
   {
     auto args = new (zone) ZoneList<Expression*>(2, zone);
@@ -6653,12 +6651,10 @@
 
     Expression* call =
         factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
-    Expression* assignment = factory->NewAssignment(
-        Token::ASSIGN, factory->NewVariableProxy(var_output), call, nopos);
 
     Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
-    try_block->statements()->Add(
-        factory->NewExpressionStatement(assignment, nopos), zone);
+    try_block->statements()->Add(factory->NewExpressionStatement(call, nopos),
+                                 zone);
 
     Block* catch_block = factory->NewBlock(nullptr, 0, false, nopos);
 
@@ -6671,29 +6667,27 @@
         try_block, catch_scope, catch_variable, catch_block, nopos);
   }
 
-  // if (completion === ABRUPT_THROW) {
-  //   #check_return_callable;
-  //   #try_call_return;
-  // } else {
-  //   #call_return;
+  // let output = %_Call(iteratorReturn, iterator);
+  // if (!IS_RECEIVER(output)) {
+  //   %ThrowIteratorResultNotAnObject(output);
   // }
-  Statement* call_return_carefully;
+  Block* validate_return;
   {
-    Expression* condition = factory->NewCompareOperation(
-        Token::EQ_STRICT, factory->NewVariableProxy(completion),
-        factory->NewSmiLiteral(BODY_THREW, nopos), nopos);
+    Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+    Statement* call_return;
+    {
+      auto args = new (zone) ZoneList<Expression*>(2, zone);
+      args->Add(factory->NewVariableProxy(var_return), zone);
+      args->Add(factory->NewVariableProxy(iterator), zone);
+      Expression* call =
+          factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
 
-    Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
-    then_block->statements()->Add(check_return_callable, zone);
-    then_block->statements()->Add(try_call_return, zone);
+      Expression* output_proxy = factory->NewVariableProxy(var_output);
+      Expression* assignment =
+          factory->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
+      call_return = factory->NewExpressionStatement(assignment, nopos);
+    }
 
-    call_return_carefully =
-        factory->NewIfStatement(condition, then_block, call_return, nopos);
-  }
-
-  // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
-  Statement* validate_output;
-  {
     Expression* is_receiver_call;
     {
       auto args = new (zone) ZoneList<Expression*>(1, zone);
@@ -6711,8 +6705,32 @@
       throw_call = factory->NewExpressionStatement(call, nopos);
     }
 
-    validate_output = factory->NewIfStatement(
+    Statement* check_return = factory->NewIfStatement(
         is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+
+    validate_return = factory->NewBlock(nullptr, 2, false, nopos);
+    validate_return->statements()->Add(call_return, zone);
+    validate_return->statements()->Add(check_return, zone);
+  }
+
+  // if (completion === kThrowCompletion) {
+  //   #check_return_callable;
+  //   #try_call_return;
+  // } else {
+  //   #validate_return;
+  // }
+  Statement* call_return_carefully;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ_STRICT, factory->NewVariableProxy(completion),
+        factory->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
+
+    Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
+    then_block->statements()->Add(check_return_callable, zone);
+    then_block->statements()->Add(try_call_return, zone);
+
+    call_return_carefully =
+        factory->NewIfStatement(condition, then_block, validate_return, nopos);
   }
 
   // if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) { ... }
@@ -6722,12 +6740,9 @@
         Token::EQ, factory->NewVariableProxy(var_return),
         factory->NewNullLiteral(nopos), nopos);
 
-    Block* block = factory->NewBlock(nullptr, 2, false, nopos);
-    block->statements()->Add(call_return_carefully, zone);
-    block->statements()->Add(validate_output, zone);
-
-    maybe_call_return = factory->NewIfStatement(
-        condition, factory->NewEmptyStatement(nopos), block, nopos);
+    maybe_call_return =
+        factory->NewIfStatement(condition, factory->NewEmptyStatement(nopos),
+                                call_return_carefully, nopos);
   }
 
 
@@ -6742,25 +6757,35 @@
   //
   // This function replaces the loop with the following wrapping:
   //
-  //   let completion = BODY_COMPLETED;
+  //   let each;
+  //   let completion = kNormalCompletion;
   //   try {
-  //     #loop;
-  //   } catch(e) {
-  //     if (completion === BODY_ABORTED) completion = BODY_THREW;
-  //     throw e;
+  //     try {
+  //       #loop;
+  //     } catch(e) {
+  //       if (completion === kAbruptCompletion) completion = kThrowCompletion;
+  //       %ReThrow(e);
+  //     }
   //   } finally {
-  //     if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
-  //       #BuildIteratorClose(#iterator, completion)  // See above.
+  //     if (!(completion === kNormalCompletion || IS_UNDEFINED(#iterator))) {
+  //       #BuildIteratorCloseForCompletion(#iterator, completion)
   //     }
   //   }
   //
   // where the loop's body is wrapped as follows:
   //
   //   {
-  //     {{completion = BODY_ABORTED;}}
   //     #loop-body
-  //     {{completion = BODY_COMPLETED;}}
+  //     {{completion = kNormalCompletion;}}
   //   }
+  //
+  // and the loop's assign_each is wrapped as follows:
+  //
+  //   do {
+  //     {{completion = kAbruptCompletion;}}
+  //     #assign-each
+  //   }
+  //
 
   const int nopos = RelocInfo::kNoPosition;
   auto factory = parser_->factory();
@@ -6768,129 +6793,41 @@
   auto scope = parser_->scope_;
   auto zone = parser_->zone();
 
-  // let completion = BODY_COMPLETED;
   Variable* var_completion = scope->NewTemporary(avfactory->empty_string());
-  Statement* initialize_completion;
+
+  // let each;
+  Variable* var_each = scope->NewTemporary(avfactory->empty_string());
+  Statement* initialize_each;
   {
-    Expression* proxy = factory->NewVariableProxy(var_completion);
+    Expression* proxy = factory->NewVariableProxy(var_each);
     Expression* assignment = factory->NewAssignment(
         Token::ASSIGN, proxy,
-        factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
-    initialize_completion =
+        factory->NewUndefinedLiteral(nopos), nopos);
+    initialize_each =
         factory->NewExpressionStatement(assignment, nopos);
   }
 
-  // if (completion === BODY_ABORTED) completion = BODY_THREW;
-  Statement* set_completion_throw;
+  // !(completion === kNormalCompletion || IS_UNDEFINED(#iterator))
+  Expression* closing_condition;
   {
-    Expression* condition = factory->NewCompareOperation(
+    Expression* lhs = factory->NewCompareOperation(
         Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
-        factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
-
-    Expression* proxy = factory->NewVariableProxy(var_completion);
-    Expression* assignment = factory->NewAssignment(
-        Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_THREW, nopos),
-        nopos);
-    Statement* statement = factory->NewExpressionStatement(assignment, nopos);
-    set_completion_throw = factory->NewIfStatement(
-        condition, statement, factory->NewEmptyStatement(nopos), nopos);
-  }
-
-  // if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
-  //   #BuildIteratorClose(#iterator, completion)
-  // }
-  Block* maybe_close;
-  {
-    Expression* condition1 = factory->NewCompareOperation(
-        Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
-        factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
-    Expression* condition2 = factory->NewCompareOperation(
+        factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
+    Expression* rhs = factory->NewCompareOperation(
         Token::EQ_STRICT, factory->NewVariableProxy(loop->iterator()),
         factory->NewUndefinedLiteral(nopos), nopos);
-    Expression* condition = factory->NewBinaryOperation(
-        Token::OR, condition1, condition2, nopos);
-
-    Block* block = factory->NewBlock(nullptr, 2, false, nopos);
-    BuildIteratorCloseForCompletion(
-        block->statements(), loop->iterator(), var_completion);
-    DCHECK(block->statements()->length() == 2);
-
-    maybe_close = factory->NewBlock(nullptr, 1, false, nopos);
-    maybe_close->statements()->Add(factory->NewIfStatement(
-        condition, factory->NewEmptyStatement(nopos), block, nopos), zone);
+    closing_condition = factory->NewUnaryOperation(
+        Token::NOT, factory->NewBinaryOperation(Token::OR, lhs, rhs, nopos),
+        nopos);
   }
 
-  // try { #try_block }
-  // catch(e) {
-  //   #set_completion_throw;
-  //   throw e;
-  // }
-  Statement* try_catch;
-  {
-    Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
-    Variable* catch_variable = catch_scope->DeclareLocal(
-        avfactory->dot_catch_string(), VAR, kCreatedInitialized,
-        Variable::NORMAL);
-
-    Statement* rethrow;
-    {
-      Expression* proxy = factory->NewVariableProxy(catch_variable);
-      rethrow = factory->NewExpressionStatement(
-          factory->NewThrow(proxy, nopos), nopos);
-    }
-
-    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
-    try_block->statements()->Add(loop, zone);
-
-    Block* catch_block = factory->NewBlock(nullptr, 2, false, nopos);
-    catch_block->statements()->Add(set_completion_throw, zone);
-    catch_block->statements()->Add(rethrow, zone);
-
-    try_catch = factory->NewTryCatchStatement(
-        try_block, catch_scope, catch_variable, catch_block, nopos);
-  }
-
-  // try { #try_catch } finally { #maybe_close }
-  Statement* try_finally;
-  {
-    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
-    try_block->statements()->Add(try_catch, zone);
-
-    try_finally =
-        factory->NewTryFinallyStatement(try_block, maybe_close, nopos);
-  }
-
-  // #initialize_completion;
-  // #try_finally;
-  Statement* final_loop;
-  {
-    Block* block = factory->NewBlock(nullptr, 2, false, nopos);
-    block->statements()->Add(initialize_completion, zone);
-    block->statements()->Add(try_finally, zone);
-    final_loop = block;
-  }
-
-  // {{completion = BODY_ABORTED;}}
-  Statement* set_completion_break;
-  {
-    Expression* proxy = factory->NewVariableProxy(var_completion);
-    Expression* assignment = factory->NewAssignment(
-        Token::ASSIGN, proxy,
-        factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
-
-    Block* block = factory->NewBlock(nullptr, 1, true, nopos);
-    block->statements()->Add(
-        factory->NewExpressionStatement(assignment, nopos), zone);
-    set_completion_break = block;
-  }
-
-  // {{completion = BODY_COMPLETED;}}
+  // {{completion = kNormalCompletion;}}
   Statement* set_completion_normal;
   {
     Expression* proxy = factory->NewVariableProxy(var_completion);
     Expression* assignment = factory->NewAssignment(
-        Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_COMPLETED, nopos),
-        nopos);
+        Token::ASSIGN, proxy,
+        factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
 
     Block* block = factory->NewBlock(nullptr, 1, true, nopos);
     block->statements()->Add(
@@ -6898,13 +6835,54 @@
     set_completion_normal = block;
   }
 
-  // { #set_completion_break; #loop-body; #set_completion_normal }
+  // {{completion = kAbruptCompletion;}}
+  Statement* set_completion_abrupt;
+  {
+    Expression* proxy = factory->NewVariableProxy(var_completion);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, proxy,
+        factory->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
+
+    Block* block = factory->NewBlock(nullptr, 1, true, nopos);
+    block->statements()->Add(factory->NewExpressionStatement(assignment, nopos),
+                             zone);
+    set_completion_abrupt = block;
+  }
+
+  // { #loop-body; #set_completion_normal }
   Block* new_body = factory->NewBlock(nullptr, 2, false, nopos);
-  new_body->statements()->Add(set_completion_break, zone);
-  new_body->statements()->Add(loop->body(), zone);
-  new_body->statements()->Add(set_completion_normal, zone);
+  {
+    new_body->statements()->Add(loop->body(), zone);
+    new_body->statements()->Add(set_completion_normal, zone);
+  }
+
+  // { #set_completion_abrupt; #assign-each }
+  Block* new_assign_each = factory->NewBlock(nullptr, 2, false, nopos);
+  {
+    new_assign_each->statements()->Add(set_completion_abrupt, zone);
+    new_assign_each->statements()->Add(
+        factory->NewExpressionStatement(loop->assign_each(), nopos), zone);
+  }
+
+  // Now put things together.
 
   loop->set_body(new_body);
+  loop->set_assign_each(
+      factory->NewDoExpression(new_assign_each, var_each, nopos));
+
+  Statement* final_loop;
+  {
+    Block* target = factory->NewBlock(nullptr, 3, false, nopos);
+    target->statements()->Add(initialize_each, zone);
+
+    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+    try_block->statements()->Add(loop, zone);
+
+    FinalizeIteratorUse(var_completion, closing_condition, loop->iterator(),
+                        try_block, target);
+    final_loop = target;
+  }
+
   return final_loop;
 }
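
The wrapping above implements ES2015 IteratorClose for for-of: on an abrupt completion (break, an outer return, or a throw out of the body) the iterator's return() method is invoked, while a loop that runs to the end leaves the iterator alone. A minimal TypeScript sketch of that observable behaviour (the iterator literal is illustrative, not parser output):

    const iterable = {
      [Symbol.iterator]() {
        let i = 0;
        return {
          next: () => ({ value: i++, done: i > 3 }),
          return() {                        // invoked by IteratorClose on abrupt exit
            console.log("return() called");
            return { value: undefined, done: true };
          },
        };
      },
    };

    for (const x of iterable) {
      if (x === 1) break;                   // kAbruptCompletion: logs "return() called"
    }
    for (const x of iterable) {}            // kNormalCompletion: return() is not invoked

When the body throws (kThrowCompletion), return() is still called, but a failure inside return() does not mask the original exception, which is why the generated code distinguishes the try_call_return and validate_return paths.
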
 
diff --git a/src/parsing/parser.h b/src/parsing/parser.h
index d4fb62f..c82682e 100644
--- a/src/parsing/parser.h
+++ b/src/parsing/parser.h
@@ -53,7 +53,6 @@
   FLAG_ACCESSOR(kEval, is_eval, set_eval)
   FLAG_ACCESSOR(kGlobal, is_global, set_global)
   FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
-  FLAG_ACCESSOR(kStrongMode, is_strong_mode, set_strong_mode)
   FLAG_ACCESSOR(kNative, is_native, set_native)
   FLAG_ACCESSOR(kModule, is_module, set_module)
   FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
@@ -132,17 +131,17 @@
   Handle<Context> context() { return context_; }
   void clear_script() { script_ = Handle<Script>::null(); }
   void set_isolate(Isolate* isolate) { isolate_ = isolate; }
+  void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
   void set_context(Handle<Context> context) { context_ = context; }
   void set_script(Handle<Script> script) { script_ = script; }
   //--------------------------------------------------------------------------
 
   LanguageMode language_mode() {
-    return construct_language_mode(is_strict_mode(), is_strong_mode());
+    return construct_language_mode(is_strict_mode());
   }
   void set_language_mode(LanguageMode language_mode) {
     STATIC_ASSERT(LANGUAGE_END == 3);
-    set_strict_mode(language_mode & STRICT_BIT);
-    set_strong_mode(language_mode & STRONG_BIT);
+    set_strict_mode(is_strict(language_mode));
   }
 
   void ReopenHandlesInNewHandleScope() {
@@ -165,13 +164,12 @@
     kEval = 1 << 2,
     kGlobal = 1 << 3,
     kStrictMode = 1 << 4,
-    kStrongMode = 1 << 5,
-    kNative = 1 << 6,
-    kParseRestriction = 1 << 7,
-    kModule = 1 << 8,
-    kAllowLazyParsing = 1 << 9,
+    kNative = 1 << 5,
+    kParseRestriction = 1 << 6,
+    kModule = 1 << 7,
+    kAllowLazyParsing = 1 << 8,
     // ---------- Output flags --------------------------
-    kAstValueFactoryOwned = 1 << 10
+    kAstValueFactoryOwned = 1 << 9
   };
 
   //------------- Inputs to parsing and scope analysis -----------------------
@@ -205,7 +203,6 @@
   void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
   bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
 
-  void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
   void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
 };
 
@@ -404,16 +401,6 @@
     fni->AddFunction(func_to_infer);
   }
 
-  static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
-      Scope* scope, ObjectLiteralProperty* property, bool* has_function) {
-    Expression* value = property->value();
-    if (scope->DeclarationScope()->is_script_scope() &&
-        value->AsFunctionLiteral() != NULL) {
-      *has_function = true;
-      value->AsFunctionLiteral()->set_pretenure();
-    }
-  }
-
   // If we assign a function literal to a property we pretenure the
   // literal so it can be added as a constant function property.
   static void CheckAssigningFunctionLiteralToProperty(Expression* left,
@@ -444,6 +431,8 @@
   Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
                                    int pos, AstNodeFactory* factory);
 
+  Expression* BuildIteratorResult(Expression* value, bool done);
+
   // Generate AST node that throws a ReferenceError with the given type.
   Expression* NewThrowReferenceError(MessageTemplate::Template message,
                                      int pos);
@@ -464,6 +453,9 @@
                             MessageTemplate::Template message,
                             const AstRawString* arg, int pos);
 
+  void FinalizeIteratorUse(Variable* completion, Expression* condition,
+                           Variable* iter, Block* iterator_use, Block* result);
+
   Statement* FinalizeForOfStatement(ForOfStatement* loop, int pos);
 
   // Reporting errors.
@@ -581,11 +573,14 @@
       const ParserFormalParameters& parameters, FunctionKind kind,
       FunctionLiteral::FunctionType function_type, bool* ok);
 
-  ClassLiteral* ParseClassLiteral(const AstRawString* name,
+  ClassLiteral* ParseClassLiteral(Type::ExpressionClassifier* classifier,
+                                  const AstRawString* name,
                                   Scanner::Location class_name_location,
                                   bool name_is_strict_reserved, int pos,
                                   bool* ok);
 
+  V8_INLINE void MarkTailPosition(Expression* expression);
+
   V8_INLINE void CheckConflictingVarDeclarations(v8::internal::Scope* scope,
                                                  bool* ok);
 
@@ -644,6 +639,11 @@
   // Rewrite all DestructuringAssignments in the current FunctionState.
   V8_INLINE void RewriteDestructuringAssignments();
 
+  V8_INLINE Expression* RewriteExponentiation(Expression* left,
+                                              Expression* right, int pos);
+  V8_INLINE Expression* RewriteAssignExponentiation(Expression* left,
+                                                    Expression* right, int pos);
+
   V8_INLINE void QueueDestructuringAssignmentForRewriting(
       Expression* assignment);
   V8_INLINE void QueueNonPatternForRewriting(Expression* expr);
@@ -670,13 +670,12 @@
  private:
   Parser* parser_;
 
-  void BuildIteratorClose(
-      ZoneList<Statement*>* statements, Variable* iterator,
-      Expression* input, Variable* output);
+  void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
+                          Maybe<Variable*> input, Variable* output);
   void BuildIteratorCloseForCompletion(
       ZoneList<Statement*>* statements, Variable* iterator,
       Variable* body_threw);
-  Statement* CheckCallable(Variable* var, Expression* error);
+  Statement* CheckCallable(Variable* var, Expression* error, int pos);
 };
 
 
@@ -705,6 +704,13 @@
  private:
   friend class ParserTraits;
 
+  // Runtime encoding of different completion modes.
+  enum CompletionKind {
+    kNormalCompletion,
+    kThrowCompletion,
+    kAbruptCompletion
+  };
+
   // Limit the allowed number of local variables in a function. The hard limit
   // is that offsets computed by FullCodeGenerator::StackOperand and similar
   // functions are ints, and they should not overflow. In addition, accessing
@@ -754,8 +760,12 @@
                           ZoneList<const AstRawString*>* local_names,
                           Scanner::Location* reserved_loc, bool* ok);
   ZoneList<ImportDeclaration*>* ParseNamedImports(int pos, bool* ok);
-  Statement* ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok);
-  Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+  Statement* ParseStatement(ZoneList<const AstRawString*>* labels,
+                            AllowLabelledFunctionStatement allow_function,
+                            bool* ok);
+  Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels,
+                               AllowLabelledFunctionStatement allow_function,
+                               bool* ok);
   Statement* ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
                                    bool* ok);
   Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
@@ -897,7 +907,8 @@
                                    ZoneList<const AstRawString*>* names,
                                    bool* ok);
   Statement* ParseExpressionOrLabelledStatement(
-      ZoneList<const AstRawString*>* labels, bool* ok);
+      ZoneList<const AstRawString*>* labels,
+      AllowLabelledFunctionStatement allow_function, bool* ok);
   IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
                                 bool* ok);
   Statement* ParseContinueStatement(bool* ok);
@@ -920,6 +931,14 @@
   class CollectExpressionsInTailPositionToListScope;
   TryStatement* ParseTryStatement(bool* ok);
   DebuggerStatement* ParseDebuggerStatement(bool* ok);
+  // Parse a SubStatement in strict mode, or with an extra block scope in
+  // sloppy mode to handle
+  // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+  // The legacy parameter indicates whether function declarations are
+  // banned by the ES2015 specification in this location, and they are being
+  // permitted here to match previous V8 behavior.
+  Statement* ParseScopedStatement(ZoneList<const AstRawString*>* labels,
+                                  bool legacy, bool* ok);
 
   // !%_IsJSReceiver(result = iterator.next()) &&
   //     %ThrowIteratorResultNotAnObject(result)
@@ -929,8 +948,10 @@
 
   // Initialize the components of a for-in / for-of statement.
   void InitializeForEachStatement(ForEachStatement* stmt, Expression* each,
-                                  Expression* subject, Statement* body,
-                                  bool is_destructuring);
+                                  Expression* subject, Statement* body);
+  void InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
+                                Expression* iterable, Statement* body,
+                                int iterable_pos);
   Statement* DesugarLexicalBindingsInForStatement(
       Scope* inner_scope, VariableMode mode,
       ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
@@ -944,8 +965,8 @@
       int function_token_position, FunctionLiteral::FunctionType type,
       LanguageMode language_mode, bool* ok);
 
-
-  ClassLiteral* ParseClassLiteral(const AstRawString* name,
+  ClassLiteral* ParseClassLiteral(ExpressionClassifier* classifier,
+                                  const AstRawString* name,
                                   Scanner::Location class_name_location,
                                   bool name_is_strict_reserved, int pos,
                                   bool* ok);
@@ -1035,6 +1056,11 @@
 
   V8_INLINE void RewriteDestructuringAssignments();
 
+  V8_INLINE Expression* RewriteExponentiation(Expression* left,
+                                              Expression* right, int pos);
+  V8_INLINE Expression* RewriteAssignExponentiation(Expression* left,
+                                                    Expression* right, int pos);
+
   friend class NonPatternRewriter;
   V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
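
The new RewriteExponentiation and RewriteAssignExponentiation hooks are, judging by their names, the lowering points for the ES2016 ** and **= operators; their bodies are not part of this hunk. For Number operands the operator's observable semantics match Math.pow, e.g.:

    let base = 2;
    const powered = base ** 10;   // 1024, the same result as Math.pow(base, 10)
    base **= 3;                   // compound form: base = base ** 3
    console.log(powered, base);   // 1024 8
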
 
diff --git a/src/parsing/pattern-rewriter.cc b/src/parsing/pattern-rewriter.cc
index 768a948..e699255 100644
--- a/src/parsing/pattern-rewriter.cc
+++ b/src/parsing/pattern-rewriter.cc
@@ -17,6 +17,8 @@
     ZoneList<const AstRawString*>* names, bool* ok) {
   PatternRewriter rewriter;
 
+  DCHECK(block->ignore_completion_value());
+
   rewriter.scope_ = declaration_descriptor->scope;
   rewriter.parser_ = declaration_descriptor->parser;
   rewriter.context_ = BINDING;
@@ -234,22 +236,20 @@
 
     if (IsImmutableVariableMode(descriptor_->mode)) {
       arguments->Add(value, zone());
-      value = NULL;  // zap the value to avoid the unnecessary assignment
-
       // Construct the call to Runtime_InitializeConstGlobal
       // and add it to the initialization statement block.
       // Note that the function does different things depending on
       // the number of arguments (1 or 2).
-      initialize =
-          factory()->NewCallRuntime(Runtime::kInitializeConstGlobal, arguments,
-                                    descriptor_->initialization_pos);
+      initialize = factory()->NewCallRuntime(Runtime::kInitializeConstGlobal,
+                                             arguments, value->position());
+      value = NULL;  // zap the value to avoid the unnecessary assignment
     } else {
       // Add language mode.
       // We may want to pass singleton to avoid Literal allocations.
       LanguageMode language_mode = initialization_scope->language_mode();
-      arguments->Add(factory()->NewNumberLiteral(language_mode,
-                                                 descriptor_->declaration_pos),
-                     zone());
+      arguments->Add(
+          factory()->NewNumberLiteral(language_mode, RelocInfo::kNoPosition),
+          zone());
 
       // Be careful not to assign a value to the global variable if
       // we're in a with. The initialization value should not
@@ -257,12 +257,11 @@
       // which is why we need to generate a separate assignment node.
       if (value != NULL && !descriptor_->scope->inside_with()) {
         arguments->Add(value, zone());
-        value = NULL;  // zap the value to avoid the unnecessary assignment
         // Construct the call to Runtime_InitializeVarGlobal
         // and add it to the initialization statement block.
-        initialize =
-            factory()->NewCallRuntime(Runtime::kInitializeVarGlobal, arguments,
-                                      descriptor_->declaration_pos);
+        initialize = factory()->NewCallRuntime(Runtime::kInitializeVarGlobal,
+                                               arguments, value->position());
+        value = NULL;  // zap the value to avoid the unnecessary assignment
       } else {
         initialize = NULL;
       }
@@ -270,7 +269,7 @@
 
     if (initialize != NULL) {
       block_->statements()->Add(
-          factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
+          factory()->NewExpressionStatement(initialize, initialize->position()),
           zone());
     }
   } else if (value != nullptr && (descriptor_->mode == CONST_LEGACY ||
@@ -286,7 +285,7 @@
     DCHECK_NOT_NULL(proxy->var());
     DCHECK_NOT_NULL(value);
     // Add break location for destructured sub-pattern.
-    int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+    int pos = IsSubPattern() ? pattern->position() : value->position();
     Assignment* assignment =
         factory()->NewAssignment(Token::INIT, proxy, value, pos);
     block_->statements()->Add(
@@ -303,7 +302,7 @@
     // property).
     VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name);
     // Add break location for destructured sub-pattern.
-    int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+    int pos = IsSubPattern() ? pattern->position() : value->position();
     Assignment* assignment =
         factory()->NewAssignment(Token::INIT, proxy, value, pos);
     block_->statements()->Add(
@@ -365,7 +364,7 @@
   PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
   int pos = assign->position();
   Block* old_block = block_;
-  block_ = factory()->NewBlock(nullptr, 8, false, pos);
+  block_ = factory()->NewBlock(nullptr, 8, true, pos);
   Variable* temp = nullptr;
   Expression* pattern = assign->target();
   Expression* old_value = current_value_;
@@ -414,16 +413,27 @@
 
 void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
                                                 Variable** temp_var) {
+  DCHECK(block_->ignore_completion_value());
+
   auto temp = *temp_var = CreateTempVar(current_value_);
-
-  block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
-
   auto iterator = CreateTempVar(parser_->GetIterator(
       factory()->NewVariableProxy(temp), factory(), RelocInfo::kNoPosition));
   auto done = CreateTempVar(
       factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition));
   auto result = CreateTempVar();
   auto v = CreateTempVar();
+  auto completion = CreateTempVar();
+  auto nopos = RelocInfo::kNoPosition;
+
+  // For the purpose of iterator finalization, we temporarily set block_ to a
+  // new block.  In the main body of this function, we write to block_ (both
+  // explicitly and implicitly via recursion).  At the end of the function, we
+  // wrap this new block in a try-finally statement, restore block_ to its
+  // original value, and add the try-finally statement to block_.
+  auto target = block_;
+  if (FLAG_harmony_iterator_close) {
+    block_ = factory()->NewBlock(nullptr, 8, true, nopos);
+  }
 
   Spread* spread = nullptr;
   for (Expression* value : *node->values()) {
@@ -433,88 +443,201 @@
     }
 
     PatternContext context = SetInitializerContextIfNeeded(value);
+
     // if (!done) {
+    //   done = true;  // If .next, .done or .value throws, don't close.
     //   result = IteratorNext(iterator);
-    //   v = (done = result.done) ? undefined : result.value;
+    //   if (result.done) {
+    //     v = undefined;
+    //   } else {
+    //     v = result.value;
+    //     done = false;
+    //   }
     // }
-    auto next_block =
-        factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
-    next_block->statements()->Add(factory()->NewExpressionStatement(
-                                      parser_->BuildIteratorNextResult(
-                                          factory()->NewVariableProxy(iterator),
-                                          result, RelocInfo::kNoPosition),
+    Statement* if_not_done;
+    {
+      auto result_done = factory()->NewProperty(
+          factory()->NewVariableProxy(result),
+          factory()->NewStringLiteral(ast_value_factory()->done_string(),
                                       RelocInfo::kNoPosition),
-                                  zone());
+          RelocInfo::kNoPosition);
 
-    auto assign_to_done = factory()->NewAssignment(
-        Token::ASSIGN, factory()->NewVariableProxy(done),
-        factory()->NewProperty(
-            factory()->NewVariableProxy(result),
-            factory()->NewStringLiteral(ast_value_factory()->done_string(),
-                                        RelocInfo::kNoPosition),
-            RelocInfo::kNoPosition),
-        RelocInfo::kNoPosition);
-    auto next_value = factory()->NewConditional(
-        assign_to_done, factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
-        factory()->NewProperty(
-            factory()->NewVariableProxy(result),
-            factory()->NewStringLiteral(ast_value_factory()->value_string(),
-                                        RelocInfo::kNoPosition),
-            RelocInfo::kNoPosition),
-        RelocInfo::kNoPosition);
-    next_block->statements()->Add(
-        factory()->NewExpressionStatement(
-            factory()->NewAssignment(Token::ASSIGN,
-                                     factory()->NewVariableProxy(v), next_value,
-                                     RelocInfo::kNoPosition),
-            RelocInfo::kNoPosition),
-        zone());
+      auto assign_undefined = factory()->NewAssignment(
+          Token::ASSIGN, factory()->NewVariableProxy(v),
+          factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+          RelocInfo::kNoPosition);
 
-    auto if_statement = factory()->NewIfStatement(
-        factory()->NewUnaryOperation(Token::NOT,
-                                     factory()->NewVariableProxy(done),
-                                     RelocInfo::kNoPosition),
-        next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
-        RelocInfo::kNoPosition);
-    block_->statements()->Add(if_statement, zone());
+      auto assign_value = factory()->NewAssignment(
+          Token::ASSIGN, factory()->NewVariableProxy(v),
+          factory()->NewProperty(
+              factory()->NewVariableProxy(result),
+              factory()->NewStringLiteral(ast_value_factory()->value_string(),
+                                          RelocInfo::kNoPosition),
+              RelocInfo::kNoPosition),
+          RelocInfo::kNoPosition);
+
+      auto unset_done = factory()->NewAssignment(
+          Token::ASSIGN, factory()->NewVariableProxy(done),
+          factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition),
+          RelocInfo::kNoPosition);
+
+      auto inner_else =
+          factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
+      inner_else->statements()->Add(
+          factory()->NewExpressionStatement(assign_value, nopos), zone());
+      inner_else->statements()->Add(
+          factory()->NewExpressionStatement(unset_done, nopos), zone());
+
+      auto inner_if = factory()->NewIfStatement(
+          result_done,
+          factory()->NewExpressionStatement(assign_undefined, nopos),
+          inner_else, nopos);
+
+      auto next_block =
+          factory()->NewBlock(nullptr, 3, true, RelocInfo::kNoPosition);
+      next_block->statements()->Add(
+          factory()->NewExpressionStatement(
+              factory()->NewAssignment(
+                  Token::ASSIGN, factory()->NewVariableProxy(done),
+                  factory()->NewBooleanLiteral(true, nopos), nopos),
+              nopos),
+          zone());
+      next_block->statements()->Add(
+          factory()->NewExpressionStatement(
+              parser_->BuildIteratorNextResult(
+                  factory()->NewVariableProxy(iterator), result,
+                  RelocInfo::kNoPosition),
+              RelocInfo::kNoPosition),
+          zone());
+      next_block->statements()->Add(inner_if, zone());
+
+      if_not_done = factory()->NewIfStatement(
+          factory()->NewUnaryOperation(Token::NOT,
+                                       factory()->NewVariableProxy(done),
+                                       RelocInfo::kNoPosition),
+          next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
+          RelocInfo::kNoPosition);
+    }
+    block_->statements()->Add(if_not_done, zone());
 
     if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
+      if (FLAG_harmony_iterator_close) {
+        // completion = kAbruptCompletion;
+        Expression* proxy = factory()->NewVariableProxy(completion);
+        Expression* assignment = factory()->NewAssignment(
+            Token::ASSIGN, proxy,
+            factory()->NewSmiLiteral(kAbruptCompletion, nopos), nopos);
+        block_->statements()->Add(
+            factory()->NewExpressionStatement(assignment, nopos), zone());
+      }
+
       RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
+
+      if (FLAG_harmony_iterator_close) {
+        // completion = kNormalCompletion;
+        Expression* proxy = factory()->NewVariableProxy(completion);
+        Expression* assignment = factory()->NewAssignment(
+            Token::ASSIGN, proxy,
+            factory()->NewSmiLiteral(kNormalCompletion, nopos), nopos);
+        block_->statements()->Add(
+            factory()->NewExpressionStatement(assignment, nopos), zone());
+      }
     }
     set_context(context);
   }
 
   if (spread != nullptr) {
-    // array = [];
-    // if (!done) %concat_iterable_to_array(array, iterator);
-    auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
-    auto array = CreateTempVar(factory()->NewArrayLiteral(
-        empty_exprs,
-        // Reuse pattern's literal index - it is unused since there is no
-        // actual literal allocated.
-        node->literal_index(), is_strong(scope()->language_mode()),
-        RelocInfo::kNoPosition));
+    // A spread can only occur as the last component.  It is not handled by
+    // RecurseIntoSubpattern above.
 
-    auto arguments = new (zone()) ZoneList<Expression*>(2, zone());
-    arguments->Add(factory()->NewVariableProxy(array), zone());
-    arguments->Add(factory()->NewVariableProxy(iterator), zone());
-    auto spread_into_array_call =
-        factory()->NewCallRuntime(Context::CONCAT_ITERABLE_TO_ARRAY_INDEX,
-                                  arguments, RelocInfo::kNoPosition);
+    // let array = [];
+    // while (!done) {
+    //   result = IteratorNext(iterator);
+    //   if (result.done) {
+    //     done = true;
+    //   } else {
+    //     %AppendElement(array, result.value);
+    //   }
+    // }
 
-    auto if_statement = factory()->NewIfStatement(
-        factory()->NewUnaryOperation(Token::NOT,
-                                     factory()->NewVariableProxy(done),
-                                     RelocInfo::kNoPosition),
-        factory()->NewExpressionStatement(spread_into_array_call,
-                                          RelocInfo::kNoPosition),
-        factory()->NewEmptyStatement(RelocInfo::kNoPosition),
-        RelocInfo::kNoPosition);
-    block_->statements()->Add(if_statement, zone());
+    // let array = [];
+    Variable* array;
+    {
+      auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
+      array = CreateTempVar(factory()->NewArrayLiteral(
+          empty_exprs,
+          // Reuse pattern's literal index - it is unused since there is no
+          // actual literal allocated.
+          node->literal_index(), RelocInfo::kNoPosition));
+    }
 
+    // result = IteratorNext(iterator);
+    Statement* get_next = factory()->NewExpressionStatement(
+        parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
+                                         result, nopos),
+        nopos);
+
+    // done = true;
+    Statement* set_done = factory()->NewExpressionStatement(
+        factory()->NewAssignment(
+            Token::ASSIGN, factory()->NewVariableProxy(done),
+            factory()->NewBooleanLiteral(true, nopos), nopos),
+        nopos);
+
+    // %AppendElement(array, result.value);
+    Statement* append_element;
+    {
+      auto args = new (zone()) ZoneList<Expression*>(2, zone());
+      args->Add(factory()->NewVariableProxy(array), zone());
+      args->Add(factory()->NewProperty(
+                    factory()->NewVariableProxy(result),
+                    factory()->NewStringLiteral(
+                        ast_value_factory()->value_string(), nopos),
+                    nopos),
+                zone());
+      append_element = factory()->NewExpressionStatement(
+          factory()->NewCallRuntime(Runtime::kAppendElement, args, nopos),
+          nopos);
+    }
+
+    // if (result.done) { #set_done } else { #append_element }
+    Statement* set_done_or_append;
+    {
+      Expression* result_done =
+          factory()->NewProperty(factory()->NewVariableProxy(result),
+                                 factory()->NewStringLiteral(
+                                     ast_value_factory()->done_string(), nopos),
+                                 nopos);
+      set_done_or_append = factory()->NewIfStatement(result_done, set_done,
+                                                     append_element, nopos);
+    }
+
+    // while (!done) {
+    //   #get_next;
+    //   #set_done_or_append;
+    // }
+    WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
+    {
+      Expression* condition = factory()->NewUnaryOperation(
+          Token::NOT, factory()->NewVariableProxy(done), nopos);
+      Block* body = factory()->NewBlock(nullptr, 2, true, nopos);
+      body->statements()->Add(get_next, zone());
+      body->statements()->Add(set_done_or_append, zone());
+      loop->Initialize(condition, body);
+    }
+
+    block_->statements()->Add(loop, zone());
     RecurseIntoSubpattern(spread->expression(),
                           factory()->NewVariableProxy(array));
   }
+
+  if (FLAG_harmony_iterator_close) {
+    Expression* closing_condition = factory()->NewUnaryOperation(
+        Token::NOT, factory()->NewVariableProxy(done), nopos);
+    parser_->FinalizeIteratorUse(completion, closing_condition, iterator,
+                                 block_, target);
+    block_ = target;
+  }
 }
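
Under FLAG_harmony_iterator_close, the rewritten array destructuring keeps a done flag and a completion temporary so that an iterator which is not fully consumed gets closed, while a throw from .next(), .done or .value deliberately skips the close (done is set to true before the call, as the comment above notes). A small sketch of the user-visible effect, using a generator's finally block to observe the close (the names are illustrative):

    function* numbers() {
      try {
        yield 1; yield 2; yield 3;
      } finally {
        console.log("closed");          // reached via .return() when destructuring stops early
      }
    }

    const [a, b] = numbers();           // reads two elements, iterator not done: logs "closed"
    const [x, y, ...rest] = numbers();  // the rest element drains the iterator in a loop (the
                                        // %AppendElement while-loop above), so it completes on
                                        // its own rather than through .return()
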
 
 
diff --git a/src/parsing/preparse-data.h b/src/parsing/preparse-data.h
index dbe1022..1c99450 100644
--- a/src/parsing/preparse-data.h
+++ b/src/parsing/preparse-data.h
@@ -6,6 +6,7 @@
 #define V8_PARSING_PREPARSE_DATA_H_
 
 #include "src/allocation.h"
+#include "src/collector.h"
 #include "src/hashmap.h"
 #include "src/messages.h"
 #include "src/parsing/preparse-data-format.h"
diff --git a/src/parsing/preparser.cc b/src/parsing/preparser.cc
index d335c8b..da1c35b 100644
--- a/src/parsing/preparser.cc
+++ b/src/parsing/preparser.cc
@@ -78,8 +78,6 @@
     int pos, Scanner* scanner, PreParserFactory* factory) {
   if (scanner->UnescapedLiteralMatches("use strict", 10)) {
     return PreParserExpression::UseStrictStringLiteral();
-  } else if (scanner->UnescapedLiteralMatches("use strong", 10)) {
-    return PreParserExpression::UseStrongStringLiteral();
   }
   return PreParserExpression::StringLiteral();
 }
@@ -132,25 +130,16 @@
       int end_pos = scanner()->location().end_pos;
       CheckStrictOctalLiteral(start_position, end_pos, &ok);
       if (!ok) return kPreParseSuccess;
-
-      if (is_strong(scope_->language_mode()) && IsSubclassConstructor(kind)) {
-        if (!function_state.super_location().IsValid()) {
-          ReportMessageAt(Scanner::Location(start_position, start_position + 1),
-                          MessageTemplate::kStrongSuperCallMissing,
-                          kReferenceError);
-          return kPreParseSuccess;
-        }
-      }
     }
   }
   return kPreParseSuccess;
 }
 
-
 PreParserExpression PreParserTraits::ParseClassLiteral(
-    PreParserIdentifier name, Scanner::Location class_name_location,
-    bool name_is_strict_reserved, int pos, bool* ok) {
-  return pre_parser_->ParseClassLiteral(name, class_name_location,
+    Type::ExpressionClassifier* classifier, PreParserIdentifier name,
+    Scanner::Location class_name_location, bool name_is_strict_reserved,
+    int pos, bool* ok) {
+  return pre_parser_->ParseClassLiteral(classifier, name, class_name_location,
                                         name_is_strict_reserved, pos, ok);
 }
 
@@ -205,7 +194,7 @@
     default:
       break;
   }
-  return ParseStatement(ok);
+  return ParseStatement(kAllowLabelledFunctionStatement, ok);
 }
 
 
@@ -226,62 +215,26 @@
     }
     bool starts_with_identifier = peek() == Token::IDENTIFIER;
     Scanner::Location token_loc = scanner()->peek_location();
-    Scanner::Location old_this_loc = function_state_->this_location();
-    Scanner::Location old_super_loc = function_state_->super_location();
     Statement statement = ParseStatementListItem(ok);
     if (!*ok) return;
 
-    if (is_strong(language_mode()) && scope_->is_function_scope() &&
-        IsClassConstructor(function_state_->kind())) {
-      Scanner::Location this_loc = function_state_->this_location();
-      Scanner::Location super_loc = function_state_->super_location();
-      if (this_loc.beg_pos != old_this_loc.beg_pos &&
-          this_loc.beg_pos != token_loc.beg_pos) {
-        ReportMessageAt(this_loc, MessageTemplate::kStrongConstructorThis);
-        *ok = false;
-        return;
-      }
-      if (super_loc.beg_pos != old_super_loc.beg_pos &&
-          super_loc.beg_pos != token_loc.beg_pos) {
-        ReportMessageAt(super_loc, MessageTemplate::kStrongConstructorSuper);
-        *ok = false;
-        return;
-      }
-    }
-
     if (directive_prologue) {
       bool use_strict_found = statement.IsUseStrictLiteral();
-      bool use_strong_found =
-          statement.IsUseStrongLiteral() && allow_strong_mode();
 
       if (use_strict_found) {
         scope_->SetLanguageMode(
             static_cast<LanguageMode>(scope_->language_mode() | STRICT));
-      } else if (use_strong_found) {
-        scope_->SetLanguageMode(static_cast<LanguageMode>(
-            scope_->language_mode() | STRONG));
-        if (IsClassConstructor(function_state_->kind())) {
-          // "use strong" cannot occur in a class constructor body, to avoid
-          // unintuitive strong class object semantics.
-          PreParserTraits::ReportMessageAt(
-              token_loc, MessageTemplate::kStrongConstructorDirective);
-          *ok = false;
-          return;
-        }
       } else if (!statement.IsStringLiteral()) {
         directive_prologue = false;
       }
 
-      if ((use_strict_found || use_strong_found) &&
-          !scope_->HasSimpleParameters()) {
+      if (use_strict_found && !scope_->HasSimpleParameters()) {
         // TC39 deemed "use strict" directives to be an error when occurring
         // in the body of a function with non-simple parameter list, on
         // 29/7/2015. https://goo.gl/ueA7Ln
-        //
-        // In V8, this also applies to "use strong " directives.
         PreParserTraits::ReportMessageAt(
             token_loc, MessageTemplate::kIllegalLanguageModeDirective,
-            use_strict_found ? "use strict" : "use strong");
+            "use strict");
         *ok = false;
         return;
       }
@@ -310,8 +263,8 @@
 #define DUMMY )  // to make indentation work
 #undef DUMMY
 
-
-PreParser::Statement PreParser::ParseStatement(bool* ok) {
+PreParser::Statement PreParser::ParseStatement(
+    AllowLabelledFunctionStatement allow_function, bool* ok) {
   // Statement ::
   //   EmptyStatement
   //   ...
@@ -320,11 +273,20 @@
     Next();
     return Statement::Default();
   }
-  return ParseSubStatement(ok);
+  return ParseSubStatement(allow_function, ok);
 }
 
+PreParser::Statement PreParser::ParseScopedStatement(bool legacy, bool* ok) {
+  if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
+      (legacy && allow_harmony_restrictive_declarations())) {
+    return ParseSubStatement(kDisallowLabelledFunctionStatement, ok);
+  } else {
+    return ParseFunctionDeclaration(CHECK_OK);
+  }
+}
 
-PreParser::Statement PreParser::ParseSubStatement(bool* ok) {
+PreParser::Statement PreParser::ParseSubStatement(
+    AllowLabelledFunctionStatement allow_function, bool* ok) {
   // Statement ::
   //   Block
   //   VariableStatement
@@ -355,12 +317,6 @@
       return ParseBlock(ok);
 
     case Token::SEMICOLON:
-      if (is_strong(language_mode())) {
-        PreParserTraits::ReportMessageAt(scanner()->peek_location(),
-                                         MessageTemplate::kStrongEmpty);
-        *ok = false;
-        return Statement::Default();
-      }
       Next();
       return Statement::Default();
 
@@ -397,20 +353,18 @@
     case Token::TRY:
       return ParseTryStatement(ok);
 
-    case Token::FUNCTION: {
-      Scanner::Location start_location = scanner()->peek_location();
-      Statement statement = ParseFunctionDeclaration(CHECK_OK);
-      Scanner::Location end_location = scanner()->location();
-      if (is_strict(language_mode())) {
-        PreParserTraits::ReportMessageAt(start_location.beg_pos,
-                                         end_location.end_pos,
-                                         MessageTemplate::kStrictFunction);
-        *ok = false;
-        return Statement::Default();
-      } else {
-        return statement;
-      }
-    }
+    case Token::FUNCTION:
+      // FunctionDeclaration only allowed as a StatementListItem, not in
+      // an arbitrary Statement position. Exceptions such as
+      // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
+      // are handled by calling ParseScopedStatement rather than
+      // ParseSubStatement directly.
+      ReportMessageAt(scanner()->peek_location(),
+                      is_strict(language_mode())
+                          ? MessageTemplate::kStrictFunction
+                          : MessageTemplate::kSloppyFunction);
+      *ok = false;
+      return Statement::Default();
 
     case Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
@@ -418,17 +372,8 @@
     case Token::VAR:
       return ParseVariableStatement(kStatement, ok);
 
-    case Token::CONST:
-      // In ES6 CONST is not allowed as a Statement, only as a
-      // LexicalDeclaration, however we continue to allow it in sloppy mode for
-      // backwards compatibility.
-      if (is_sloppy(language_mode()) && allow_legacy_const()) {
-        return ParseVariableStatement(kStatement, ok);
-      }
-
-    // Fall through.
     default:
-      return ParseExpressionOrLabelledStatement(ok);
+      return ParseExpressionOrLabelledStatement(allow_function, ok);
   }
 }
 
@@ -468,8 +413,8 @@
   bool is_strict_reserved = false;
   Identifier name =
       ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
-  ParseClassLiteral(name, scanner()->location(), is_strict_reserved, pos,
-                    CHECK_OK);
+  ParseClassLiteral(nullptr, name, scanner()->location(), is_strict_reserved,
+                    pos, CHECK_OK);
   return Statement::Default();
 }
 
@@ -527,12 +472,6 @@
   bool lexical = false;
   bool is_pattern = false;
   if (peek() == Token::VAR) {
-    if (is_strong(language_mode())) {
-      Scanner::Location location = scanner()->peek_location();
-      ReportMessageAt(location, MessageTemplate::kStrongVar);
-      *ok = false;
-      return Statement::Default();
-    }
     Consume(Token::VAR);
   } else if (peek() == Token::CONST && allow_const()) {
     // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
@@ -546,8 +485,7 @@
     // existing pages. Therefore we keep allowing const with the old
     // non-harmony semantics in sloppy mode.
     Consume(Token::CONST);
-    if (is_strict(language_mode()) ||
-        (allow_harmony_sloppy() && !allow_legacy_const())) {
+    if (is_strict(language_mode()) || allow_harmony_sloppy()) {
       DCHECK(var_context != kStatement);
       require_initializer = true;
       lexical = true;
@@ -574,19 +512,12 @@
     PreParserExpression pattern = PreParserExpression::Default();
     {
       ExpressionClassifier pattern_classifier(this);
-      Token::Value next = peek();
       pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
 
       ValidateBindingPattern(&pattern_classifier, CHECK_OK);
       if (lexical) {
         ValidateLetPattern(&pattern_classifier, CHECK_OK);
       }
-
-      if (!allow_harmony_destructuring_bind() && !pattern.IsIdentifier()) {
-        ReportUnexpectedToken(next);
-        *ok = false;
-        return Statement::Default();
-      }
     }
 
     is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
@@ -625,8 +556,8 @@
   return Statement::Default();
 }
 
-
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
+PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
+    AllowLabelledFunctionStatement allow_function, bool* ok) {
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
   //   Identifier ':' Statement
@@ -640,45 +571,6 @@
       *ok = false;
       return Statement::Default();
 
-    case Token::THIS:
-      if (!FLAG_strong_this) break;
-      // Fall through.
-    case Token::SUPER:
-      if (is_strong(language_mode()) &&
-          IsClassConstructor(function_state_->kind())) {
-        bool is_this = peek() == Token::THIS;
-        Expression expr = Expression::Default();
-        ExpressionClassifier classifier(this);
-        if (is_this) {
-          expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
-        } else {
-          expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
-        }
-        ValidateExpression(&classifier, CHECK_OK);
-        switch (peek()) {
-          case Token::SEMICOLON:
-            Consume(Token::SEMICOLON);
-            break;
-          case Token::RBRACE:
-          case Token::EOS:
-            break;
-          default:
-            if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
-              ReportMessageAt(function_state_->this_location(),
-                              is_this
-                                  ? MessageTemplate::kStrongConstructorThis
-                                  : MessageTemplate::kStrongConstructorSuper);
-              *ok = false;
-              return Statement::Default();
-            }
-        }
-        return Statement::ExpressionStatement(expr);
-      }
-      break;
-
-    // TODO(arv): Handle `let [`
-    // https://code.google.com/p/v8/issues/detail?id=3847
-
     default:
       break;
   }
@@ -698,7 +590,16 @@
     DCHECK(is_sloppy(language_mode()) ||
            !IsFutureStrictReserved(expr.AsIdentifier()));
     Consume(Token::COLON);
-    Statement statement = ParseStatement(ok);
+    // ES#sec-labelled-function-declarations Labelled Function Declarations
+    if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
+      if (allow_function == kAllowLabelledFunctionStatement) {
+        return ParseFunctionDeclaration(ok);
+      } else {
+        return ParseScopedStatement(true, ok);
+      }
+    }
+    Statement statement =
+        ParseStatement(kDisallowLabelledFunctionStatement, ok);
     return statement.IsJumpStatement() ? Statement::Default() : statement;
     // Preparsing is disabled for extensions (because the extension details
     // aren't passed to lazily compiled functions), so we don't
@@ -726,10 +627,10 @@
   Expect(Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
-  Statement stat = ParseSubStatement(CHECK_OK);
+  Statement stat = ParseScopedStatement(false, CHECK_OK);
   if (peek() == Token::ELSE) {
     Next();
-    Statement else_stat = ParseSubStatement(CHECK_OK);
+    Statement else_stat = ParseScopedStatement(false, CHECK_OK);
     stat = (stat.IsJumpStatement() && else_stat.IsJumpStatement()) ?
         Statement::Jump() : Statement::Default();
   } else {
@@ -795,14 +696,6 @@
       tok != Token::SEMICOLON &&
       tok != Token::RBRACE &&
       tok != Token::EOS) {
-    if (is_strong(language_mode()) &&
-        IsClassConstructor(function_state_->kind())) {
-      int pos = peek_position();
-      ReportMessageAt(Scanner::Location(pos, pos + 1),
-                      MessageTemplate::kStrongConstructorReturnValue);
-      *ok = false;
-      return Statement::Default();
-    }
     ParseExpression(true, CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
@@ -825,7 +718,7 @@
 
   Scope* with_scope = NewScope(scope_, WITH_SCOPE);
   BlockState block_state(&scope_, with_scope);
-  ParseSubStatement(CHECK_OK);
+  ParseScopedStatement(true, CHECK_OK);
   return Statement::Default();
 }
 
@@ -857,13 +750,6 @@
       statement = ParseStatementListItem(CHECK_OK);
       token = peek();
     }
-    if (is_strong(language_mode()) && !statement.IsJumpStatement() &&
-        token != Token::RBRACE) {
-      ReportMessageAt(scanner()->location(),
-                      MessageTemplate::kStrongSwitchFallthrough);
-      *ok = false;
-      return Statement::Default();
-    }
   }
   Expect(Token::RBRACE, ok);
   return Statement::Default();
@@ -875,7 +761,7 @@
   //   'do' Statement 'while' '(' Expression ')' ';'
 
   Expect(Token::DO, CHECK_OK);
-  ParseSubStatement(CHECK_OK);
+  ParseScopedStatement(true, CHECK_OK);
   Expect(Token::WHILE, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
@@ -893,7 +779,7 @@
   Expect(Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
-  ParseSubStatement(ok);
+  ParseScopedStatement(true, ok);
   return Statement::Default();
 }
 
@@ -945,7 +831,7 @@
         }
 
         Expect(Token::RPAREN, CHECK_OK);
-        ParseSubStatement(CHECK_OK);
+        ParseScopedStatement(true, CHECK_OK);
         return Statement::Default();
       }
     } else {
@@ -958,7 +844,6 @@
       bool is_for_each = CheckInOrOf(&mode, ok);
       if (!*ok) return Statement::Default();
       bool is_destructuring = is_for_each &&
-                              allow_harmony_destructuring_assignment() &&
                               (lhs->IsArrayLiteral() || lhs->IsObjectLiteral());
 
       if (is_destructuring) {
@@ -983,7 +868,7 @@
         }
 
         Expect(Token::RPAREN, CHECK_OK);
-        ParseSubStatement(CHECK_OK);
+        ParseScopedStatement(true, CHECK_OK);
         return Statement::Default();
       }
     }
@@ -1009,7 +894,7 @@
   }
   Expect(Token::RPAREN, CHECK_OK);
 
-  ParseSubStatement(ok);
+  ParseScopedStatement(true, ok);
   return Statement::Default();
 }
 
@@ -1156,16 +1041,6 @@
     CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
   }
 
-  if (is_strong(language_mode) && IsSubclassConstructor(kind)) {
-    if (!function_state.super_location().IsValid()) {
-      ReportMessageAt(function_name_location,
-                      MessageTemplate::kStrongSuperCallMissing,
-                      kReferenceError);
-      *ok = false;
-      return Expression::Default();
-    }
-  }
-
   return Expression::Default();
 }
 
@@ -1186,10 +1061,10 @@
                     scope_->uses_super_property(), scope_->calls_eval());
 }
 
-
 PreParserExpression PreParser::ParseClassLiteral(
-    PreParserIdentifier name, Scanner::Location class_name_location,
-    bool name_is_strict_reserved, int pos, bool* ok) {
+    ExpressionClassifier* classifier, PreParserIdentifier name,
+    Scanner::Location class_name_location, bool name_is_strict_reserved,
+    int pos, bool* ok) {
   // All parts of a ClassDeclaration and ClassExpression are strict code.
   if (name_is_strict_reserved) {
     ReportMessageAt(class_name_location,
@@ -1202,13 +1077,8 @@
     *ok = false;
     return EmptyExpression();
   }
-  LanguageMode class_language_mode = language_mode();
-  if (is_strong(class_language_mode) && IsUndefined(name)) {
-    ReportMessageAt(class_name_location, MessageTemplate::kStrongUndefined);
-    *ok = false;
-    return EmptyExpression();
-  }
 
+  LanguageMode class_language_mode = language_mode();
   Scope* scope = NewScope(scope_, BLOCK_SCOPE);
   BlockState block_state(&scope_, scope);
   scope_->SetLanguageMode(
@@ -1218,9 +1088,13 @@
 
   bool has_extends = Check(Token::EXTENDS);
   if (has_extends) {
-    ExpressionClassifier classifier(this);
-    ParseLeftHandSideExpression(&classifier, CHECK_OK);
-    ValidateExpression(&classifier, CHECK_OK);
+    ExpressionClassifier extends_classifier(this);
+    ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+    ValidateExpression(&extends_classifier, CHECK_OK);
+    if (classifier != nullptr) {
+      classifier->Accumulate(&extends_classifier,
+                             ExpressionClassifier::ExpressionProductions);
+    }
   }
 
   ClassLiteralChecker checker(this);
@@ -1234,11 +1108,15 @@
     bool is_computed_name = false;  // Classes do not care about computed
                                     // property names here.
     Identifier name;
-    ExpressionClassifier classifier(this);
+    ExpressionClassifier property_classifier(this);
     ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
                             &is_computed_name, &has_seen_constructor,
-                            &classifier, &name, CHECK_OK);
-    ValidateExpression(&classifier, CHECK_OK);
+                            &property_classifier, &name, CHECK_OK);
+    ValidateExpression(&property_classifier, CHECK_OK);
+    if (classifier != nullptr) {
+      classifier->Accumulate(&property_classifier,
+                             ExpressionClassifier::ExpressionProductions);
+    }
   }
 
   Expect(Token::RBRACE, CHECK_OK);
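
The preparser changes mirror the parser: function declarations are no longer accepted in arbitrary statement positions (only through ParseScopedStatement for the Annex B if/else-clause case and through labelled statements in sloppy mode), the "use strong" directive is dropped, and "use strict" is still rejected in a function whose parameter list is non-simple. A short sketch of that last rule (function names are hypothetical):

    function simpleParams(a: number, b: number) {
      "use strict";                 // allowed: simple parameter list
      return a + b;
    }

    // function defaultParam(a = 1) {
    //   "use strict";              // SyntaxError (kIllegalLanguageModeDirective)
    // }
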
diff --git a/src/parsing/preparser.h b/src/parsing/preparser.h
index 253251c..f2f6951 100644
--- a/src/parsing/preparser.h
+++ b/src/parsing/preparser.h
@@ -144,11 +144,6 @@
                                IsUseStrictField::encode(true));
   }
 
-  static PreParserExpression UseStrongStringLiteral() {
-    return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
-                               IsUseStrongField::encode(true));
-  }
-
   static PreParserExpression This() {
     return PreParserExpression(TypeField::encode(kExpression) |
                                ExpressionTypeField::encode(kThisExpression));
@@ -214,11 +209,6 @@
            IsUseStrictField::decode(code_);
   }
 
-  bool IsUseStrongLiteral() const {
-    return TypeField::decode(code_) == kStringLiteralExpression &&
-           IsUseStrongField::decode(code_);
-  }
-
   bool IsThis() const {
     return TypeField::decode(code_) == kExpression &&
            ExpressionTypeField::decode(code_) == kThisExpression;
@@ -317,7 +307,6 @@
   // of the Type field, so they can share the storage.
   typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
   typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
-  typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
   typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
       IdentifierTypeField;
   typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
@@ -366,9 +355,6 @@
     if (expression.IsUseStrictLiteral()) {
       return PreParserStatement(kUseStrictExpressionStatement);
     }
-    if (expression.IsUseStrongLiteral()) {
-      return PreParserStatement(kUseStrongExpressionStatement);
-    }
     if (expression.IsStringLiteral()) {
       return PreParserStatement(kStringLiteralExpressionStatement);
     }
@@ -376,15 +362,13 @@
   }
 
   bool IsStringLiteral() {
-    return code_ == kStringLiteralExpressionStatement;
+    return code_ == kStringLiteralExpressionStatement || IsUseStrictLiteral();
   }
 
   bool IsUseStrictLiteral() {
     return code_ == kUseStrictExpressionStatement;
   }
 
-  bool IsUseStrongLiteral() { return code_ == kUseStrongExpressionStatement; }
-
   bool IsFunctionDeclaration() {
     return code_ == kFunctionDeclaration;
   }
@@ -399,7 +383,6 @@
     kJumpStatement,
     kStringLiteralExpressionStatement,
     kUseStrictExpressionStatement,
-    kUseStrongExpressionStatement,
     kFunctionDeclaration
   };
 
@@ -424,18 +407,17 @@
   }
   PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
                                        int js_flags, int literal_index,
-                                       bool is_strong, int pos) {
+                                       int pos) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewArrayLiteral(PreParserExpressionList values,
                                       int literal_index,
-                                      bool is_strong,
                                       int pos) {
     return PreParserExpression::ArrayLiteral();
   }
   PreParserExpression NewArrayLiteral(PreParserExpressionList values,
                                       int first_spread_index, int literal_index,
-                                      bool is_strong, int pos) {
+                                      int pos) {
     return PreParserExpression::ArrayLiteral();
   }
   PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
@@ -454,8 +436,6 @@
   PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
                                        int literal_index,
                                        int boilerplate_properties,
-                                       bool has_function,
-                                       bool is_strong,
                                        int pos) {
     return PreParserExpression::ObjectLiteral();
   }
@@ -496,7 +476,6 @@
   }
   PreParserExpression NewYield(PreParserExpression generator_object,
                                PreParserExpression expression,
-                               Yield::Kind yield_kind,
                                int pos) {
     return PreParserExpression::Default();
   }
@@ -683,9 +662,6 @@
     UNREACHABLE();
   }
 
-  static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
-      Scope* scope, PreParserExpression property, bool* has_function) {}
-
   static void CheckAssigningFunctionLiteralToProperty(
       PreParserExpression left, PreParserExpression right) {}
 
@@ -710,6 +686,10 @@
     return PreParserExpression::Default();
   }
 
+  PreParserExpression BuildIteratorResult(PreParserExpression value,
+                                          bool done) {
+    return PreParserExpression::Default();
+  }
   PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
                                              int pos) {
     return PreParserExpression::Default();
@@ -902,11 +882,14 @@
       int function_token_position, FunctionLiteral::FunctionType type,
       LanguageMode language_mode, bool* ok);
 
-  PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+  PreParserExpression ParseClassLiteral(Type::ExpressionClassifier* classifier,
+                                        PreParserIdentifier name,
                                         Scanner::Location class_name_location,
                                         bool name_is_strict_reserved, int pos,
                                         bool* ok);
 
+  V8_INLINE void MarkTailPosition(PreParserExpression) {}
+
   PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
     return list;
   }
@@ -922,6 +905,16 @@
 
   inline void RewriteDestructuringAssignments() {}
 
+  inline PreParserExpression RewriteExponentiation(PreParserExpression left,
+                                                   PreParserExpression right,
+                                                   int pos) {
+    return left;
+  }
+  inline PreParserExpression RewriteAssignExponentiation(
+      PreParserExpression left, PreParserExpression right, int pos) {
+    return left;
+  }
+
   inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
   inline void QueueNonPatternForRewriting(PreParserExpression) {}
 
@@ -1029,8 +1022,11 @@
   Statement ParseStatementListItem(bool* ok);
   void ParseStatementList(int end_token, bool* ok,
                           Scanner::BookmarkScope* bookmark = nullptr);
-  Statement ParseStatement(bool* ok);
-  Statement ParseSubStatement(bool* ok);
+  Statement ParseStatement(AllowLabelledFunctionStatement allow_function,
+                           bool* ok);
+  Statement ParseSubStatement(AllowLabelledFunctionStatement allow_function,
+                              bool* ok);
+  Statement ParseScopedStatement(bool legacy, bool* ok);
   Statement ParseFunctionDeclaration(bool* ok);
   Statement ParseClassDeclaration(bool* ok);
   Statement ParseBlock(bool* ok);
@@ -1042,7 +1038,8 @@
                                       Scanner::Location* first_initializer_loc,
                                       Scanner::Location* bindings_loc,
                                       bool* ok);
-  Statement ParseExpressionOrLabelledStatement(bool* ok);
+  Statement ParseExpressionOrLabelledStatement(
+      AllowLabelledFunctionStatement allow_function, bool* ok);
   Statement ParseIfStatement(bool* ok);
   Statement ParseContinueStatement(bool* ok);
   Statement ParseBreakStatement(bool* ok);
@@ -1075,7 +1072,8 @@
   void ParseLazyFunctionLiteralBody(bool* ok,
                                     Scanner::BookmarkScope* bookmark = nullptr);
 
-  PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+  PreParserExpression ParseClassLiteral(ExpressionClassifier* classifier,
+                                        PreParserIdentifier name,
                                         Scanner::Location class_name_location,
                                         bool name_is_strict_reserved, int pos,
                                         bool* ok);
@@ -1140,8 +1138,7 @@
 
 PreParserExpression PreParserTraits::RewriteYieldStar(
     PreParserExpression generator, PreParserExpression expression, int pos) {
-  return pre_parser_->factory()->NewYield(
-      generator, expression, Yield::kDelegating, pos);
+  return PreParserExpression::Default();
 }
 
 PreParserExpression PreParserTraits::RewriteInstanceof(PreParserExpression lhs,
diff --git a/src/parsing/rewriter.cc b/src/parsing/rewriter.cc
index c8e8fed..915a464 100644
--- a/src/parsing/rewriter.cc
+++ b/src/parsing/rewriter.cc
@@ -355,14 +355,7 @@
     if (processor.HasStackOverflow()) return false;
 
     if (processor.result_assigned()) {
-      DCHECK(function->end_position() != RelocInfo::kNoPosition);
-      // Set the position of the assignment statement one character past the
-      // source code, such that it definitely is not in the source code range
-      // of an immediate inner scope. For example in
-      //   eval('with ({x:1}) x = 1');
-      // the end position of the function generated for executing the eval code
-      // coincides with the end of the with scope which is the position of '1'.
-      int pos = function->end_position();
+      int pos = RelocInfo::kNoPosition;
       VariableProxy* result_proxy =
           processor.factory()->NewVariableProxy(result, pos);
       Statement* result_statement =
diff --git a/src/parsing/scanner.cc b/src/parsing/scanner.cc
index 2d5a579..698cb5e 100644
--- a/src/parsing/scanner.cc
+++ b/src/parsing/scanner.cc
@@ -40,7 +40,8 @@
     : unicode_cache_(unicode_cache),
       bookmark_c0_(kNoBookmark),
       octal_pos_(Location::invalid()),
-      found_html_comment_(false) {
+      found_html_comment_(false),
+      allow_harmony_exponentiation_operator_(false) {
   bookmark_current_.literal_chars = &bookmark_current_literal_;
   bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
   bookmark_next_.literal_chars = &bookmark_next_literal_;
@@ -60,15 +61,19 @@
   Scan();
 }
 
-
-template <bool capture_raw>
+template <bool capture_raw, bool unicode>
 uc32 Scanner::ScanHexNumber(int expected_length) {
   DCHECK(expected_length <= 4);  // prevent overflow
 
+  int begin = source_pos() - 2;
   uc32 x = 0;
   for (int i = 0; i < expected_length; i++) {
     int d = HexValue(c0_);
     if (d < 0) {
+      ReportScannerError(Location(begin, begin + expected_length + 2),
+                         unicode
+                             ? MessageTemplate::kInvalidUnicodeEscapeSequence
+                             : MessageTemplate::kInvalidHexEscapeSequence);
       return -1;
     }
     x = x * 16 + d;
@@ -78,20 +83,23 @@
   return x;
 }
 
-
 template <bool capture_raw>
-uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value) {
+uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value, int beg_pos) {
   uc32 x = 0;
   int d = HexValue(c0_);
-  if (d < 0) {
-    return -1;
-  }
+  if (d < 0) return -1;
+
   while (d >= 0) {
     x = x * 16 + d;
-    if (x > max_value) return -1;
+    if (x > max_value) {
+      ReportScannerError(Location(beg_pos, source_pos() + 1),
+                         MessageTemplate::kUndefinedUnicodeCodePoint);
+      return -1;
+    }
     Advance<capture_raw>();
     d = HexValue(c0_);
   }
+
   return x;
 }
 
@@ -565,7 +573,14 @@
 
       case '*':
         // * *=
-        token = Select('=', Token::ASSIGN_MUL, Token::MUL);
+        Advance();
+        if (c0_ == '*' && allow_harmony_exponentiation_operator()) {
+          token = Select('=', Token::ASSIGN_EXP, Token::EXP);
+        } else if (c0_ == '=') {
+          token = Select(Token::ASSIGN_MUL);
+        } else {
+          token = Token::MUL;
+        }
         break;
 
       case '%':
@@ -847,7 +862,9 @@
     uc32 c = c0_;
     Advance();
     if (c == '\\') {
-      if (c0_ < 0 || !ScanEscape<false, false>()) return Token::ILLEGAL;
+      if (c0_ < 0 || !ScanEscape<false, false>()) {
+        return Token::ILLEGAL;
+      }
     } else {
       AddLiteralChar(c);
     }
@@ -879,7 +896,6 @@
   StartRawLiteral();
   const bool capture_raw = true;
   const bool in_template_literal = true;
-
   while (true) {
     uc32 c = c0_;
     Advance<capture_raw>();
@@ -1099,18 +1115,19 @@
   // Accept both \uxxxx and \u{xxxxxx}. In the latter case, the number of
   // hex digits between { } is arbitrary. \ and u have already been read.
   if (c0_ == '{') {
+    int begin = source_pos() - 2;
     Advance<capture_raw>();
-    uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff);
-    if (cp < 0) {
-      return -1;
-    }
-    if (c0_ != '}') {
+    uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff, begin);
+    if (cp < 0 || c0_ != '}') {
+      ReportScannerError(source_pos(),
+                         MessageTemplate::kInvalidUnicodeEscapeSequence);
       return -1;
     }
     Advance<capture_raw>();
     return cp;
   }
-  return ScanHexNumber<capture_raw>(4);
+  const bool unicode = true;
+  return ScanHexNumber<capture_raw, unicode>(4);
 }
 
 
@@ -1420,7 +1437,6 @@
         flag = RegExp::kUnicode;
         break;
       case 'y':
-        if (!FLAG_harmony_regexps) return Nothing<RegExp::Flags>();
         flag = RegExp::kSticky;
         break;
       default:
diff --git a/src/parsing/scanner.h b/src/parsing/scanner.h
index 3f9bbb5..22c504c 100644
--- a/src/parsing/scanner.h
+++ b/src/parsing/scanner.h
@@ -10,13 +10,14 @@
 #include "src/allocation.h"
 #include "src/base/logging.h"
 #include "src/char-predicates.h"
+#include "src/collector.h"
 #include "src/globals.h"
 #include "src/hashmap.h"
 #include "src/list.h"
+#include "src/messages.h"
 #include "src/parsing/token.h"
 #include "src/unicode.h"
 #include "src/unicode-decoder.h"
-#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -354,6 +355,10 @@
   // (the token last returned by Next()).
   Location location() const { return current_.location; }
 
+  bool has_error() const { return scanner_error_ != MessageTemplate::kNone; }
+  MessageTemplate::Template error() const { return scanner_error_; }
+  Location error_location() const { return scanner_error_location_; }
+
   // Similar functions for the upcoming token.
 
   // One token look-ahead (past the token returned by Next()).
@@ -450,6 +455,12 @@
 
   bool FoundHtmlComment() const { return found_html_comment_; }
 
+#define DECLARE_ACCESSORS(name)                                \
+  inline bool allow_##name() const { return allow_##name##_; } \
+  inline void set_allow_##name(bool allow) { allow_##name##_ = allow; }
+  DECLARE_ACCESSORS(harmony_exponentiation_operator)
+#undef DECLARE_ACCESSORS
+
  private:
   // The current and look-ahead token.
   struct TokenDesc {
@@ -476,6 +487,7 @@
     current_.raw_literal_chars = NULL;
     next_next_.token = Token::UNINITIALIZED;
     found_html_comment_ = false;
+    scanner_error_ = MessageTemplate::kNone;
   }
 
   // Support BookmarkScope functionality.
@@ -486,6 +498,19 @@
   void DropBookmark();
   static void CopyTokenDesc(TokenDesc* to, TokenDesc* from);
 
+  void ReportScannerError(const Location& location,
+                          MessageTemplate::Template error) {
+    if (has_error()) return;
+    scanner_error_ = error;
+    scanner_error_location_ = location;
+  }
+
+  void ReportScannerError(int pos, MessageTemplate::Template error) {
+    if (has_error()) return;
+    scanner_error_ = error;
+    scanner_error_location_ = Location(pos, pos + 1);
+  }
+
   // Literal buffer support
   inline void StartLiteral() {
     LiteralBuffer* free_buffer =
@@ -631,13 +656,13 @@
     return current_.raw_literal_chars->is_one_byte();
   }
 
-  template <bool capture_raw>
+  template <bool capture_raw, bool unicode = false>
   uc32 ScanHexNumber(int expected_length);
   // Scan a number of any length but not bigger than max_value. For example, the
   // number can be 000000001, so it's very long in characters but its value is
   // small.
   template <bool capture_raw>
-  uc32 ScanUnlimitedLengthHexNumber(int max_value);
+  uc32 ScanUnlimitedLengthHexNumber(int max_value, int beg_pos);
 
   // Scans a single JavaScript token.
   void Scan();
@@ -758,6 +783,11 @@
 
   // Whether this scanner encountered an HTML comment.
   bool found_html_comment_;
+
+  bool allow_harmony_exponentiation_operator_;
+
+  MessageTemplate::Template scanner_error_;
+  Location scanner_error_location_;
 };
 
 }  // namespace internal
diff --git a/src/parsing/token.h b/src/parsing/token.h
index 7a62b4d..fae9ea8 100644
--- a/src/parsing/token.h
+++ b/src/parsing/token.h
@@ -63,6 +63,7 @@
   T(ASSIGN_MUL, "*=", 2)                                             \
   T(ASSIGN_DIV, "/=", 2)                                             \
   T(ASSIGN_MOD, "%=", 2)                                             \
+  T(ASSIGN_EXP, "**=", 2)                                            \
                                                                      \
   /* Binary operators sorted by precedence. */                       \
   /* IsBinaryOp() relies on this block of enum values */             \
@@ -82,6 +83,7 @@
   T(MUL, "*", 13)                                                    \
   T(DIV, "/", 13)                                                    \
   T(MOD, "%", 13)                                                    \
+  T(EXP, "**", 14)                                                   \
                                                                      \
   /* Compare operators sorted by precedence. */                      \
   /* IsCompareOp() relies on this block of enum values */            \
@@ -214,12 +216,10 @@
   }
 
   static bool IsAssignmentOp(Value tok) {
-    return INIT <= tok && tok <= ASSIGN_MOD;
+    return INIT <= tok && tok <= ASSIGN_EXP;
   }
 
-  static bool IsBinaryOp(Value op) {
-    return COMMA <= op && op <= MOD;
-  }
+  static bool IsBinaryOp(Value op) { return COMMA <= op && op <= EXP; }
 
   static bool IsTruncatingBinaryOp(Value op) {
     return BIT_OR <= op && op <= ROR;
diff --git a/src/perf-jit.cc b/src/perf-jit.cc
new file mode 100644
index 0000000..6f35514
--- /dev/null
+++ b/src/perf-jit.cc
@@ -0,0 +1,336 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/perf-jit.h"
+
+#include "src/assembler.h"
+#include "src/objects-inl.h"
+
+#if V8_OS_LINUX
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#endif  // V8_OS_LINUX
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_LINUX
+
+struct PerfJitHeader {
+  uint32_t magic_;
+  uint32_t version_;
+  uint32_t size_;
+  uint32_t elf_mach_target_;
+  uint32_t reserved_;
+  uint32_t process_id_;
+  uint64_t time_stamp_;
+  uint64_t flags_;
+
+  static const uint32_t kMagic = 0x4A695444;
+  static const uint32_t kVersion = 1;
+};
+
+struct PerfJitBase {
+  enum PerfJitEvent { kLoad = 0, kMove = 1, kDebugInfo = 2, kClose = 3 };
+
+  uint32_t event_;
+  uint32_t size_;
+  uint64_t time_stamp_;
+};
+
+struct PerfJitCodeLoad : PerfJitBase {
+  uint32_t process_id_;
+  uint32_t thread_id_;
+  uint64_t vma_;
+  uint64_t code_address_;
+  uint64_t code_size_;
+  uint64_t code_id_;
+};
+
+struct PerfJitDebugEntry {
+  uint64_t address_;
+  int line_number_;
+  int column_;
+  // Followed by a null-terminated name, or "\xff\0" if same as previous.
+};
+
+struct PerfJitCodeDebugInfo : PerfJitBase {
+  uint64_t address_;
+  uint64_t entry_count_;
+  // Followed by entry_count_ instances of PerfJitDebugEntry.
+};
+
+const char PerfJitLogger::kFilenameFormatString[] = "./jit-%d.dump";
+
+// Extra padding for the PID in the filename
+const int PerfJitLogger::kFilenameBufferPadding = 16;
+
+base::LazyRecursiveMutex PerfJitLogger::file_mutex_;
+// The following static variables are protected by PerfJitLogger::file_mutex_.
+uint64_t PerfJitLogger::reference_count_ = 0;
+void* PerfJitLogger::marker_address_ = nullptr;
+uint64_t PerfJitLogger::code_index_ = 0;
+FILE* PerfJitLogger::perf_output_handle_ = nullptr;
+
+void PerfJitLogger::OpenJitDumpFile() {
+  // Open the perf JIT dump file.
+  perf_output_handle_ = nullptr;
+
+  int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
+  ScopedVector<char> perf_dump_name(bufferSize);
+  int size = SNPrintF(perf_dump_name, kFilenameFormatString,
+                      base::OS::GetCurrentProcessId());
+  CHECK_NE(size, -1);
+
+  int fd = open(perf_dump_name.start(), O_CREAT | O_TRUNC | O_RDWR, 0666);
+  if (fd == -1) return;
+
+  marker_address_ = OpenMarkerFile(fd);
+  if (marker_address_ == nullptr) return;
+
+  perf_output_handle_ = fdopen(fd, "w+");
+  if (perf_output_handle_ == nullptr) return;
+
+  setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+}
+
+void PerfJitLogger::CloseJitDumpFile() {
+  if (perf_output_handle_ == nullptr) return;
+  fclose(perf_output_handle_);
+  perf_output_handle_ = nullptr;
+}
+
+void* PerfJitLogger::OpenMarkerFile(int fd) {
+  long page_size = sysconf(_SC_PAGESIZE);  // NOLINT(runtime/int)
+  if (page_size == -1) return nullptr;
+
+  // Mmap the file so that there is a mmap record in the perf_data file.
+  //
+  // The map must be PROT_EXEC to ensure it is not ignored by perf record.
+  void* marker_address =
+      mmap(nullptr, page_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
+  return (marker_address == MAP_FAILED) ? nullptr : marker_address;
+}
+
+void PerfJitLogger::CloseMarkerFile(void* marker_address) {
+  if (marker_address == nullptr) return;
+  long page_size = sysconf(_SC_PAGESIZE);  // NOLINT(runtime/int)
+  if (page_size == -1) return;
+  munmap(marker_address, page_size);
+}
+
+PerfJitLogger::PerfJitLogger() {
+  base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+  reference_count_++;
+  // If this is the first logger, open the file and write the header.
+  if (reference_count_ == 1) {
+    OpenJitDumpFile();
+    if (perf_output_handle_ == nullptr) return;
+    LogWriteHeader();
+  }
+}
+
+PerfJitLogger::~PerfJitLogger() {
+  base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+  reference_count_--;
+  // If this was the last logger, close the file.
+  if (reference_count_ == 0) {
+    CloseJitDumpFile();
+  }
+}
+
+uint64_t PerfJitLogger::GetTimestamp() {
+  struct timespec ts;
+  int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+  DCHECK_EQ(0, result);
+  USE(result);
+  static const uint64_t kNsecPerSec = 1000000000;
+  return (ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
+}
+
+void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
+                                      SharedFunctionInfo* shared,
+                                      const char* name, int length) {
+  if (FLAG_perf_basic_prof_only_functions &&
+      (abstract_code->kind() != AbstractCode::FUNCTION &&
+       abstract_code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
+       abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
+    return;
+  }
+
+  base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+  if (perf_output_handle_ == nullptr) return;
+
+  // We only support non-interpreted functions.
+  if (!abstract_code->IsCode()) return;
+  Code* code = abstract_code->GetCode();
+  DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
+
+  // Debug info has to be emitted first.
+  if (FLAG_perf_prof_debug_info && shared != nullptr) {
+    LogWriteDebugInfo(code, shared);
+  }
+
+  const char* code_name = name;
+  uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
+  uint32_t code_size = code->is_crankshafted() ? code->safepoint_table_offset()
+                                               : code->instruction_size();
+
+  static const char string_terminator[] = "\0";
+
+  PerfJitCodeLoad code_load;
+  code_load.event_ = PerfJitCodeLoad::kLoad;
+  code_load.size_ = sizeof(code_load) + length + 1 + code_size;
+  code_load.time_stamp_ = GetTimestamp();
+  code_load.process_id_ =
+      static_cast<uint32_t>(base::OS::GetCurrentProcessId());
+  code_load.thread_id_ = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
+  code_load.vma_ = 0x0;  //  Our addresses are absolute.
+  code_load.code_address_ = reinterpret_cast<uint64_t>(code_pointer);
+  code_load.code_size_ = code_size;
+  code_load.code_id_ = code_index_;
+
+  code_index_++;
+
+  LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
+  LogWriteBytes(code_name, length);
+  LogWriteBytes(string_terminator, 1);
+  LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
+}
+
+void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
+  // Compute the entry count and get the name of the script.
+  uint32_t entry_count = 0;
+  for (RelocIterator it(code, RelocInfo::kPositionMask); !it.done();
+       it.next()) {
+    entry_count++;
+  }
+  if (entry_count == 0) return;
+  Handle<Script> script(Script::cast(shared->script()));
+  Handle<Object> name_or_url(Script::GetNameOrSourceURL(script));
+
+  int name_length = 0;
+  base::SmartArrayPointer<char> name_string;
+  if (name_or_url->IsString()) {
+    name_string =
+        Handle<String>::cast(name_or_url)
+            ->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &name_length);
+    DCHECK_EQ(0, name_string.get()[name_length]);
+  } else {
+    const char unknown[] = "<unknown>";
+    name_length = static_cast<int>(strlen(unknown));
+    char* buffer = NewArray<char>(name_length + 1);
+    base::OS::StrNCpy(buffer, name_length + 1, unknown,
+                      static_cast<size_t>(name_length) + 1);
+    name_string = base::SmartArrayPointer<char>(buffer);
+  }
+  DCHECK_EQ(name_length, strlen(name_string.get()));
+
+  PerfJitCodeDebugInfo debug_info;
+
+  debug_info.event_ = PerfJitCodeLoad::kDebugInfo;
+  debug_info.time_stamp_ = GetTimestamp();
+  debug_info.address_ = reinterpret_cast<uint64_t>(code->instruction_start());
+  debug_info.entry_count_ = entry_count;
+
+  uint32_t size = sizeof(debug_info);
+  // Add the sizes of fixed parts of entries.
+  size += entry_count * sizeof(PerfJitDebugEntry);
+  // Add the size of the name after the first entry.
+  size += (static_cast<uint32_t>(name_length) + 1) * entry_count;
+
+  int padding = ((size + 7) & (~7)) - size;
+
+  debug_info.size_ = size + padding;
+
+  LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
+
+  int script_line_offset = script->line_offset();
+  Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+
+  for (RelocIterator it(code, RelocInfo::kPositionMask); !it.done();
+       it.next()) {
+    int position = static_cast<int>(it.rinfo()->data());
+    int line_number = Script::GetLineNumber(script, position);
+    // Compute column.
+    int relative_line_number = line_number - script_line_offset;
+    int start =
+        (relative_line_number == 0)
+            ? 0
+            : Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+    int column_offset = position - start;
+    if (relative_line_number == 0) {
+      // For the case where the code is on the same line as the script tag.
+      column_offset += script->column_offset();
+    }
+
+    PerfJitDebugEntry entry;
+    entry.address_ = reinterpret_cast<uint64_t>(it.rinfo()->pc());
+    entry.line_number_ = line_number;
+    entry.column_ = column_offset;
+    LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
+    LogWriteBytes(name_string.get(), name_length + 1);
+  }
+  char padding_bytes[] = "\0\0\0\0\0\0\0\0";
+  LogWriteBytes(padding_bytes, padding);
+}
+
+void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
+  // Code relocation not supported.
+  UNREACHABLE();
+}
+
+void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
+  size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
+  DCHECK(static_cast<size_t>(size) == rv);
+  USE(rv);
+}
+
+void PerfJitLogger::LogWriteHeader() {
+  DCHECK(perf_output_handle_ != NULL);
+  PerfJitHeader header;
+
+  header.magic_ = PerfJitHeader::kMagic;
+  header.version_ = PerfJitHeader::kVersion;
+  header.size_ = sizeof(header);
+  header.elf_mach_target_ = GetElfMach();
+  header.reserved_ = 0xdeadbeef;
+  header.process_id_ = base::OS::GetCurrentProcessId();
+  header.time_stamp_ =
+      static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0);
+  header.flags_ = 0;
+
+  LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
+}
+
+#endif  // V8_OS_LINUX
+}  // namespace internal
+}  // namespace v8
diff --git a/src/perf-jit.h b/src/perf-jit.h
new file mode 100644
index 0000000..25cc3b3
--- /dev/null
+++ b/src/perf-jit.h
@@ -0,0 +1,122 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PERF_JIT_H_
+#define V8_PERF_JIT_H_
+
+#include "src/log.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_LINUX
+
+// Linux perf tool logging support
+class PerfJitLogger : public CodeEventLogger {
+ public:
+  PerfJitLogger();
+  virtual ~PerfJitLogger();
+
+  void CodeMoveEvent(AbstractCode* from, Address to) override;
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override {}
+
+ private:
+  void OpenJitDumpFile();
+  void CloseJitDumpFile();
+  void* OpenMarkerFile(int fd);
+  void CloseMarkerFile(void* marker_address);
+
+  uint64_t GetTimestamp();
+  void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+                         const char* name, int length) override;
+
+  // Extension added to V8 log file name to get the low-level log name.
+  static const char kFilenameFormatString[];
+  static const int kFilenameBufferPadding;
+
+  // File buffer size of the low-level log. We don't use the default to
+  // minimize the associated overhead.
+  static const int kLogBufferSize = 2 * MB;
+
+  void LogWriteBytes(const char* bytes, int size);
+  void LogWriteHeader();
+  void LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared);
+
+  static const uint32_t kElfMachIA32 = 3;
+  static const uint32_t kElfMachX64 = 62;
+  static const uint32_t kElfMachARM = 40;
+  static const uint32_t kElfMachMIPS = 10;
+
+  uint32_t GetElfMach() {
+#if V8_TARGET_ARCH_IA32
+    return kElfMachIA32;
+#elif V8_TARGET_ARCH_X64
+    return kElfMachX64;
+#elif V8_TARGET_ARCH_ARM
+    return kElfMachARM;
+#elif V8_TARGET_ARCH_MIPS
+    return kElfMachMIPS;
+#else
+    UNIMPLEMENTED();
+    return 0;
+#endif
+  }
+
+  // Per-process singleton file. We assume that there is one main isolate;
+  // to determine when it goes away, we keep reference count.
+  static base::LazyRecursiveMutex file_mutex_;
+  static FILE* perf_output_handle_;
+  static uint64_t reference_count_;
+  static void* marker_address_;
+  static uint64_t code_index_;
+};
+
+#else
+
+// PerfJitLogger is only implemented on Linux
+class PerfJitLogger : public CodeEventLogger {
+ public:
+  void CodeMoveEvent(AbstractCode* from, Address to) override {
+    UNIMPLEMENTED();
+  }
+
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override {
+    UNIMPLEMENTED();
+  }
+
+  void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
+                         const char* name, int length) override {
+    UNIMPLEMENTED();
+  }
+};
+
+#endif  // V8_OS_LINUX
+}  // namespace internal
+}  // namespace v8
+#endif
diff --git a/src/ppc/assembler-ppc-inl.h b/src/ppc/assembler-ppc-inl.h
index 42e2208..c495fee 100644
--- a/src/ppc/assembler-ppc-inl.h
+++ b/src/ppc/assembler-ppc-inl.h
@@ -89,6 +89,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
@@ -179,6 +183,18 @@
   return pc + (len + 2) * kInstrSize;
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+                                   icache_flush_mode);
+}
 
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc
index aed149b..507eec1 100644
--- a/src/ppc/assembler-ppc.cc
+++ b/src/ppc/assembler-ppc.cc
@@ -849,6 +849,10 @@
   xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
 }
 
+void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
+                     RCBit r) {
+  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
+}
 
 void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
   // a special xo_form
@@ -861,12 +865,15 @@
   xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
 }
 
-
-void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
-                      RCBit r) {
+void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
+                     RCBit r) {
   xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
 }
 
+void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
+                     RCBit r) {
+  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
+}
 
 void Assembler::subfic(Register dst, Register src, const Operand& imm) {
   d_form(SUBFIC, dst, src, imm.imm_, true);
diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h
index 58c6c94..3e8be7d 100644
--- a/src/ppc/assembler-ppc.h
+++ b/src/ppc/assembler-ppc.h
@@ -808,18 +808,21 @@
   void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
            RCBit r = LeaveRC);
 
-  void subfic(Register dst, Register src, const Operand& imm);
+  void subc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+            RCBit r = LeaveRC);
+  void sube(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+            RCBit r = LeaveRC);
 
-  void subfc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
-             RCBit r = LeaveRC);
+  void subfic(Register dst, Register src, const Operand& imm);
 
   void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
            RCBit r = LeaveRC);
 
   void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
             RCBit r = LeaveRC);
-
-  void addze(Register dst, Register src1, OEBit o, RCBit r);
+  void adde(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+            RCBit r = LeaveRC);
+  void addze(Register dst, Register src1, OEBit o = LeaveOE, RCBit r = LeaveRC);
 
   void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
              RCBit r = LeaveRC);
@@ -1216,7 +1219,9 @@
   void dq(uint64_t data);
   void dp(uintptr_t data);
 
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
 
   // Read/patch instructions
   Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -1463,8 +1468,8 @@
   friend class RelocInfo;
   friend class CodePatcher;
   friend class BlockTrampolinePoolScope;
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
   friend class EnsureSpace;
 };
 
diff --git a/src/ppc/builtins-ppc.cc b/src/ppc/builtins-ppc.cc
index f0b76cc..884afed 100644
--- a/src/ppc/builtins-ppc.cc
+++ b/src/ppc/builtins-ppc.cc
@@ -532,6 +532,7 @@
   //  -- r4     : constructor function
   //  -- r5     : allocation site or undefined
   //  -- r6     : new target
+  //  -- cp     : context
   //  -- lr     : return address
   //  -- sp[...]: constructor arguments
   // -----------------------------------
@@ -547,11 +548,11 @@
 
     if (!create_implicit_receiver) {
       __ SmiTag(r7, r3, SetRC);
-      __ Push(r5, r7);
+      __ Push(cp, r5, r7);
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     } else {
       __ SmiTag(r3);
-      __ Push(r5, r3);
+      __ Push(cp, r5, r3);
 
       // Allocate the new receiver object.
       __ Push(r4, r6);
@@ -623,7 +624,7 @@
     // r3: result
     // sp[0]: receiver
     // sp[1]: number of arguments (smi-tagged)
-    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -754,9 +755,6 @@
   // r0,r8-r9, cp may be clobbered
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Clear the context before we push it when entering the internal frame.
-  __ li(cp, Operand::Zero());
-
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -853,8 +851,7 @@
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ PushFixedFrame(r4);
-  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(r4);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
@@ -1208,8 +1205,7 @@
   __ mr(ip, r3);
 
   // Perform prologue operations usually performed by the young code stub.
-  __ PushFixedFrame(r4);
-  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ PushStandardFrame(r4);
 
   // Jump to point after the code-age stub.
   __ addi(r3, ip, Operand(kNoCodeAgeSequenceLength));
@@ -1454,24 +1450,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  __ cmpl(sp, ip);
-  __ bge(&ok);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&ok);
-  __ Ret();
-}
-
-
 // static
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
@@ -1518,6 +1496,27 @@
   __ TailCallRuntime(Runtime::kThrowNotDateError);
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3    : argc
+  //  -- sp[0] : first argument (left-hand side)
+  //  -- sp[4] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ LoadP(InstanceOfDescriptor::LeftRegister(),
+             MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
+    __ LoadP(InstanceOfDescriptor::RightRegister(),
+             MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ Ret(2);
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1988,19 +1987,21 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ mov(scratch1, Operand(debug_is_active));
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
   __ lbz(scratch1, MemOperand(scratch1));
   __ cmpi(scratch1, Operand::Zero());
-  __ bne(&done);
+  __ beq(&done);
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ LoadP(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ LoadP(scratch3,
+             MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
     __ bne(&no_interpreter_frame);
     __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2008,75 +2009,41 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(scratch3,
-           MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ LoadP(
+      scratch3,
+      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ bne(&no_arguments_adaptor);
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ mr(fp, scratch2);
-  __ LoadP(scratch1,
+  __ LoadP(caller_args_count_reg,
            MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
   __ b(&formal_parameter_count_loaded);
 
   __ bind(&no_arguments_adaptor);
   // Load caller's formal parameter count
-  __ LoadP(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadP(scratch1,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
   __ LoadP(scratch1,
            FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
   __ LoadWordArith(
-      scratch1, FieldMemOperand(
-                    scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+      caller_args_count_reg,
+      FieldMemOperand(scratch1,
+                      SharedFunctionInfo::kFormalParameterCountOffset));
 #if !V8_TARGET_ARCH_PPC64
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
 #endif
 
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the end of destination area where we will put the arguments
-  // after we drop current frame. We add kPointerSize to count the receiver
-  // argument which is not included into formal parameters count.
-  Register dst_reg = scratch2;
-  __ ShiftLeftImm(dst_reg, scratch1, Operand(kPointerSizeLog2));
-  __ add(dst_reg, fp, dst_reg);
-  __ addi(dst_reg, dst_reg,
-          Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
-
-  Register src_reg = scratch1;
-  __ ShiftLeftImm(src_reg, args_reg, Operand(kPointerSizeLog2));
-  __ add(src_reg, sp, src_reg);
-  // Count receiver argument as well (not included in args_reg).
-  __ addi(src_reg, src_reg, Operand(kPointerSize));
-
-  if (FLAG_debug_code) {
-    __ cmpl(src_reg, dst_reg);
-    __ Check(lt, kStackAccessBelowStackPointer);
-  }
-
-  // Restore caller's frame pointer and return address now as they will be
-  // overwritten by the copying loop.
-  __ RestoreFrameStateForTailCall();
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-
-  // Both src_reg and dst_reg are pointing to the word after the one to copy,
-  // so they must be pre-decremented in the loop.
-  Register tmp_reg = scratch3;
-  Label loop;
-  __ addi(tmp_reg, args_reg, Operand(1));  // +1 for receiver
-  __ mtctr(tmp_reg);
-  __ bind(&loop);
-  __ LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
-  __ StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
-  __ bdnz(&loop);
-
-  // Leave current frame.
-  __ mr(sp, dst_reg);
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
   __ bind(&done);
 }
 }  // namespace
@@ -2551,31 +2518,6 @@
   {  // Too few parameters: Actual < expected
     __ bind(&too_few);
 
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-    __ lwz(r8, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
-    __ TestBit(r8, SharedFunctionInfo::kStrongModeBit, r0);
-    __ beq(&no_strong_error, cr0);
-
-    // What we really care about is the required number of arguments.
-    __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kLengthOffset));
-#if V8_TARGET_ARCH_PPC64
-    // See commment near kLenghtOffset in src/objects.h
-    __ srawi(r7, r7, kSmiTagSize);
-#else
-    __ SmiUntag(r7);
-#endif
-    __ cmp(r3, r7);
-    __ bge(&no_strong_error);
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentAdaptorStackCheck(masm, &stack_overflow);
 
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index 03c73af..0671f99 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -4,9 +4,10 @@
 
 #if V8_TARGET_ARCH_PPC
 
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
-#include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
@@ -76,6 +77,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -494,8 +499,9 @@
   __ b(both_loaded_as_doubles);
 }
 
-
-// Fast negative check for internalized-to-internalized equality.
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                      Register lhs, Register rhs,
                                                      Label* possible_strings,
@@ -503,7 +509,7 @@
   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
 
   // r5 is object type of rhs.
-  Label object_test, return_unequal, undetectable;
+  Label object_test, return_equal, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ andi(r0, r5, Operand(kIsNotStringMask));
   __ bne(&object_test, cr0);
@@ -541,6 +547,16 @@
   __ bind(&undetectable);
   __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
   __ beq(&return_unequal, cr0);
+
+  // If both sides are JSReceivers, then the result is false according to
+  // the HTML specification, which says that only comparisons with null or
+  // undefined are affected by special casing for document.all.
+  __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
+  __ beq(&return_equal);
+  __ CompareInstanceType(r6, r6, ODDBALL_TYPE);
+  __ bne(&return_unequal);
+
+  __ bind(&return_equal);
   __ li(r3, Operand(EQUAL));
   __ Ret();
 }
@@ -1409,8 +1425,12 @@
   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
   __ bne(&slow_case);
 
-  // Ensure that {function} has an instance prototype.
+  // Go to the runtime if the function is not a constructor.
   __ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+  __ TestBit(scratch, Map::kIsConstructor, r0);
+  __ beq(&slow_case, cr0);
+
+  // Ensure that {function} has an instance prototype.
   __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
   __ bne(&slow_case, cr0);
 
@@ -1478,7 +1498,8 @@
   // Slow-case: Call the %InstanceOf runtime function.
   __ bind(&slow_case);
   __ Push(object, function);
-  __ TailCallRuntime(Runtime::kInstanceOf);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -1531,29 +1552,6 @@
 }
 
 
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is in lr.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-
-  // Check that the key is an array index, that is Uint32.
-  __ TestIfPositiveSmi(key, r0);
-  __ bne(&slow, cr0);
-
-  // Everything is fine, call runtime.
-  __ Push(receiver, key);  // Receiver, key.
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 // Just jump directly to runtime if native RegExp is not selected at compile
 // time or if regexp entry in generated code is turned off runtime switch or
@@ -2695,29 +2693,28 @@
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in r3.
-  Label not_smi;
-  __ JumpIfNotSmi(r3, &not_smi);
-  __ blr();
-  __ bind(&not_smi);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ TestIfSmi(r3, r0);
+  __ Ret(eq, cr0);
 
   __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
   // r3: receiver
   // r4: receiver instance type
   __ Ret(eq);
 
-  Label not_string, slow_string;
-  __ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
-  __ bge(&not_string);
-  // Check if string has a cached array index.
-  __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
-  __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
-  __ bne(&slow_string, cr0);
-  __ IndexFromHash(r5, r3);
-  __ blr();
-  __ bind(&slow_string);
-  __ push(r3);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-  __ bind(&not_string);
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in r3.
+  __ AssertNotNumber(r3);
+
+  __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+  // r3: receiver
+  // r4: receiver instance type
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub, lt);
 
   Label not_oddball;
   __ cmpi(r4, Operand(ODDBALL_TYPE));
@@ -2730,29 +2727,23 @@
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in r3.
+  __ AssertString(r3);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes one argument in r3.
-  Label not_smi;
-  __ JumpIfNotSmi(r3, &not_smi);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ cmpi(r3, Operand::Zero());
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ isel(lt, r3, r0, r3);
-  } else {
-    Label positive;
-    __ bgt(&positive);
-    __ li(r3, Operand::Zero());
-    __ bind(&positive);
-  }
-  __ Ret();
-  __ bind(&not_smi);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
+  __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
+  __ bne(&runtime, cr0);
+  __ IndexFromHash(r5, r3);
+  __ blr();
 
+  __ bind(&runtime);
   __ push(r3);  // Push argument.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in r3.
   Label is_number;
@@ -2933,42 +2924,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r4    : left
-  //  -- r3    : right
-  //  -- lr    : return address
-  // -----------------------------------
-  __ AssertString(r4);
-  __ AssertString(r3);
-
-  Label not_same;
-  __ cmp(r3, r4);
-  __ bne(&not_same);
-  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
-                      r5);
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both objects are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
-
-  // Compare flat one-byte strings natively.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
-                      r6);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
-
-  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
-  // tagged as a small integer.
-  __ bind(&runtime);
-  __ Push(r4, r3);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r4    : left
@@ -3285,10 +3240,17 @@
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
-  __ Push(left, right);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left, right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
+    __ sub(r3, r3, r4);
+    __ Ret();
   } else {
+    __ Push(left, right);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3832,7 +3794,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ LoadP(r4, MemOperand(fp, parameter_count_offset));
   if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ addi(r4, r4, Operand(1));
@@ -4844,7 +4806,7 @@
     __ bind(&loop);
     __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r4);
     __ bne(&loop);
   }
@@ -4853,7 +4815,7 @@
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
   __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset));
+  __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ bne(&no_rest_parameters);
 
@@ -4998,7 +4960,7 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+  __ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ beq(&adaptor_frame);
 
@@ -5240,7 +5202,7 @@
     __ bind(&loop);
     __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r4);
     __ bne(&loop);
   }
@@ -5248,7 +5210,7 @@
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(ip, MemOperand(r6, StandardFrameConstants::kContextOffset));
+  __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ beq(&arguments_adaptor);
   {
@@ -5632,16 +5594,12 @@
   __ b(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3                  : callee
   //  -- r7                  : call_data
   //  -- r5                  : holder
   //  -- r4                  : api_function_address
-  //  -- r6                  : number of arguments if argc is a register
   //  -- cp                  : context
   //  --
   //  -- sp[0]               : last argument
@@ -5667,11 +5625,9 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || r6.is(argc.reg()));
-
   // context save
   __ push(context);
-  if (!is_lazy) {
+  if (!is_lazy()) {
     // load context from callee
     __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
   }
@@ -5683,7 +5639,7 @@
   __ push(call_data);
 
   Register scratch = call_data;
-  if (!call_data_undefined) {
+  if (!call_data_undefined()) {
     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   }
   // return value
@@ -5719,28 +5675,15 @@
   __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
   // FunctionCallbackInfo::implicit_args_
   __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
-  if (argc.is_immediate()) {
-    // FunctionCallbackInfo::values_
-    __ addi(ip, scratch,
-            Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
-    __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    __ li(ip, Operand(argc.immediate()));
-    __ stw(ip, MemOperand(r3, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_ = 0
-    __ li(ip, Operand::Zero());
-    __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
-  } else {
-    __ ShiftLeftImm(ip, argc.reg(), Operand(kPointerSizeLog2));
-    __ addi(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
-    // FunctionCallbackInfo::values_
-    __ add(r0, scratch, ip);
-    __ StoreP(r0, MemOperand(r3, 1 * kPointerSize));
-    // FunctionCallbackInfo::length_ = argc
-    __ stw(argc.reg(), MemOperand(r3, 2 * kPointerSize));
-    // FunctionCallbackInfo::is_construct_call_
-    __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
-  }
+  // FunctionCallbackInfo::values_
+  __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+  __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc
+  __ li(ip, Operand(argc()));
+  __ stw(ip, MemOperand(r3, 2 * kPointerSize));
+  // FunctionCallbackInfo::is_construct_call_ = 0
+  __ li(ip, Operand::Zero());
+  __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5750,7 +5693,7 @@
       fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument
   int return_value_offset = 0;
-  if (return_first_arg) {
+  if (is_store()) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5760,33 +5703,14 @@
   MemOperand is_construct_call_operand =
       MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
   MemOperand* stack_space_operand = &is_construct_call_operand;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_operand = NULL;
-  }
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_operand = NULL;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                            stack_space_operand, return_value_operand,
                            &context_restore_operand);
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- sp[0]                        : name
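
Aside: the StringToNumber hunk at the top of this file replaces the old ToLength tail call with a fast path that consults the string's hash field: when the kContainsCachedArrayIndexMask bits are clear, the numeric value is already cached in the hash word and IndexFromHash extracts it. A minimal C++ sketch of that decision follows; the flag bit and shift are assumptions for illustration, not V8's actual bit layout.

#include <cstdint>
#include <optional>

// Assumed layout: one flag bit means "no cached index"; the index itself sits
// in the upper bits of the hash word.
constexpr uint32_t kContainsCachedArrayIndexMask = 1u << 1;
constexpr int kCachedIndexShift = 2;

std::optional<uint32_t> CachedArrayIndex(uint32_t hash_field) {
  if (hash_field & kContainsCachedArrayIndexMask) {
    return std::nullopt;  // no cached index: fall back to the runtime call
  }
  return hash_field >> kCachedIndexShift;  // IndexFromHash equivalent
}
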
diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc
index d6d86b0..5642e91 100644
--- a/src/ppc/codegen-ppc.cc
+++ b/src/ppc/codegen-ppc.cc
@@ -136,7 +136,7 @@
 
   if (mode == TRACK_ALLOCATION_SITE) {
     DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
+    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
                                          allocation_memento_found);
   }
 
@@ -169,7 +169,7 @@
                      scratch2));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
@@ -289,7 +289,7 @@
                      scratch));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
@@ -616,9 +616,7 @@
                       young_sequence_.length() / Assembler::kInstrSize,
                       CodePatcher::DONT_FLUSH));
   PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
-  patcher->masm()->PushFixedFrame(r4);
-  patcher->masm()->addi(fp, sp,
-                        Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  patcher->masm()->PushStandardFrame(r4);
   for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
     patcher->masm()->nop();
   }
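
Aside: the code-age sequence above now calls PushStandardFrame (added to macro-assembler-ppc.cc later in this patch), which folds the fp adjustment into the pushes instead of the old PushFixedFrame + addi pair. A small sketch of the slot count fp has to skip, assuming the push order lr, fp, [constant pool], cp, function:

// Number of slots between sp and the saved-fp slot after the pushes,
// i.e. the fp_delta used by PushStandardFrame in this patch.
int StandardFrameFpDelta(bool embedded_constant_pool, bool has_function_reg) {
  return 1 /* cp */ + (embedded_constant_pool ? 1 : 0) +
         (has_function_reg ? 1 : 0);
}
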
diff --git a/src/ppc/deoptimizer-ppc.cc b/src/ppc/deoptimizer-ppc.cc
index 9ec5cdd..ead877e 100644
--- a/src/ppc/deoptimizer-ppc.cc
+++ b/src/ppc/deoptimizer-ppc.cc
@@ -106,12 +106,6 @@
   }
 }
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  // There is no dynamic alignment padding on PPC in the input frame.
-  return false;
-}
-
-
 #define __ masm()->
 
 // This code tries to be close to ia32 code so that any changes can be
@@ -168,7 +162,12 @@
   // Allocate a new deoptimizer object.
   // Pass six arguments in r3 to r8.
   __ PrepareCallCFunction(6, r8);
+  __ li(r3, Operand::Zero());
+  Label context_check;
+  __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(r4, &context_check);
   __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
   __ li(r4, Operand(type()));  // bailout type,
   // r5: bailout id already loaded.
   // r6: code address or 0 already loaded.
@@ -238,6 +237,8 @@
   }
   __ pop(r3);  // Restore deoptimizer object (class Deoptimizer).
 
+  __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
+
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
   // Outer loop state: r7 = current "FrameDescription** output_",
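
Aside: the deoptimizer entry above now inspects the context-or-frame-type slot before reporting a function: typed frames store a Smi frame marker there and carry no JSFunction, so r3 stays zero. A hedged restatement of that test, assuming the usual V8 pointer-tagging scheme in which Smis have a clear low bit:

#include <cstdint>

bool FrameHasJSFunction(uintptr_t context_or_frame_type_slot) {
  // A Smi (clear low tag bit) here is a frame-type marker rather than a
  // context, so the frame has no JSFunction and 0 is passed along instead.
  return (context_or_frame_type_slot & 1) != 0;
}
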
diff --git a/src/ppc/disasm-ppc.cc b/src/ppc/disasm-ppc.cc
index e72658f..baba146 100644
--- a/src/ppc/disasm-ppc.cc
+++ b/src/ppc/disasm-ppc.cc
@@ -657,10 +657,18 @@
       Format(instr, "subfc'. 'rt, 'ra, 'rb");
       return;
     }
+    case SUBFEX: {
+      Format(instr, "subfe'. 'rt, 'ra, 'rb");
+      return;
+    }
     case ADDCX: {
       Format(instr, "addc'.   'rt, 'ra, 'rb");
       return;
     }
+    case ADDEX: {
+      Format(instr, "adde'.   'rt, 'ra, 'rb");
+      return;
+    }
     case CNTLZWX: {
       Format(instr, "cntlzw'. 'ra, 'rs");
       return;
diff --git a/src/ppc/frames-ppc.h b/src/ppc/frames-ppc.h
index b1de9f5..fd4abe2 100644
--- a/src/ppc/frames-ppc.h
+++ b/src/ppc/frames-ppc.h
@@ -152,16 +152,11 @@
       -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize =
-      FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
-
-  static const int kConstantPoolOffset =
-      FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
-  static const int kCodeOffset = -2 * kPointerSize;
-  static const int kSPOffset = -1 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = 0 * kPointerSize;
@@ -179,7 +174,7 @@
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
   static const int kLastParameterOffset = +2 * kPointerSize;
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 
   // Caller SP-relative.
   static const int kParam0Offset = -2 * kPointerSize;
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index 3db7bd5..48b6cdc 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -106,35 +106,9 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
 
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return r3; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return r3; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return r3; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -262,6 +236,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -306,6 +287,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (arg count)
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -314,20 +301,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r4, r3};
@@ -387,21 +360,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {
-      r3,  // callee
-      r7,  // call_data
-      r5,  // holder
-      r4,  // api_function_address
-      r6,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       r3,  // callee
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 14759de..42e5a13 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -570,14 +570,13 @@
   StoreP(scratch, MemOperand(ip));
   // Call stub on end of buffer.
   // Check for end of buffer.
-  mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
-  and_(r0, scratch, r0, SetRC);
+  TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);
 
   if (and_then == kFallThroughAtEnd) {
-    beq(&done, cr0);
+    bne(&done, cr0);
   } else {
     DCHECK(and_then == kReturnAtEnd);
-    Ret(eq, cr0);
+    Ret(ne, cr0);
   }
   mflr(r0);
   push(r0);
@@ -591,42 +590,69 @@
   }
 }
 
-
-void MacroAssembler::PushFixedFrame(Register marker_reg) {
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+  int fp_delta = 0;
   mflr(r0);
   if (FLAG_enable_embedded_constant_pool) {
     if (marker_reg.is_valid()) {
-      Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
+      Push(r0, fp, kConstantPoolRegister, marker_reg);
+      fp_delta = 2;
     } else {
-      Push(r0, fp, kConstantPoolRegister, cp);
+      Push(r0, fp, kConstantPoolRegister);
+      fp_delta = 1;
     }
   } else {
     if (marker_reg.is_valid()) {
-      Push(r0, fp, cp, marker_reg);
+      Push(r0, fp, marker_reg);
+      fp_delta = 1;
     } else {
-      Push(r0, fp, cp);
+      Push(r0, fp);
+      fp_delta = 0;
     }
   }
+  addi(fp, sp, Operand(fp_delta * kPointerSize));
 }
 
-
-void MacroAssembler::PopFixedFrame(Register marker_reg) {
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
   if (FLAG_enable_embedded_constant_pool) {
     if (marker_reg.is_valid()) {
-      Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
+      Pop(r0, fp, kConstantPoolRegister, marker_reg);
     } else {
-      Pop(r0, fp, kConstantPoolRegister, cp);
+      Pop(r0, fp, kConstantPoolRegister);
     }
   } else {
     if (marker_reg.is_valid()) {
-      Pop(r0, fp, cp, marker_reg);
+      Pop(r0, fp, marker_reg);
     } else {
-      Pop(r0, fp, cp);
+      Pop(r0, fp);
     }
   }
   mtlr(r0);
 }
 
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+  int fp_delta = 0;
+  mflr(r0);
+  if (FLAG_enable_embedded_constant_pool) {
+    if (function_reg.is_valid()) {
+      Push(r0, fp, kConstantPoolRegister, cp, function_reg);
+      fp_delta = 3;
+    } else {
+      Push(r0, fp, kConstantPoolRegister, cp);
+      fp_delta = 2;
+    }
+  } else {
+    if (function_reg.is_valid()) {
+      Push(r0, fp, cp, function_reg);
+      fp_delta = 2;
+    } else {
+      Push(r0, fp, cp);
+      fp_delta = 1;
+    }
+  }
+  addi(fp, sp, Operand(fp_delta * kPointerSize));
+}
+
 void MacroAssembler::RestoreFrameStateForTailCall() {
   if (FLAG_enable_embedded_constant_pool) {
     LoadP(kConstantPoolRegister,
@@ -803,6 +829,145 @@
 }
 #endif
 
+#if !V8_TARGET_ARCH_PPC64
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+                                   Register src_low, Register src_high,
+                                   Register scratch, Register shift) {
+  DCHECK(!AreAliased(dst_low, src_high, shift));
+  DCHECK(!AreAliased(dst_high, src_low, shift));
+  Label less_than_32;
+  Label done;
+  cmpi(shift, Operand(32));
+  blt(&less_than_32);
+  // If shift >= 32
+  andi(scratch, shift, Operand(0x1f));
+  slw(dst_high, src_low, scratch);
+  li(dst_low, Operand::Zero());
+  b(&done);
+  bind(&less_than_32);
+  // If shift < 32
+  subfic(scratch, shift, Operand(32));
+  slw(dst_high, src_high, shift);
+  srw(scratch, src_low, scratch);
+  orx(dst_high, dst_high, scratch);
+  slw(dst_low, src_low, shift);
+  bind(&done);
+}
+
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+                                   Register src_low, Register src_high,
+                                   uint32_t shift) {
+  DCHECK(!AreAliased(dst_low, src_high));
+  DCHECK(!AreAliased(dst_high, src_low));
+  if (shift == 32) {
+    Move(dst_high, src_low);
+    li(dst_low, Operand::Zero());
+  } else if (shift > 32) {
+    shift &= 0x1f;
+    slwi(dst_high, src_low, Operand(shift));
+    li(dst_low, Operand::Zero());
+  } else if (shift == 0) {
+    Move(dst_low, src_low);
+    Move(dst_high, src_high);
+  } else {
+    slwi(dst_high, src_high, Operand(shift));
+    rlwimi(dst_high, src_low, shift, 32 - shift, 31);
+    slwi(dst_low, src_low, Operand(shift));
+  }
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+                                    Register src_low, Register src_high,
+                                    Register scratch, Register shift) {
+  DCHECK(!AreAliased(dst_low, src_high, shift));
+  DCHECK(!AreAliased(dst_high, src_low, shift));
+  Label less_than_32;
+  Label done;
+  cmpi(shift, Operand(32));
+  blt(&less_than_32);
+  // If shift >= 32
+  andi(scratch, shift, Operand(0x1f));
+  srw(dst_low, src_high, scratch);
+  li(dst_high, Operand::Zero());
+  b(&done);
+  bind(&less_than_32);
+  // If shift < 32
+  subfic(scratch, shift, Operand(32));
+  srw(dst_low, src_low, shift);
+  slw(scratch, src_high, scratch);
+  orx(dst_low, dst_low, scratch);
+  srw(dst_high, src_high, shift);
+  bind(&done);
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+                                    Register src_low, Register src_high,
+                                    uint32_t shift) {
+  DCHECK(!AreAliased(dst_low, src_high));
+  DCHECK(!AreAliased(dst_high, src_low));
+  if (shift == 32) {
+    Move(dst_low, src_high);
+    li(dst_high, Operand::Zero());
+  } else if (shift > 32) {
+    shift &= 0x1f;
+    srwi(dst_low, src_high, Operand(shift));
+    li(dst_high, Operand::Zero());
+  } else if (shift == 0) {
+    Move(dst_low, src_low);
+    Move(dst_high, src_high);
+  } else {
+    srwi(dst_low, src_low, Operand(shift));
+    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
+    srwi(dst_high, src_high, Operand(shift));
+  }
+}
+
+void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+                                       Register src_low, Register src_high,
+                                       Register scratch, Register shift) {
+  DCHECK(!AreAliased(dst_low, src_high, shift));
+  DCHECK(!AreAliased(dst_high, src_low, shift));
+  Label less_than_32;
+  Label done;
+  cmpi(shift, Operand(32));
+  blt(&less_than_32);
+  // If shift >= 32
+  andi(scratch, shift, Operand(0x1f));
+  sraw(dst_low, src_high, scratch);
+  srawi(dst_high, src_high, 31);
+  b(&done);
+  bind(&less_than_32);
+  // If shift < 32
+  subfic(scratch, shift, Operand(32));
+  srw(dst_low, src_low, shift);
+  slw(scratch, src_high, scratch);
+  orx(dst_low, dst_low, scratch);
+  sraw(dst_high, src_high, shift);
+  bind(&done);
+}
+
+void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+                                       Register src_low, Register src_high,
+                                       uint32_t shift) {
+  DCHECK(!AreAliased(dst_low, src_high));
+  DCHECK(!AreAliased(dst_high, src_low));
+  if (shift == 32) {
+    Move(dst_low, src_high);
+    srawi(dst_high, src_high, 31);
+  } else if (shift > 32) {
+    shift &= 0x1f;
+    srawi(dst_low, src_high, shift);
+    srawi(dst_high, src_high, 31);
+  } else if (shift == 0) {
+    Move(dst_low, src_low);
+    Move(dst_high, src_high);
+  } else {
+    srwi(dst_low, src_low, Operand(shift));
+    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
+    srawi(dst_high, src_high, shift);
+  }
+}
+#endif
 
 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
     Register code_target_address) {
@@ -824,12 +989,13 @@
   mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
 }
 
-
-void MacroAssembler::StubPrologue(Register base, int prologue_offset) {
-  LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
-  PushFixedFrame(r11);
-  // Adjust FP to point to saved FP.
-  addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
+                                  int prologue_offset) {
+  {
+    ConstantPoolUnavailableScope constant_pool_unavailable(this);
+    LoadSmiLiteral(r11, Smi::FromInt(type));
+    PushCommonFrame(r11);
+  }
   if (FLAG_enable_embedded_constant_pool) {
     if (!base.is(no_reg)) {
       // base contains prologue address
@@ -865,9 +1031,7 @@
       }
     } else {
       // This matches the code found in GetNoCodeAgeSequence()
-      PushFixedFrame(r4);
-      // Adjust fp to point to saved fp.
-      addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+      PushStandardFrame(r4);
       for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
         nop();
       }
@@ -892,20 +1056,20 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type,
                                 bool load_constant_pool_pointer_reg) {
   if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
-    PushFixedFrame();
-    // This path should not rely on ip containing code entry.
+    // Push type explicitly so we can leverage the constant pool.
+    // This path cannot rely on ip containing code entry.
+    PushCommonFrame();
     LoadConstantPoolPointerRegister();
     LoadSmiLiteral(ip, Smi::FromInt(type));
     push(ip);
   } else {
     LoadSmiLiteral(ip, Smi::FromInt(type));
-    PushFixedFrame(ip);
+    PushCommonFrame(ip);
   }
-  // Adjust FP to point to saved FP.
-  addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-
-  mov(r0, Operand(CodeObject()));
-  push(r0);
+  if (type == StackFrame::INTERNAL) {
+    mov(r0, Operand(CodeObject()));
+    push(r0);
+  }
 }
 
 
@@ -921,11 +1085,8 @@
   LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
   LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   if (FLAG_enable_embedded_constant_pool) {
-    const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
-    const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
-    const int offset =
-        ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
-    LoadP(kConstantPoolRegister, MemOperand(fp, offset));
+    LoadP(kConstantPoolRegister,
+          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
   }
   mtlr(r0);
   frame_ends = pc_offset();
@@ -962,12 +1123,10 @@
   // all of the pushes that have happened inside of V8
   // since we were called from C code
 
-  // replicate ARM frame - TODO make this more closely follow PPC ABI
-  mflr(r0);
-  Push(r0, fp);
-  mr(fp, sp);
+  LoadSmiLiteral(ip, Smi::FromInt(StackFrame::EXIT));
+  PushCommonFrame(ip);
   // Reserve room for saved entry sp and code object.
-  subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
+  subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
 
   if (emit_debug_code()) {
     li(r8, Operand::Zero());
@@ -1052,7 +1211,7 @@
     // Calculate the stack location of the saved doubles and restore them.
     const int kNumRegs = kNumCallerSavedDoubles;
     const int offset =
-        (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
+        (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
     addi(r6, fp, Operand(-offset));
     MultiPopDoubles(kCallerSavedDoubles, r6);
   }
@@ -1093,6 +1252,67 @@
   Move(dst, d1);
 }
 
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+                                        Register caller_args_count_reg,
+                                        Register scratch0, Register scratch1) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+#endif
+
+  // Calculate the end of destination area where we will put the arguments
+  // after we drop current frame. We add kPointerSize to count the receiver
+  // argument which is not included into formal parameters count.
+  Register dst_reg = scratch0;
+  ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
+  add(dst_reg, fp, dst_reg);
+  addi(dst_reg, dst_reg,
+       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = caller_args_count_reg;
+  // Calculate the end of source area. +kPointerSize is for the receiver.
+  if (callee_args_count.is_reg()) {
+    ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
+    add(src_reg, sp, src_reg);
+    addi(src_reg, src_reg, Operand(kPointerSize));
+  } else {
+    Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
+  }
+
+  if (FLAG_debug_code) {
+    cmpl(src_reg, dst_reg);
+    Check(lt, kStackAccessBelowStackPointer);
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  RestoreFrameStateForTailCall();
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch1;
+  Label loop;
+  if (callee_args_count.is_reg()) {
+    addi(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
+  } else {
+    mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
+  }
+  mtctr(tmp_reg);
+  bind(&loop);
+  LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
+  StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+  bdnz(&loop);
+
+  // Leave current frame.
+  mr(sp, dst_reg);
+}
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual, Label* done,
@@ -1370,8 +1590,20 @@
   DCHECK(!holder_reg.is(ip));
   DCHECK(!scratch.is(ip));
 
-  // Load current lexical context from the stack frame.
-  LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  DCHECK(!ip.is(scratch));
+  mr(ip, fp);
+  bind(&load_context);
+  LoadP(scratch,
+        MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
+  JumpIfNotSmi(scratch, &has_context);
+  LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
+  b(&load_context);
+  bind(&has_context);
+
 // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
   cmpi(scratch, Operand::Zero());
@@ -2679,6 +2911,17 @@
   JumpIfSmi(reg2, on_either_smi);
 }
 
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object, r0);
+    Check(ne, kOperandIsANumber, cr0);
+    push(object);
+    CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
+    pop(object);
+    Check(ne, kOperandIsANumber);
+  }
+}
 
 void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
@@ -4228,28 +4471,52 @@
   }
 }
 
-
 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
                                                      Register scratch_reg,
+                                                     Register scratch2_reg,
                                                      Label* no_memento_found) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
-  addi(scratch_reg, receiver_reg,
-       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
-  Cmpi(scratch_reg, Operand(new_space_start), r0);
-  blt(no_memento_found);
-  mov(ip, Operand(new_space_allocation_top));
-  LoadP(ip, MemOperand(ip));
-  cmp(scratch_reg, ip);
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+  Register mask = scratch2_reg;
+
+  DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
+
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+
+  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
+  lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
+  addi(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  Xor(r0, scratch_reg, Operand(new_space_allocation_top));
+  and_(r0, r0, mask, SetRC);
+  beq(&top_check, cr0);
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  xor_(r0, scratch_reg, receiver_reg);
+  and_(r0, r0, mask, SetRC);
+  bne(no_memento_found, cr0);
+  // Continue with the actual map check.
+  b(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  Cmpi(scratch_reg, Operand(new_space_allocation_top), r0);
   bgt(no_memento_found);
-  LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+  // Memento map check.
+  bind(&map_check);
+  LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
   Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
        r0);
 }
 
-
 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                    Register reg4, Register reg5,
                                    Register reg6) {
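
Aside: the new ShiftLeftPair/ShiftRightPair helpers above implement 64-bit shifts over a 32-bit register pair for non-PPC64 targets. A plain C++ equivalent of the left-shift variant, describing the behaviour only (no claim about register aliasing constraints):

#include <cstdint>

void ShiftLeftPair(uint32_t& dst_lo, uint32_t& dst_hi, uint32_t src_lo,
                   uint32_t src_hi, uint32_t shift) {
  shift &= 63;
  if (shift == 0) {
    dst_lo = src_lo;
    dst_hi = src_hi;
  } else if (shift < 32) {
    dst_hi = (src_hi << shift) | (src_lo >> (32 - shift));
    dst_lo = src_lo << shift;
  } else {  // shift >= 32: everything comes from the low word
    dst_hi = src_lo << (shift & 31);
    dst_lo = 0;
  }
}
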
diff --git a/src/ppc/macro-assembler-ppc.h b/src/ppc/macro-assembler-ppc.h
index d9dbd56..a529b62 100644
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -156,12 +156,6 @@
 
   void Call(Label* target);
 
-  // Emit call to the code we are currently generating.
-  void CallSelf() {
-    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
-    Call(self, RelocInfo::CODE_TARGET);
-  }
-
   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
   void Move(Register dst, Handle<Object> value);
@@ -344,10 +338,14 @@
     addi(sp, sp, Operand(5 * kPointerSize));
   }
 
-  // Push a fixed frame, consisting of lr, fp, context and
-  // JS function / marker id if marker_reg is a valid register.
-  void PushFixedFrame(Register marker_reg = no_reg);
-  void PopFixedFrame(Register marker_reg = no_reg);
+  // Push a fixed frame, consisting of lr, fp, constant pool.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
+  // Push a standard frame, consisting of lr, fp, constant pool,
+  // context and JS function
+  void PushStandardFrame(Register function_reg);
+
+  void PopCommonFrame(Register marker_reg = no_reg);
 
   // Restore caller's frame pointer and return address prior to being
   // overwritten by tail call stack preparation.
@@ -416,8 +414,24 @@
       FPRoundingMode rounding_mode = kRoundToZero);
 #endif
 
+#if !V8_TARGET_ARCH_PPC64
+  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+                     Register src_high, Register scratch, Register shift);
+  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+                     Register src_high, uint32_t shift);
+  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+                      Register src_high, Register scratch, Register shift);
+  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+                      Register src_high, uint32_t shift);
+  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
+                         Register src_high, Register scratch, Register shift);
+  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
+                         Register src_high, uint32_t shift);
+#endif
+
   // Generates function and stub prologue code.
-  void StubPrologue(Register base = no_reg, int prologue_offset = 0);
+  void StubPrologue(StackFrame::Type type, Register base = no_reg,
+                    int prologue_offset = 0);
   void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
 
   // Enter exit frame.
@@ -564,6 +578,15 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Removes current frame and its arguments from the stack preserving
+  // the arguments and a return address pushed to the stack for the next call.
+  // Both |callee_args_count| and |caller_args_count_reg| do not include
+  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+  // is trashed.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeFunctionCode(Register function, Register new_target,
                           const ParameterCount& expected,
@@ -1096,14 +1119,16 @@
   // (for consistency between 32/64-bit).
 
   // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
-  // and place them into the least significant bits of dst.
+  // and, if !test, shift them into the least significant bits of dst.
   inline void ExtractBitRange(Register dst, Register src, int rangeStart,
-                              int rangeEnd, RCBit rc = LeaveRC) {
+                              int rangeEnd, RCBit rc = LeaveRC,
+                              bool test = false) {
     DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
     int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
     int width = rangeStart - rangeEnd + 1;
-    if (rc == SetRC && rangeEnd == 0 && width <= 16) {
-      andi(dst, src, Operand((1 << width) - 1));
+    if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
+      // Prefer faster andi when applicable.
+      andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
     } else {
 #if V8_TARGET_ARCH_PPC64
       rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
@@ -1115,14 +1140,14 @@
   }
 
   inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
-                         RCBit rc = LeaveRC) {
-    ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
+                         RCBit rc = LeaveRC, bool test = false) {
+    ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
   }
 
   // Extract consecutive bits (defined by mask) from src and place them
   // into the least significant bits of dst.
   inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
-                             RCBit rc = LeaveRC) {
+                             RCBit rc = LeaveRC, bool test = false) {
     int start = kBitsPerPointer - 1;
     int end;
     uintptr_t bit = (1L << start);
@@ -1142,25 +1167,25 @@
     // 1-bits in mask must be contiguous
     DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
 
-    ExtractBitRange(dst, src, start, end, rc);
+    ExtractBitRange(dst, src, start, end, rc, test);
   }
 
   // Test single bit in value.
   inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
-    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
+    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
   }
 
   // Test consecutive bit range in value.  Range is defined by
   // rangeStart - rangeEnd.
   inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                            Register scratch = r0) {
-    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
+    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
   }
 
   // Test consecutive bit range in value.  Range is defined by mask.
   inline void TestBitMask(Register value, uintptr_t mask,
                           Register scratch = r0) {
-    ExtractBitMask(scratch, value, mask, SetRC);
+    ExtractBitMask(scratch, value, mask, SetRC, true);
   }
 
 
@@ -1307,6 +1332,9 @@
   // Jump if either of the registers contain a smi.
   void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
 
+  // Abort execution if argument is a number, enabled via --debug-code.
+  void AssertNotNumber(Register object);
+
   // Abort execution if argument is a smi, enabled via --debug-code.
   void AssertNotSmi(Register object);
   void AssertSmi(Register object);
@@ -1481,13 +1509,15 @@
   // If allocation info is present, condition flags are set to eq.
   void TestJSArrayForAllocationMemento(Register receiver_reg,
                                        Register scratch_reg,
+                                       Register scratch2_reg,
                                        Label* no_memento_found);
 
   void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                          Register scratch_reg,
+                                         Register scratch2_reg,
                                          Label* memento_found) {
     Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
                                     &no_memento_found);
     beq(memento_found);
     bind(&no_memento_found);
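
Aside: PrepareForTailCall, declared above, relocates the callee's arguments over the caller's before dropping the current frame. Since source and destination areas can overlap, the copy runs backwards; a self-contained sketch of just that loop, with the frame-layout arithmetic omitted:

#include <cstddef>
#include <cstdint>

// dst_end/src_end point one word past the last word to copy, mirroring the
// pre-decrementing LoadPU/StorePU loop in the PPC implementation.
void CopyArgsBackwards(uintptr_t* dst_end, const uintptr_t* src_end,
                       size_t count /* argc + 1 for the receiver */) {
  while (count-- > 0) {
    *--dst_end = *--src_end;
  }
}
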
diff --git a/src/ppc/simulator-ppc.cc b/src/ppc/simulator-ppc.cc
index 9a1f9e0..79dc825 100644
--- a/src/ppc/simulator-ppc.cc
+++ b/src/ppc/simulator-ppc.cc
@@ -1733,8 +1733,8 @@
       int ra = instr->RAValue();
       int rb = instr->RBValue();
       uint32_t rs_val = get_register(rs);
-      uintptr_t rb_val = get_register(rb);
-      intptr_t result = rs_val >> (rb_val & 0x3f);
+      uintptr_t rb_val = get_register(rb) & 0x3f;
+      intptr_t result = (rb_val > 31) ? 0 : rs_val >> rb_val;
       set_register(ra, result);
       if (instr->Bit(0)) {  // RC bit set
         SetCR0(result);
@@ -1747,8 +1747,8 @@
       int ra = instr->RAValue();
       int rb = instr->RBValue();
       uintptr_t rs_val = get_register(rs);
-      uintptr_t rb_val = get_register(rb);
-      intptr_t result = rs_val >> (rb_val & 0x7f);
+      uintptr_t rb_val = get_register(rb) & 0x7f;
+      intptr_t result = (rb_val > 63) ? 0 : rs_val >> rb_val;
       set_register(ra, result);
       if (instr->Bit(0)) {  // RC bit set
         SetCR0(result);
@@ -1761,8 +1761,8 @@
       int ra = instr->RAValue();
       int rb = instr->RBValue();
       int32_t rs_val = get_register(rs);
-      intptr_t rb_val = get_register(rb);
-      intptr_t result = rs_val >> (rb_val & 0x3f);
+      intptr_t rb_val = get_register(rb) & 0x3f;
+      intptr_t result = (rb_val > 31) ? rs_val >> 31 : rs_val >> rb_val;
       set_register(ra, result);
       if (instr->Bit(0)) {  // RC bit set
         SetCR0(result);
@@ -1775,8 +1775,8 @@
       int ra = instr->RAValue();
       int rb = instr->RBValue();
       intptr_t rs_val = get_register(rs);
-      intptr_t rb_val = get_register(rb);
-      intptr_t result = rs_val >> (rb_val & 0x7f);
+      intptr_t rb_val = get_register(rb) & 0x7f;
+      intptr_t result = (rb_val > 63) ? rs_val >> 63 : rs_val >> rb_val;
       set_register(ra, result);
       if (instr->Bit(0)) {  // RC bit set
         SetCR0(result);
@@ -2025,19 +2025,37 @@
       uintptr_t ra_val = get_register(ra);
       uintptr_t rb_val = get_register(rb);
       uintptr_t alu_out = ~ra_val + rb_val + 1;
-      set_register(rt, alu_out);
-      // If the sign of rb and alu_out don't match, carry = 0
-      if ((alu_out ^ rb_val) & 0x80000000) {
-        special_reg_xer_ &= ~0xF0000000;
-      } else {
+      // Set carry
+      if (ra_val <= rb_val) {
         special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+      } else {
+        special_reg_xer_ &= ~0xF0000000;
       }
+      set_register(rt, alu_out);
       if (instr->Bit(0)) {  // RC bit set
         SetCR0(alu_out);
       }
       // todo - handle OE bit
       break;
     }
+    case SUBFEX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      // int oe = instr->Bit(10);
+      uintptr_t ra_val = get_register(ra);
+      uintptr_t rb_val = get_register(rb);
+      uintptr_t alu_out = ~ra_val + rb_val;
+      if (special_reg_xer_ & 0x20000000) {
+        alu_out += 1;
+      }
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(static_cast<intptr_t>(alu_out));
+      }
+      // todo - handle OE bit
+      break;
+    }
     case ADDCX: {
       int rt = instr->RTValue();
       int ra = instr->RAValue();
@@ -2046,7 +2064,7 @@
       uintptr_t ra_val = get_register(ra);
       uintptr_t rb_val = get_register(rb);
       uintptr_t alu_out = ra_val + rb_val;
-      // Check overflow
+      // Set carry
       if (~ra_val < rb_val) {
         special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
       } else {
@@ -2059,6 +2077,24 @@
       // todo - handle OE bit
       break;
     }
+    case ADDEX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      // int oe = instr->Bit(10);
+      uintptr_t ra_val = get_register(ra);
+      uintptr_t rb_val = get_register(rb);
+      uintptr_t alu_out = ra_val + rb_val;
+      if (special_reg_xer_ & 0x20000000) {
+        alu_out += 1;
+      }
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(static_cast<intptr_t>(alu_out));
+      }
+      // todo - handle OE bit
+      break;
+    }
     case MULHWX: {
       int rt = instr->RTValue();
       int ra = instr->RAValue();
@@ -2117,8 +2153,8 @@
       int ra = instr->RAValue();
       int rb = instr->RBValue();
       uint32_t rs_val = get_register(rs);
-      uintptr_t rb_val = get_register(rb);
-      uint32_t result = rs_val << (rb_val & 0x3f);
+      uintptr_t rb_val = get_register(rb) & 0x3f;
+      uint32_t result = (rb_val > 31) ? 0 : rs_val << rb_val;
       set_register(ra, result);
       if (instr->Bit(0)) {  // RC bit set
         SetCR0(result);
@@ -2131,8 +2167,8 @@
       int ra = instr->RAValue();
       int rb = instr->RBValue();
       uintptr_t rs_val = get_register(rs);
-      uintptr_t rb_val = get_register(rb);
-      uintptr_t result = rs_val << (rb_val & 0x7f);
+      uintptr_t rb_val = get_register(rb) & 0x7f;
+      uintptr_t result = (rb_val > 63) ? 0 : rs_val << rb_val;
       set_register(ra, result);
       if (instr->Bit(0)) {  // RC bit set
         SetCR0(result);
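
Aside: the simulator fixes above clamp the shift amount the way the hardware does: srw/slw take the amount modulo 64 and produce zero for amounts of 32 or more, while sraw saturates to the sign bit. The corrected behaviour, restated as ordinary C++:

#include <cstdint>

uint32_t SimulatedSrw(uint32_t rs_val, uint32_t rb_val) {
  uint32_t n = rb_val & 0x3f;         // shift amount is 6 bits
  return n > 31 ? 0u : rs_val >> n;   // amounts >= 32 clear the result
}

int32_t SimulatedSraw(int32_t rs_val, uint32_t rb_val) {
  uint32_t n = rb_val & 0x3f;
  return n > 31 ? (rs_val >> 31) : (rs_val >> n);  // saturate to the sign
}
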
diff --git a/src/profiler/allocation-tracker.cc b/src/profiler/allocation-tracker.cc
index 942068e..791cdf0 100644
--- a/src/profiler/allocation-tracker.cc
+++ b/src/profiler/allocation-tracker.cc
@@ -149,7 +149,8 @@
 
 
 void AddressToTraceMap::Print() {
-  PrintF("[AddressToTraceMap (%" V8PRIuPTR "): \n", ranges_.size());
+  PrintF("[AddressToTraceMap (%" V8_SIZET_PREFIX V8PRIuPTR "): \n",
+         ranges_.size());
   for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
     PrintF("[%p - %p] => %u\n", it->second.start, it->first,
         it->second.trace_node_id);
@@ -226,7 +227,7 @@
 
   // Mark the new block as FreeSpace to make sure the heap is iterable
   // while we are capturing stack trace.
-  heap->CreateFillerObjectAt(addr, size);
+  heap->CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
 
   Isolate* isolate = heap->isolate();
   int length = 0;
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index b6c7945..47585b7 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -211,10 +211,8 @@
   processor_->Enqueue(evt_rec);
 }
 
-
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Code* code,
-                                  const char* name) {
+                                  AbstractCode* code, const char* name) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = code->address();
@@ -222,14 +220,13 @@
       tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
       CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
       CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
   rec->size = code->ExecutableSize();
   processor_->Enqueue(evt_rec);
 }
 
-
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Code* code,
-                                  Name* name) {
+                                  AbstractCode* code, Name* name) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = code->address();
@@ -237,12 +234,13 @@
       tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
       CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
       CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
   rec->size = code->ExecutableSize();
   processor_->Enqueue(evt_rec);
 }
 
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  AbstractCode* code,
                                   SharedFunctionInfo* shared,
                                   CompilationInfo* info, Name* script_name) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
@@ -250,9 +248,11 @@
   rec->start = code->address();
   rec->entry = profiles_->NewCodeEntry(
       tag, profiles_->GetFunctionName(shared->DebugName()),
-      CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name),
+      CodeEntry::kEmptyNamePrefix,
+      profiles_->GetName(InferScriptName(script_name, shared)),
       CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
       NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
   if (info) {
     rec->entry->set_inlined_function_infos(info->inlined_function_infos());
   }
@@ -261,46 +261,65 @@
   processor_->Enqueue(evt_rec);
 }
 
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  AbstractCode* abstract_code,
                                   SharedFunctionInfo* shared,
                                   CompilationInfo* info, Name* script_name,
                                   int line, int column) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
-  rec->start = code->address();
+  rec->start = abstract_code->address();
   Script* script = Script::cast(shared->script());
   JITLineInfoTable* line_table = NULL;
   if (script) {
-    line_table = new JITLineInfoTable();
-    for (RelocIterator it(code); !it.done(); it.next()) {
-      RelocInfo::Mode mode = it.rinfo()->rmode();
-      if (RelocInfo::IsPosition(mode)) {
-        int position = static_cast<int>(it.rinfo()->data());
-        if (position >= 0) {
-          int pc_offset = static_cast<int>(it.rinfo()->pc() - code->address());
-          int line_number = script->GetLineNumber(position) + 1;
-          line_table->SetPosition(pc_offset, line_number);
-        }
+    if (abstract_code->IsCode()) {
+      Code* code = abstract_code->GetCode();
+      int start_position = shared->start_position();
+      int end_position = shared->end_position();
+      line_table = new JITLineInfoTable();
+      for (RelocIterator it(code); !it.done(); it.next()) {
+        RelocInfo* reloc_info = it.rinfo();
+        if (!RelocInfo::IsPosition(reloc_info->rmode())) continue;
+        int position = static_cast<int>(reloc_info->data());
+        // TODO(alph): in case of inlining the position may correspond
+        // to an inlined function source code. Do not collect positions
+        // that fall beyond the function source code. There's however a
+        // chance the inlined function has similar positions but in another
+        // script. So the proper fix is to store script_id in some form
+        // along with the inlined function positions.
+        if (position < start_position || position >= end_position) continue;
+        int pc_offset = static_cast<int>(reloc_info->pc() - code->address());
+        int line_number = script->GetLineNumber(position) + 1;
+        line_table->SetPosition(pc_offset, line_number);
+      }
+    } else {
+      BytecodeArray* bytecode = abstract_code->GetBytecodeArray();
+      line_table = new JITLineInfoTable();
+      interpreter::SourcePositionTableIterator it(
+          bytecode->source_position_table());
+      for (; !it.done(); it.Advance()) {
+        int line_number = script->GetLineNumber(it.source_position()) + 1;
+        int pc_offset = it.bytecode_offset() + BytecodeArray::kHeaderSize;
+        line_table->SetPosition(pc_offset, line_number);
       }
     }
   }
   rec->entry = profiles_->NewCodeEntry(
       tag, profiles_->GetFunctionName(shared->DebugName()),
-      CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name), line,
-      column, line_table, code->instruction_start());
+      CodeEntry::kEmptyNamePrefix,
+      profiles_->GetName(InferScriptName(script_name, shared)), line, column,
+      line_table, abstract_code->instruction_start());
+  RecordInliningInfo(rec->entry, abstract_code);
   if (info) {
     rec->entry->set_inlined_function_infos(info->inlined_function_infos());
   }
   rec->entry->FillFunctionInfo(shared);
-  rec->size = code->ExecutableSize();
+  rec->size = abstract_code->ExecutableSize();
   processor_->Enqueue(evt_rec);
 }
 
-
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Code* code,
-                                  int args_count) {
+                                  AbstractCode* code, int args_count) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = code->address();
@@ -308,21 +327,21 @@
       tag, profiles_->GetName(args_count), "args_count: ",
       CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
       CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+  RecordInliningInfo(rec->entry, code);
   rec->size = code->ExecutableSize();
   processor_->Enqueue(evt_rec);
 }
 
-
-void CpuProfiler::CodeMoveEvent(Address from, Address to) {
+void CpuProfiler::CodeMoveEvent(AbstractCode* from, Address to) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
   CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
-  rec->from = from;
+  rec->from = from->address();
   rec->to = to;
   processor_->Enqueue(evt_rec);
 }
 
-
-void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
+void CpuProfiler::CodeDisableOptEvent(AbstractCode* code,
+                                      SharedFunctionInfo* shared) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
   CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
   rec->start = code->address();
@@ -330,7 +349,6 @@
   processor_->Enqueue(evt_rec);
 }
 
-
 void CpuProfiler::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
   CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
@@ -343,11 +361,6 @@
   processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
 }
 
-
-void CpuProfiler::CodeDeleteEvent(Address from) {
-}
-
-
 void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -360,8 +373,7 @@
   processor_->Enqueue(evt_rec);
 }
 
-
-void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
+void CpuProfiler::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = code->address();
@@ -386,6 +398,60 @@
   processor_->Enqueue(evt_rec);
 }
 
+Name* CpuProfiler::InferScriptName(Name* name, SharedFunctionInfo* info) {
+  if (name->IsString() && String::cast(name)->length()) return name;
+  if (!info->script()->IsScript()) return name;
+  Object* source_url = Script::cast(info->script())->source_url();
+  return source_url->IsName() ? Name::cast(source_url) : name;
+}
+
+void CpuProfiler::RecordInliningInfo(CodeEntry* entry,
+                                     AbstractCode* abstract_code) {
+  if (!abstract_code->IsCode()) return;
+  Code* code = abstract_code->GetCode();
+  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
+  DeoptimizationInputData* deopt_input_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  int deopt_count = deopt_input_data->DeoptCount();
+  for (int i = 0; i < deopt_count; i++) {
+    int pc_offset = deopt_input_data->Pc(i)->value();
+    if (pc_offset == -1) continue;
+    int translation_index = deopt_input_data->TranslationIndex(i)->value();
+    TranslationIterator it(deopt_input_data->TranslationByteArray(),
+                           translation_index);
+    Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+    DCHECK_EQ(Translation::BEGIN, opcode);
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+    int depth = 0;
+    std::vector<CodeEntry*> inline_stack;
+    while (it.HasNext() &&
+           Translation::BEGIN !=
+               (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+      if (opcode != Translation::JS_FRAME &&
+          opcode != Translation::INTERPRETED_FRAME) {
+        it.Skip(Translation::NumberOfOperandsFor(opcode));
+        continue;
+      }
+      it.Next();  // Skip ast_id
+      int shared_info_id = it.Next();
+      it.Next();  // Skip height
+      SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
+          deopt_input_data->LiteralArray()->get(shared_info_id));
+      if (!depth++) continue;  // Skip the current function itself.
+      CodeEntry* inline_entry = new CodeEntry(
+          entry->tag(), profiles_->GetFunctionName(shared_info->DebugName()),
+          CodeEntry::kEmptyNamePrefix, entry->resource_name(),
+          CpuProfileNode::kNoLineNumberInfo,
+          CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+      inline_entry->FillFunctionInfo(shared_info);
+      inline_stack.push_back(inline_entry);
+    }
+    if (!inline_stack.empty()) {
+      entry->AddInlineStack(pc_offset, inline_stack);
+      DCHECK(inline_stack.empty());
+    }
+  }
+}
 
 CpuProfiler::CpuProfiler(Isolate* isolate)
     : isolate_(isolate),
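// Illustrative sketch: the new RecordInliningInfo pass above attaches, per
// deopt point of an optimized Code object, the stack of inlined callees
// (everything past the outermost frame) to the CodeEntry, keyed by pc offset.
// ToyEntry and the frame-name list below are hypothetical stand-ins for
// CodeEntry and DeoptimizationInputData, not V8 API.
#include <map>
#include <string>
#include <vector>

struct ToyEntry {
  // pc offset -> names of the inlined callees active at that offset.
  std::map<int, std::vector<std::string>> inline_locations;
};

void RecordToyInliningInfo(ToyEntry* entry, int pc_offset,
                           const std::vector<std::string>& translated_frames) {
  std::vector<std::string> inline_stack;
  int depth = 0;
  for (const std::string& frame : translated_frames) {
    if (!depth++) continue;  // Skip the outermost (current) function itself.
    inline_stack.push_back(frame);
  }
  if (!inline_stack.empty()) {
    entry->inline_locations[pc_offset] = inline_stack;
  }
}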
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index 1a1249c..a04ee3c 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -201,7 +201,7 @@
               ProfileGenerator* test_generator,
               ProfilerEventsProcessor* test_processor);
 
-  virtual ~CpuProfiler();
+  ~CpuProfiler() override;
 
   void set_sampling_interval(base::TimeDelta value);
   void CollectSample();
@@ -220,29 +220,28 @@
 
   // Must be called via PROFILE macro, otherwise will crash when
   // profiling is not enabled.
-  virtual void CallbackEvent(Name* name, Address entry_point);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code, const char* comment);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code, Name* name);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
-                               SharedFunctionInfo* shared,
-                               CompilationInfo* info, Name* script_name);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
-                               SharedFunctionInfo* shared,
-                               CompilationInfo* info, Name* script_name,
-                               int line, int column);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code, int args_count);
-  virtual void CodeMovingGCEvent() {}
-  virtual void CodeMoveEvent(Address from, Address to);
-  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
-  virtual void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
-  virtual void CodeDeleteEvent(Address from);
-  virtual void GetterCallbackEvent(Name* name, Address entry_point);
-  virtual void RegExpCodeCreateEvent(Code* code, String* source);
-  virtual void SetterCallbackEvent(Name* name, Address entry_point);
-  virtual void SharedFunctionInfoMoveEvent(Address from, Address to) {}
+  void CallbackEvent(Name* name, Address entry_point) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       const char* comment) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       Name* name) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, CompilationInfo* info,
+                       Name* script_name) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       SharedFunctionInfo* shared, CompilationInfo* info,
+                       Name* script_name, int line, int column) override;
+  void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+                       int args_count) override;
+  void CodeMovingGCEvent() override {}
+  void CodeMoveEvent(AbstractCode* from, Address to) override;
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override;
+  void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
+  void GetterCallbackEvent(Name* name, Address entry_point) override;
+  void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+  void SetterCallbackEvent(Name* name, Address entry_point) override;
+  void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
 
   INLINE(bool is_profiling() const) { return is_profiling_; }
   bool* is_profiling_address() {
@@ -259,6 +258,8 @@
   void StopProcessor();
   void ResetProfiles();
   void LogBuiltins();
+  void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
+  Name* InferScriptName(Name* name, SharedFunctionInfo* info);
 
   Isolate* isolate_;
   base::TimeDelta sampling_interval_;
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index fc43f9f..748f307 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -1383,9 +1383,9 @@
   SetInternalReference(obj, entry,
                        "debug_info", shared->debug_info(),
                        SharedFunctionInfo::kDebugInfoOffset);
-  SetInternalReference(obj, entry,
-                       "inferred_name", shared->inferred_name(),
-                       SharedFunctionInfo::kInferredNameOffset);
+  SetInternalReference(obj, entry, "function_identifier",
+                       shared->function_identifier(),
+                       SharedFunctionInfo::kFunctionIdentifierOffset);
   SetInternalReference(obj, entry,
                        "optimized_code_map", shared->optimized_code_map(),
                        SharedFunctionInfo::kOptimizedCodeMapOffset);
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index 58d06c9..abcd9e5 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -50,6 +50,11 @@
 
 CodeEntry::~CodeEntry() {
   delete line_info_;
+  for (auto location : inline_locations_) {
+    for (auto entry : location.second) {
+      delete entry;
+    }
+  }
 }
 
 
@@ -100,6 +105,18 @@
   return v8::CpuProfileNode::kNoLineNumberInfo;
 }
 
+void CodeEntry::AddInlineStack(int pc_offset,
+                               std::vector<CodeEntry*>& inline_stack) {
+  // It's better to use std::move to place the vector into the map,
+  // but it's not supported by the current libstdc++ on MacOS.
+  inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
+      .first->second.swap(inline_stack);
+}
+
+const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
+  auto it = inline_locations_.find(pc_offset);
+  return it != inline_locations_.end() ? &it->second : NULL;
+}
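// Illustrative sketch: AddInlineStack above avoids std::move (per the comment
// about the toolchain) by default-inserting an empty vector and swapping the
// caller's vector into it, which also leaves the argument empty as the DCHECK
// in RecordInliningInfo expects. Plain std types stand in for CodeEntry*.
#include <map>
#include <utility>
#include <vector>

void MoveIntoMap(std::map<int, std::vector<int>>* locations, int key,
                 std::vector<int>& values) {
  // Insert an empty vector (or find an existing one), then swap the payload
  // in; afterwards `values` is empty.
  locations->insert(std::make_pair(key, std::vector<int>()))
      .first->second.swap(values);
}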
 
 void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
   if (!shared->script()->IsScript()) return;
@@ -109,7 +126,6 @@
   set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
 }
 
-
 CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
   DCHECK(has_deopt_info());
 
@@ -274,17 +290,14 @@
   return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
 }
 
-ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
+ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                          int src_line, bool update_stats) {
   ProfileNode* node = root_;
   CodeEntry* last_entry = NULL;
-  for (CodeEntry** entry = path.start() + path.length() - 1;
-       entry != path.start() - 1;
-       --entry) {
-    if (*entry != NULL) {
-      node = node->FindOrAddChild(*entry);
-      last_entry = *entry;
-    }
+  for (auto it = path.rbegin(); it != path.rend(); ++it) {
+    if (*it == NULL) continue;
+    last_entry = *it;
+    node = node->FindOrAddChild(*it);
   }
   if (last_entry && last_entry->has_deopt_info()) {
     node->CollectDeoptInfo(last_entry);
@@ -356,7 +369,7 @@
       top_down_(isolate) {}
 
 void CpuProfile::AddPath(base::TimeTicks timestamp,
-                         const Vector<CodeEntry*>& path, int src_line,
+                         const std::vector<CodeEntry*>& path, int src_line,
                          bool update_stats) {
   ProfileNode* top_frame_node =
       top_down_.AddPathFromEnd(path, src_line, update_stats);
@@ -525,8 +538,8 @@
 }
 
 void CpuProfilesCollection::AddPathToCurrentProfiles(
-    base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line,
-    bool update_stats) {
+    base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
+    int src_line, bool update_stats) {
   // As starting / stopping profiles is rare relatively to this
   // method, we don't bother minimizing the duration of lock holding,
   // e.g. copying contents of the list to a local vector.
@@ -576,12 +589,10 @@
 
 
 void ProfileGenerator::RecordTickSample(const TickSample& sample) {
-  // Allocate space for stack frames + pc + function + vm-state.
-  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
-  // As actual number of decoded code entries may vary, initialize
-  // entries vector with NULL values.
-  CodeEntry** entry = entries.start();
-  memset(entry, 0, entries.length() * sizeof(*entry));
+  std::vector<CodeEntry*> entries;
+  // Conservatively reserve space for stack frames + pc + function + vm-state.
+  // There could in fact be more of them because of inlined entries.
+  entries.reserve(sample.frames_count + 3);
 
   // The ProfileNode knows nothing about all versions of generated code for
   // the same JS function. The line number information associated with
@@ -597,13 +608,14 @@
       // Don't use PC when in external callback code, as it can point
       // inside callback's code, and we will erroneously report
       // that a callback calls itself.
-      *entry++ = code_map_.FindEntry(sample.external_callback_entry);
+      entries.push_back(code_map_.FindEntry(sample.external_callback_entry));
     } else {
       CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
       // If there is no pc_entry we're likely in native code.
       // Find out, if top of stack was pointing inside a JS function
       // meaning that we have encountered a frameless invocation.
       if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
+                        sample.top_frame_type == StackFrame::INTERPRETED ||
                         sample.top_frame_type == StackFrame::OPTIMIZED)) {
         pc_entry = code_map_.FindEntry(sample.tos);
       }
@@ -619,7 +631,7 @@
           src_line = pc_entry->line_number();
         }
         src_line_not_found = false;
-        *entry++ = pc_entry;
+        entries.push_back(pc_entry);
 
         if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
             pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
@@ -630,7 +642,7 @@
           // former case we don't so we simply replace the frame with
           // 'unresolved' entry.
           if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
-            *entry++ = unresolved_entry_;
+            entries.push_back(unresolved_entry_);
           }
         }
       }
@@ -639,35 +651,43 @@
     for (const Address *stack_pos = sample.stack,
                        *stack_end = stack_pos + sample.frames_count;
          stack_pos != stack_end; ++stack_pos) {
-      *entry = code_map_.FindEntry(*stack_pos);
+      CodeEntry* entry = code_map_.FindEntry(*stack_pos);
 
-      // Skip unresolved frames (e.g. internal frame) and get source line of
-      // the first JS caller.
-      if (src_line_not_found && *entry) {
+      if (entry) {
+        // Find out if the entry has an inlining stack associated.
         int pc_offset =
-            static_cast<int>(*stack_pos - (*entry)->instruction_start());
-        src_line = (*entry)->GetSourceLine(pc_offset);
-        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
-          src_line = (*entry)->line_number();
+            static_cast<int>(*stack_pos - entry->instruction_start());
+        const std::vector<CodeEntry*>* inline_stack =
+            entry->GetInlineStack(pc_offset);
+        if (inline_stack) {
+          entries.insert(entries.end(), inline_stack->rbegin(),
+                         inline_stack->rend());
         }
-        src_line_not_found = false;
+        // Skip unresolved frames (e.g. internal frame) and get source line of
+        // the first JS caller.
+        if (src_line_not_found) {
+          src_line = entry->GetSourceLine(pc_offset);
+          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
+            src_line = entry->line_number();
+          }
+          src_line_not_found = false;
+        }
       }
-
-      entry++;
+      entries.push_back(entry);
     }
   }
 
   if (FLAG_prof_browser_mode) {
     bool no_symbolized_entries = true;
-    for (CodeEntry** e = entries.start(); e != entry; ++e) {
-      if (*e != NULL) {
+    for (auto e : entries) {
+      if (e != NULL) {
         no_symbolized_entries = false;
         break;
       }
     }
     // If no frames were symbolized, put the VM state entry in.
     if (no_symbolized_entries) {
-      *entry++ = EntryForVMState(sample.state);
+      entries.push_back(EntryForVMState(sample.state));
     }
   }
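// Illustrative sketch: the RecordTickSample change above splices a frame's
// recorded inline stack into the symbolized path ahead of the physical frame,
// reversed, so the optimized function becomes the caller of its inlined
// callees in the resulting top-down tree. Strings stand in for CodeEntry*.
#include <string>
#include <vector>

void AppendFrame(std::vector<std::string>* entries, const std::string& frame,
                 const std::vector<std::string>* inline_stack) {
  if (inline_stack != nullptr) {
    // Reversed: deeper inlined callees land closer to the sampled pc.
    entries->insert(entries->end(), inline_stack->rbegin(),
                    inline_stack->rend());
  }
  entries->push_back(frame);  // The physical frame follows as their caller.
}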
 
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 3c976d6..194b490 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -99,7 +99,11 @@
 
   int GetSourceLine(int pc_offset) const;
 
+  void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
+  const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
+
   Address instruction_start() const { return instruction_start_; }
+  Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
 
   static const char* const kEmptyNamePrefix;
   static const char* const kEmptyResourceName;
@@ -109,7 +113,6 @@
  private:
   class TagField : public BitField<Logger::LogEventsAndTags, 0, 8> {};
   class BuiltinIdField : public BitField<Builtins::Name, 8, 8> {};
-  Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
 
   uint32_t bit_field_;
   const char* name_prefix_;
@@ -125,6 +128,8 @@
   size_t pc_offset_;
   JITLineInfoTable* line_info_;
   Address instruction_start_;
+  // Should be an unordered_map, but it doesn't currently work on Win & MacOS.
+  std::map<int, std::vector<CodeEntry*>> inline_locations_;
 
   std::vector<InlinedFunctionInfo> inlined_function_infos_;
 
@@ -191,7 +196,7 @@
   ~ProfileTree();
 
   ProfileNode* AddPathFromEnd(
-      const Vector<CodeEntry*>& path,
+      const std::vector<CodeEntry*>& path,
       int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
       bool update_stats = true);
   ProfileNode* root() const { return root_; }
@@ -225,7 +230,7 @@
   CpuProfile(Isolate* isolate, const char* title, bool record_samples);
 
   // Add pc -> ... -> main() call path to the profile.
-  void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path,
+  void AddPath(base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
                int src_line, bool update_stats);
   void CalculateTotalTicksAndSamplingRate();
 
@@ -334,8 +339,8 @@
 
   // Called from profile generator thread.
   void AddPathToCurrentProfiles(base::TimeTicks timestamp,
-                                const Vector<CodeEntry*>& path, int src_line,
-                                bool update_stats);
+                                const std::vector<CodeEntry*>& path,
+                                int src_line, bool update_stats);
 
   // Limits the number of profiles that can be simultaneously collected.
   static const int kMaxSimultaneousProfiles = 100;
diff --git a/src/profiler/sampler.cc b/src/profiler/sampler.cc
index e331db9..a340424 100644
--- a/src/profiler/sampler.cc
+++ b/src/profiler/sampler.cc
@@ -336,6 +336,14 @@
         reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
     state->fp =
         reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_S390
+    if (!simulator_->has_bad_pc()) {
+      state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+    }
+    state->sp =
+        reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
+    state->fp =
+        reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
 #endif
   }
 
@@ -441,7 +449,7 @@
 #else
   // Extracting the sample from the context is extremely machine dependent.
   ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !(V8_OS_OPENBSD || (V8_OS_LINUX && V8_HOST_ARCH_PPC))
+#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
   mcontext_t& mcontext = ucontext->uc_mcontext;
 #endif
 #if V8_OS_LINUX
@@ -482,6 +490,17 @@
   state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
   state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
   state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
+#elif V8_HOST_ARCH_S390
+#if V8_TARGET_ARCH_32_BIT
+  // 31-bit targets have bit 0 (MSB) of the PSW set to denote the addressing
+  // mode. This bit needs to be masked out to resolve the actual address.
+  state.pc =
+      reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
+#else
+  state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr);
+#endif  // V8_TARGET_ARCH_32_BIT
+  state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]);
+  state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]);
 #endif  // V8_HOST_ARCH_*
 #elif V8_OS_MACOSX
 #if V8_HOST_ARCH_X64
@@ -731,7 +750,18 @@
     frames[i++] = isolate->c_function();
   }
   while (!it.done() && i < frames_limit) {
-    frames[i++] = it.frame()->pc();
+    if (it.frame()->is_interpreted()) {
+      // For interpreted frames use the bytecode array pointer as the pc.
+      InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
+      // Since the sampler can interrupt execution at any point, the
+      // bytecode_array might be garbage, so don't dereference it.
+      Address bytecode_array =
+          reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
+      frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
+                    frame->GetBytecodeOffset();
+    } else {
+      frames[i++] = it.frame()->pc();
+    }
     it.Advance();
   }
   sample_info->frames_count = i;
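// Illustrative sketch: for interpreted frames the sampler above records a
// synthetic pc inside the BytecodeArray instead of a machine pc, and does so
// without dereferencing the (possibly stale) object. The constants below are
// illustrative placeholders, not V8's real kHeapObjectTag/kHeaderSize values.
#include <cstdint>

const uintptr_t kToyHeapObjectTag = 1;  // assumed tagged-pointer bit
const uintptr_t kToyHeaderSize = 16;    // assumed BytecodeArray header size

uintptr_t SyntheticBytecodePc(uintptr_t tagged_bytecode_array,
                              int bytecode_offset) {
  // Untag the pointer, then point at the current bytecode in the array body.
  return tagged_bytecode_array - kToyHeapObjectTag + kToyHeaderSize +
         static_cast<uintptr_t>(bytecode_offset);
}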
diff --git a/src/profiler/sampler.h b/src/profiler/sampler.h
index 8e8ef1c..dcd1255 100644
--- a/src/profiler/sampler.h
+++ b/src/profiler/sampler.h
@@ -58,7 +58,7 @@
   unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
   bool has_external_callback : 1;
   bool update_stats : 1;  // Whether the sample should update aggregated stats.
-  StackFrame::Type top_frame_type : 4;
+  StackFrame::Type top_frame_type : 5;
 };
 
 class Sampler {
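// Illustrative sketch: the bit-field widening above (4 -> 5 bits) matters
// because an unsigned 4-bit field can only represent 16 distinct values; once
// StackFrame::Type has more than 16 enumerators, larger values would be
// reduced modulo 16 and the sampler would see the wrong frame type.
#include <cassert>

struct Narrow { unsigned type : 4; };  // can represent 0..15 only
struct Wide   { unsigned type : 5; };  // can represent 0..31

void BitFieldWidthDemo() {
  Wide w;
  w.type = 17;  // a frame-type value beyond 15 still fits
  assert(w.type == 17);
  Narrow n;
  n.type = 17;  // reduced modulo 16 -> 1, i.e. silently the wrong value
  assert(n.type == 1);
}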
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
index c13538c..a32cae3 100644
--- a/src/profiler/sampling-heap-profiler.cc
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -99,7 +99,8 @@
 
   // Mark the new block as FreeSpace to make sure the heap is iterable while we
   // are taking the sample.
-  heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size));
+  heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size),
+                               ClearRecordedSlots::kNo);
 
   Local<v8::Value> loc = v8::Utils::ToLocal(obj);
 
@@ -199,19 +200,22 @@
   int column = v8::AllocationProfile::kNoColumnNumberInfo;
   std::vector<v8::AllocationProfile::Allocation> allocations;
   allocations.reserve(node->allocations_.size());
-  if (node->script_id_ != v8::UnboundScript::kNoScriptId) {
+  if (node->script_id_ != v8::UnboundScript::kNoScriptId &&
+      scripts.find(node->script_id_) != scripts.end()) {
     // Cannot use std::map<T>::at because it is not available on android.
     auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
     Script* script = non_const_scripts[node->script_id_];
-    if (script->name()->IsName()) {
-      Name* name = Name::cast(script->name());
-      script_name = ToApiHandle<v8::String>(
-          isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
+    if (script) {
+      if (script->name()->IsName()) {
+        Name* name = Name::cast(script->name());
+        script_name = ToApiHandle<v8::String>(
+            isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
+      }
+      Handle<Script> script_handle(script);
+      line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
+      column =
+          1 + Script::GetColumnNumber(script_handle, node->script_position_);
     }
-    Handle<Script> script_handle(script);
-
-    line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
-    column = 1 + Script::GetColumnNumber(script_handle, node->script_position_);
     for (auto alloc : node->allocations_) {
       allocations.push_back(ScaleSample(alloc.first, alloc.second));
     }
diff --git a/src/property-descriptor.cc b/src/property-descriptor.cc
index 750f948..31efb41 100644
--- a/src/property-descriptor.cc
+++ b/src/property-descriptor.cc
@@ -18,9 +18,9 @@
 // Helper function for ToPropertyDescriptor. Comments describe steps for
 // "enumerable", other properties are handled the same way.
 // Returns false if an exception was thrown.
-bool GetPropertyIfPresent(Handle<Object> obj, Handle<String> name,
+bool GetPropertyIfPresent(Handle<JSReceiver> receiver, Handle<String> name,
                           Handle<Object>* value) {
-  LookupIterator it(obj, name);
+  LookupIterator it(receiver, name, receiver);
   // 4. Let hasEnumerable be HasProperty(Obj, "enumerable").
   Maybe<bool> has_property = JSReceiver::HasProperty(&it);
   // 5. ReturnIfAbrupt(hasEnumerable).
@@ -29,7 +29,7 @@
   if (has_property.FromJust() == true) {
     // 6a. Let enum be ToBoolean(Get(Obj, "enumerable")).
     // 6b. ReturnIfAbrupt(enum).
-    if (!JSObject::GetProperty(&it).ToHandle(value)) return false;
+    if (!Object::GetProperty(&it).ToHandle(value)) return false;
   }
   return true;
 }
@@ -39,7 +39,7 @@
 // objects: nothing on the prototype chain, just own fast data properties.
 // Must not have observable side effects, because the slow path will restart
 // the entire conversion!
-bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<Object> obj,
+bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
                                   PropertyDescriptor* desc) {
   if (!obj->IsJSObject()) return false;
   Map* map = Handle<JSObject>::cast(obj)->map();
@@ -105,7 +105,7 @@
 
 void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
                         Handle<String> name, Handle<Object> value) {
-  LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
   CHECK(result.IsJust() && result.FromJust());
 }
@@ -190,14 +190,15 @@
   // 3. Let desc be a new Property Descriptor that initially has no fields.
   DCHECK(desc->is_empty());
 
-  if (ToPropertyDescriptorFastPath(isolate, obj, desc)) {
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(obj);
+  if (ToPropertyDescriptorFastPath(isolate, receiver, desc)) {
     return true;
   }
 
   // enumerable?
   Handle<Object> enumerable;
   // 4 through 6b.
-  if (!GetPropertyIfPresent(obj, isolate->factory()->enumerable_string(),
+  if (!GetPropertyIfPresent(receiver, isolate->factory()->enumerable_string(),
                             &enumerable)) {
     return false;
   }
@@ -209,7 +210,7 @@
   // configurable?
   Handle<Object> configurable;
   // 7 through 9b.
-  if (!GetPropertyIfPresent(obj, isolate->factory()->configurable_string(),
+  if (!GetPropertyIfPresent(receiver, isolate->factory()->configurable_string(),
                             &configurable)) {
     return false;
   }
@@ -221,7 +222,8 @@
   // value?
   Handle<Object> value;
   // 10 through 12b.
-  if (!GetPropertyIfPresent(obj, isolate->factory()->value_string(), &value)) {
+  if (!GetPropertyIfPresent(receiver, isolate->factory()->value_string(),
+                            &value)) {
     return false;
   }
   // 12c. Set the [[Value]] field of desc to value.
@@ -230,7 +232,7 @@
   // writable?
   Handle<Object> writable;
   // 13 through 15b.
-  if (!GetPropertyIfPresent(obj, isolate->factory()->writable_string(),
+  if (!GetPropertyIfPresent(receiver, isolate->factory()->writable_string(),
                             &writable)) {
     return false;
   }
@@ -240,7 +242,8 @@
   // getter?
   Handle<Object> getter;
   // 16 through 18b.
-  if (!GetPropertyIfPresent(obj, isolate->factory()->get_string(), &getter)) {
+  if (!GetPropertyIfPresent(receiver, isolate->factory()->get_string(),
+                            &getter)) {
     return false;
   }
   if (!getter.is_null()) {
@@ -257,7 +260,8 @@
   // setter?
   Handle<Object> setter;
   // 19 through 21b.
-  if (!GetPropertyIfPresent(obj, isolate->factory()->set_string(), &setter)) {
+  if (!GetPropertyIfPresent(receiver, isolate->factory()->set_string(),
+                            &setter)) {
     return false;
   }
   if (!setter.is_null()) {
diff --git a/src/prototype.h b/src/prototype.h
index c5e9545..e09ff0f 100644
--- a/src/prototype.h
+++ b/src/prototype.h
@@ -125,7 +125,7 @@
 
   // Returns false iff a call to JSProxy::GetPrototype throws.
   // TODO(neis): This should probably replace Advance().
-  bool AdvanceFollowingProxies() {
+  MUST_USE_RESULT bool AdvanceFollowingProxies() {
     DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
     if (!HasAccess()) {
       // Abort the lookup if we do not have access to the current object.
@@ -133,10 +133,15 @@
       is_at_end_ = true;
       return true;
     }
+    return AdvanceFollowingProxiesIgnoringAccessChecks();
+  }
+
+  MUST_USE_RESULT bool AdvanceFollowingProxiesIgnoringAccessChecks() {
     if (handle_.is_null() || !handle_->IsJSProxy()) {
       AdvanceIgnoringProxies();
       return true;
     }
+
     // Due to possible __proto__ recursion limit the number of Proxies
     // we visit to an arbitrarily chosen large number.
     seen_proxies_++;
diff --git a/src/regexp/arm/regexp-macro-assembler-arm.cc b/src/regexp/arm/regexp-macro-assembler-arm.cc
index ce72188..f8dfc97 100644
--- a/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -891,7 +891,8 @@
   masm_->GetCode(&code_desc);
   Handle<Code> code = isolate()->factory()->NewCode(
       code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
-  PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  PROFILE(masm_->isolate(),
+          RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 941ccea..e8bdad8 100644
--- a/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -1088,7 +1088,8 @@
   masm_->GetCode(&code_desc);
   Handle<Code> code = isolate()->factory()->NewCode(
       code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
-  PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  PROFILE(masm_->isolate(),
+          RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 4c22b43..9c55af6 100644
--- a/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -511,7 +511,8 @@
     __ and_(ebx, current_character());
     index = ebx;
   }
-  __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
+  __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize),
+          Immediate(0));
   BranchOrBacktrack(not_equal, on_bit_set);
 }
 
@@ -936,7 +937,8 @@
       isolate()->factory()->NewCode(code_desc,
                                     Code::ComputeFlags(Code::REGEXP),
                                     masm_->CodeObject());
-  PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
+  PROFILE(masm_->isolate(),
+          RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/regexp/jsregexp.cc b/src/regexp/jsregexp.cc
index 80f48ca..ddb4a16 100644
--- a/src/regexp/jsregexp.cc
+++ b/src/regexp/jsregexp.cc
@@ -41,6 +41,8 @@
 #include "src/regexp/arm/regexp-macro-assembler-arm.h"
 #elif V8_TARGET_ARCH_PPC
 #include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "src/regexp/mips/regexp-macro-assembler-mips.h"
 #elif V8_TARGET_ARCH_MIPS64
@@ -133,7 +135,7 @@
                                         Handle<String> pattern,
                                         JSRegExp::Flags flags) {
   Isolate* isolate = re->GetIsolate();
-  Zone zone;
+  Zone zone(isolate->allocator());
   CompilationCache* compilation_cache = isolate->compilation_cache();
   MaybeHandle<FixedArray> maybe_cached =
       compilation_cache->LookupRegExp(pattern, flags);
@@ -344,7 +346,7 @@
                                  bool is_one_byte) {
   // Compile the RegExp.
   Isolate* isolate = re->GetIsolate();
-  Zone zone;
+  Zone zone(isolate->allocator());
   PostponeInterruptsScope postpone(isolate);
   // If we had a compilation error the last time this is saved at the
   // saved code index.
@@ -6703,6 +6705,9 @@
 #elif V8_TARGET_ARCH_ARM64
   RegExpMacroAssemblerARM64 macro_assembler(isolate, zone, mode,
                                             (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_S390
+  RegExpMacroAssemblerS390 macro_assembler(isolate, zone, mode,
+                                           (data->capture_count + 1) * 2);
 #elif V8_TARGET_ARCH_PPC
   RegExpMacroAssemblerPPC macro_assembler(isolate, zone, mode,
                                           (data->capture_count + 1) * 2);
diff --git a/src/regexp/mips/regexp-macro-assembler-mips.cc b/src/regexp/mips/regexp-macro-assembler-mips.cc
index 6197f45..062d661 100644
--- a/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -905,7 +905,8 @@
   masm_->GetCode(&code_desc);
   Handle<Code> code = isolate()->factory()->NewCode(
       code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
-  LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  LOG(masm_->isolate(),
+      RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index bf95a9c..e0317de 100644
--- a/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -942,7 +942,8 @@
   masm_->GetCode(&code_desc);
   Handle<Code> code = isolate()->factory()->NewCode(
       code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
-  LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  LOG(masm_->isolate(),
+      RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index c05c580..70842f5 100644
--- a/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -940,7 +940,8 @@
   masm_->GetCode(&code_desc);
   Handle<Code> code = isolate()->factory()->NewCode(
       code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
-  PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  PROFILE(masm_->isolate(),
+          RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/regexp/regexp-macro-assembler.h b/src/regexp/regexp-macro-assembler.h
index 6f79a16..2aa439e 100644
--- a/src/regexp/regexp-macro-assembler.h
+++ b/src/regexp/regexp-macro-assembler.h
@@ -40,6 +40,7 @@
     kARMImplementation,
     kARM64Implementation,
     kMIPSImplementation,
+    kS390Implementation,
     kPPCImplementation,
     kX64Implementation,
     kX87Implementation,
diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc
index 46c593c..d433fc8 100644
--- a/src/regexp/regexp-parser.cc
+++ b/src/regexp/regexp-parser.cc
@@ -359,14 +359,17 @@
             Advance(2);
             if (unicode()) {
               if (FLAG_harmony_regexp_property) {
-                ZoneList<CharacterRange>* ranges = ParsePropertyClass();
-                if (ranges == nullptr) {
+                ZoneList<CharacterRange>* ranges =
+                    new (zone()) ZoneList<CharacterRange>(2, zone());
+                if (!ParsePropertyClass(ranges)) {
                   return ReportError(CStrVector("Invalid property name"));
                 }
                 RegExpCharacterClass* cc =
                     new (zone()) RegExpCharacterClass(ranges, p == 'P');
                 builder->AddCharacterClass(cc);
               } else {
+                // With /u, only syntax characters may be used in identity
+                // escapes. Otherwise, all identity escapes are allowed.
                 return ReportError(CStrVector("Invalid escape"));
               }
             } else {
@@ -841,54 +844,95 @@
   return result;
 }
 
-ZoneList<CharacterRange>* RegExpParser::ParsePropertyClass() {
 #ifdef V8_I18N_SUPPORT
-  char property_name[3];
-  memset(property_name, 0, sizeof(property_name));
-  if (current() == '{') {
-    Advance();
-    if (current() < 'A' || current() > 'Z') return nullptr;
-    property_name[0] = static_cast<char>(current());
-    Advance();
-    if (current() >= 'a' && current() <= 'z') {
-      property_name[1] = static_cast<char>(current());
-      Advance();
-    }
-    if (current() != '}') return nullptr;
-  } else if (current() >= 'A' && current() <= 'Z') {
-    property_name[0] = static_cast<char>(current());
-  } else {
-    return nullptr;
+bool IsExactPropertyValueAlias(const char* property_name, UProperty property,
+                               int32_t property_value) {
+  const char* short_name =
+      u_getPropertyValueName(property, property_value, U_SHORT_PROPERTY_NAME);
+  if (short_name != NULL && strcmp(property_name, short_name) == 0) return true;
+  for (int i = 0;; i++) {
+    const char* long_name = u_getPropertyValueName(
+        property, property_value,
+        static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
+    if (long_name == NULL) break;
+    if (strcmp(property_name, long_name) == 0) return true;
   }
-  Advance();
+  return false;
+}
 
-  int32_t category =
-      u_getPropertyValueEnum(UCHAR_GENERAL_CATEGORY_MASK, property_name);
-  if (category == UCHAR_INVALID_CODE) return nullptr;
+bool LookupPropertyClass(UProperty property, const char* property_name,
+                         ZoneList<CharacterRange>* result, Zone* zone) {
+  int32_t property_value = u_getPropertyValueEnum(property, property_name);
+  if (property_value == UCHAR_INVALID_CODE) return false;
+
+  // We require the property name to exactly match one of the property value
+  // aliases. However, u_getPropertyValueEnum uses loose matching.
+  if (!IsExactPropertyValueAlias(property_name, property, property_value)) {
+    return false;
+  }
 
   USet* set = uset_openEmpty();
   UErrorCode ec = U_ZERO_ERROR;
-  uset_applyIntPropertyValue(set, UCHAR_GENERAL_CATEGORY_MASK, category, &ec);
-  ZoneList<CharacterRange>* ranges = nullptr;
-  if (ec == U_ZERO_ERROR && !uset_isEmpty(set)) {
+  uset_applyIntPropertyValue(set, property, property_value, &ec);
+  bool success = ec == U_ZERO_ERROR && !uset_isEmpty(set);
+
+  if (success) {
     uset_removeAllStrings(set);
     int item_count = uset_getItemCount(set);
-    ranges = new (zone()) ZoneList<CharacterRange>(item_count, zone());
     int item_result = 0;
     for (int i = 0; i < item_count; i++) {
       uc32 start = 0;
       uc32 end = 0;
       item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
-      ranges->Add(CharacterRange::Range(start, end), zone());
+      result->Add(CharacterRange::Range(start, end), zone);
     }
     DCHECK_EQ(U_ZERO_ERROR, ec);
     DCHECK_EQ(0, item_result);
   }
   uset_close(set);
-  return ranges;
-#else   // V8_I18N_SUPPORT
-  return nullptr;
+  return success;
+}
 #endif  // V8_I18N_SUPPORT
+
+bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result) {
+#ifdef V8_I18N_SUPPORT
+  List<char> property_name_list;
+  if (current() == '{') {
+    for (Advance(); current() != '}'; Advance()) {
+      if (!has_next()) return false;
+      property_name_list.Add(static_cast<char>(current()));
+    }
+  } else if (current() != kEndMarker) {
+    property_name_list.Add(static_cast<char>(current()));
+  } else {
+    return false;
+  }
+  Advance();
+  property_name_list.Add(0);  // null-terminate string.
+
+  const char* property_name = property_name_list.ToConstVector().start();
+
+#define PROPERTY_NAME_LOOKUP(PROPERTY)                                  \
+  do {                                                                  \
+    if (LookupPropertyClass(PROPERTY, property_name, result, zone())) { \
+      return true;                                                      \
+    }                                                                   \
+  } while (false)
+
+  // General_Category (gc) found in PropertyValueAliases.txt
+  PROPERTY_NAME_LOOKUP(UCHAR_GENERAL_CATEGORY_MASK);
+  // Script (sc) found in Scripts.txt
+  PROPERTY_NAME_LOOKUP(UCHAR_SCRIPT);
+  // To disambiguate from script names, block names have an "In"-prefix.
+  if (property_name_list.length() > 3 && property_name[0] == 'I' &&
+      property_name[1] == 'n') {
+    // Block (blk) found in Blocks.txt
+    property_name += 2;
+    PROPERTY_NAME_LOOKUP(UCHAR_BLOCK);
+  }
+#undef PROPERTY_NAME_LOOKUP
+#endif  // V8_I18N_SUPPORT
+  return false;
 }
 
 bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
@@ -1068,6 +1112,34 @@
   }
 }
 
+bool RegExpParser::ParseClassProperty(ZoneList<CharacterRange>* ranges) {
+  if (!FLAG_harmony_regexp_property) return false;
+  if (!unicode()) return false;
+  if (current() != '\\') return false;
+  uc32 next = Next();
+  bool parse_success = false;
+  if (next == 'p') {
+    Advance(2);
+    parse_success = ParsePropertyClass(ranges);
+  } else if (next == 'P') {
+    Advance(2);
+    ZoneList<CharacterRange>* property_class =
+        new (zone()) ZoneList<CharacterRange>(2, zone());
+    parse_success = ParsePropertyClass(property_class);
+    if (parse_success) {
+      ZoneList<CharacterRange>* negated =
+          new (zone()) ZoneList<CharacterRange>(2, zone());
+      CharacterRange::Negate(property_class, negated, zone());
+      const Vector<CharacterRange> negated_vector = negated->ToVector();
+      ranges->AddAll(negated_vector, zone());
+    }
+  } else {
+    return false;
+  }
+  if (!parse_success)
+    ReportError(CStrVector("Invalid property name in character class"));
+  return parse_success;
+}
 
 RegExpTree* RegExpParser::ParseCharacterClass() {
   static const char* kUnterminated = "Unterminated character class";
@@ -1084,6 +1156,8 @@
   ZoneList<CharacterRange>* ranges =
       new (zone()) ZoneList<CharacterRange>(2, zone());
   while (has_more() && current() != ']') {
+    bool parsed_property = ParseClassProperty(ranges CHECK_FAILED);
+    if (parsed_property) continue;
     uc16 char_class = kNoCharClass;
     CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
     if (current() == '-') {
@@ -1356,14 +1430,10 @@
 
 bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpCharacterClass* cc) {
   if (!unicode()) return false;
-  switch (cc->standard_type()) {
-    case 's':        // white space
-    case 'w':        // ASCII word character
-    case 'd':        // ASCII digit
-      return false;  // These characters do not need desugaring.
-    default:
-      break;
-  }
+  // TODO(yangguo): we could be smarter than this. Case-insensitivity does not
+  // necessarily mean that we need to desugar. It's probably nicer to have a
+  // separate pass to figure out unicode desugarings.
+  if (ignore_case()) return true;
   ZoneList<CharacterRange>* ranges = cc->ranges(zone());
   CharacterRange::Canonicalize(ranges);
   for (int i = ranges->length() - 1; i >= 0; i--) {
diff --git a/src/regexp/regexp-parser.h b/src/regexp/regexp-parser.h
index acf783c..6142a9e 100644
--- a/src/regexp/regexp-parser.h
+++ b/src/regexp/regexp-parser.h
@@ -174,7 +174,7 @@
   bool ParseHexEscape(int length, uc32* value);
   bool ParseUnicodeEscape(uc32* value);
   bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
-  ZoneList<CharacterRange>* ParsePropertyClass();
+  bool ParsePropertyClass(ZoneList<CharacterRange>* result);
 
   uc32 ParseOctalLiteral();
 
@@ -184,6 +184,7 @@
   // can be reparsed.
   bool ParseBackReferenceIndex(int* index_out);
 
+  bool ParseClassProperty(ZoneList<CharacterRange>* result);
   CharacterRange ParseClassAtom(uc16* char_class);
   RegExpTree* ReportError(Vector<const char> message);
   void Advance();
diff --git a/src/regexp/s390/OWNERS b/src/regexp/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/regexp/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/regexp/s390/regexp-macro-assembler-s390.cc b/src/regexp/s390/regexp-macro-assembler-s390.cc
new file mode 100644
index 0000000..9dac534
--- /dev/null
+++ b/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -0,0 +1,1256 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/base/bits.h"
+#include "src/code-stubs.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"
+#include "src/unicode.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - r6: Temporarily stores the index of capture start after a matching pass
+ *        for a global regexp.
+ * - r7: Pointer to current code object (Code*) including heap object tag.
+ * - r8: Current position in input, as negative offset from end of string.
+ *        Please notice that this is the byte offset, not the character offset!
+ * - r9: Currently loaded character. Must be loaded using
+ *        LoadCurrentCharacter before using any of the dispatch methods.
+ * - r13: Points to tip of backtrack stack
+ * - r10: End of input (points to byte after last character in input).
+ * - r11: Frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - r12: IP register, used by assembler. Very volatile.
+ * - r15/sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *  - fp[112] Isolate* isolate   (address of the current isolate)
+ *  - fp[108] secondary link/return address used by native call.
+ *  - fp[104] direct_call        (if 1, direct call from JavaScript code,
+ *                                if 0, call through the runtime system).
+ *  - fp[100] stack_area_base    (high end of the memory area to use as
+ *                                backtracking stack).
+ *  - fp[96]  capture array size (may fit multiple sets of matches)
+ *  - fp[0..96] zLinux ABI register saving area
+ *  --- sp when called ---
+ *  --- frame pointer ----
+ *  - fp[-4]  direct_call        (if 1, direct call from JavaScript code,
+ *                                if 0, call through the runtime system).
+ *  - fp[-8]  stack_area_base    (high end of the memory area to use as
+ *                                backtracking stack).
+ *  - fp[-12] capture array size (may fit multiple sets of matches)
+ *  - fp[-16] int* capture_array (int[num_saved_registers_], for output).
+ *  - fp[-20] end of input       (address of end of string).
+ *  - fp[-24] start of input     (address of first character in string).
+ *  - fp[-28] start index        (character index of start).
+ *  - fp[-32] void* input_string (location of a handle containing the string).
+ *  - fp[-36] success counter    (only for global regexps to count matches).
+ *  - fp[-40] Offset of location before start of input (effectively character
+ *            string start - 1). Used to initialize capture registers to a
+ *            non-position.
+ *  - fp[-44] At start (if 1, we are starting at the start of the
+ *    string, otherwise 0)
+ *  - fp[-48] register 0         (Only positions must be stored in the first
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ *              int start_index,
+ *              Address start,
+ *              Address end,
+ *              int* capture_output_array,
+ *              byte* stack_area_base,
+ *              Address secondary_return_address,  // Only used by native call.
+ *              bool direct_call = false)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in s390/simulator-s390.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
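// Illustrative sketch: the comment above documents the calling convention of
// the generated code. With hypothetical stand-in types, the entry point can be
// thought of as the function-pointer type below; real invocations go through
// NativeRegExpMacroAssembler::Execute via CALL_GENERATED_REGEXP_CODE.
#include <cstdint>

using ToyAddress = uint8_t*;
using RegExpCodeEntry = int (*)(void* input_string, int start_index,
                                ToyAddress start, ToyAddress end,
                                int* capture_output_array,
                                uint8_t* stack_area_base,
                                ToyAddress secondary_return_address,
                                bool direct_call);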
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
+                                                   Mode mode,
+                                                   int registers_to_save)
+    : NativeRegExpMacroAssembler(isolate, zone),
+      masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+                               CodeObjectRequired::kYes)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_(),
+      internal_failure_label_() {
+  DCHECK_EQ(0, registers_to_save % 2);
+
+  __ b(&entry_label_);  // We'll write the entry code later.
+  // If the code gets too big or corrupted, an internal exception will be
+  // raised, and we will exit right away.
+  __ bind(&internal_failure_label_);
+  __ LoadImmP(r2, Operand(FAILURE));
+  __ Ret();
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+RegExpMacroAssemblerS390::~RegExpMacroAssemblerS390() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+  internal_failure_label_.Unuse();
+}
+
+int RegExpMacroAssemblerS390::stack_limit_slack() {
+  return RegExpStack::kStackLimitSlack;
+}
+
+void RegExpMacroAssemblerS390::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ AddP(current_input_offset(), Operand(by * char_size()));
+  }
+}
+
+void RegExpMacroAssemblerS390::AdvanceRegister(int reg, int by) {
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
+  if (by != 0) {
+    if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(by)) {
+      __ AddMI(register_location(reg), Operand(by));
+    } else {
+      __ LoadP(r2, register_location(reg), r0);
+      __ mov(r0, Operand(by));
+      __ AddRR(r2, r0);
+      __ StoreP(r2, register_location(reg));
+    }
+  }
+}
+
+void RegExpMacroAssemblerS390::Backtrack() {
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(r2);
+  __ AddP(r2, code_pointer());
+  __ b(r2);
+}
+
+void RegExpMacroAssemblerS390::Bind(Label* label) { __ bind(label); }
+
+void RegExpMacroAssemblerS390::CheckCharacter(uint32_t c, Label* on_equal) {
+  __ CmpLogicalP(current_character(), Operand(c));
+  BranchOrBacktrack(eq, on_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  __ CmpLogicalP(current_character(), Operand(limit));
+  BranchOrBacktrack(gt, on_greater);
+}
+
+void RegExpMacroAssemblerS390::CheckAtStart(Label* on_at_start) {
+  __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+  __ AddP(r2, current_input_offset(), Operand(-char_size()));
+  __ CmpP(r2, r3);
+  BranchOrBacktrack(eq, on_at_start);
+}
+
+void RegExpMacroAssemblerS390::CheckNotAtStart(int cp_offset,
+                                               Label* on_not_at_start) {
+  __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+  __ AddP(r2, current_input_offset(),
+          Operand(-char_size() + cp_offset * char_size()));
+  __ CmpP(r2, r3);
+  BranchOrBacktrack(ne, on_not_at_start);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterLT(uc16 limit, Label* on_less) {
+  __ CmpLogicalP(current_character(), Operand(limit));
+  BranchOrBacktrack(lt, on_less);
+}
+
+void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) {
+  Label backtrack_non_equal;
+  __ CmpP(current_input_offset(), MemOperand(backtrack_stackpointer(), 0));
+  __ bne(&backtrack_non_equal);
+  __ AddP(backtrack_stackpointer(), Operand(kPointerSize));
+
+  BranchOrBacktrack(al, on_equal);
+  __ bind(&backtrack_non_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+  Label fallthrough;
+  __ LoadP(r2, register_location(start_reg));      // Index of start of
+                                                   // capture
+  __ LoadP(r3, register_location(start_reg + 1));  // Index of end
+  __ SubP(r3, r3, r2);
+
+  // At this point, the capture registers are either both set or both cleared.
+  // If the capture length is zero, then the capture is either empty or cleared.
+  // Fall through in both cases.
+  __ beq(&fallthrough);
+
+  // Check that there are enough characters left in the input.
+  if (read_backward) {
+    __ LoadP(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
+    __ AddP(r5, r5, r3);
+    __ CmpP(current_input_offset(), r5);
+    BranchOrBacktrack(le, on_no_match);
+  } else {
+    __ AddP(r0, r3, current_input_offset());
+    BranchOrBacktrack(gt, on_no_match);
+  }
+
+  if (mode_ == LATIN1) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    // r2 - offset of start of capture
+    // r3 - length of capture
+    __ AddP(r2, end_of_input_address());
+    __ AddP(r4, current_input_offset(), end_of_input_address());
+    if (read_backward) {
+      __ SubP(r4, r4, r3);  // Offset by length when matching backwards.
+    }
+    __ mov(r1, Operand::Zero());
+
+    // r1 - Loop index
+    // r2 - Address of start of capture.
+    // r4 - Address of current input position.
+
+    Label loop;
+    __ bind(&loop);
+    __ LoadlB(r5, MemOperand(r2, r1));
+    __ LoadlB(r6, MemOperand(r4, r1));
+
+    __ CmpP(r6, r5);
+    __ beq(&loop_check);
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ Or(r5, Operand(0x20));  // Convert capture character to lower-case.
+    __ Or(r6, Operand(0x20));  // Also convert input character.
+    __ CmpP(r6, r5);
+    __ bne(&fail);
+    __ SubP(r5, Operand('a'));
+    __ CmpLogicalP(r5, Operand('z' - 'a'));  // Is r5 a lowercase letter?
+    __ ble(&loop_check);                     // In range 'a'-'z'.
+    // Latin-1: Check for values in range [224,254] but not 247.
+    __ SubP(r5, Operand(224 - 'a'));
+    __ CmpLogicalP(r5, Operand(254 - 224));
+    __ bgt(&fail);                           // Weren't Latin-1 letters.
+    __ CmpLogicalP(r5, Operand(247 - 224));  // Check for 247.
+    __ beq(&fail);
+
+    __ bind(&loop_check);
+    __ la(r1, MemOperand(r1, char_size()));
+    __ CmpP(r1, r3);
+    __ blt(&loop);
+    __ b(&success);
+
+    __ bind(&fail);
+    BranchOrBacktrack(al, on_no_match);
+
+    __ bind(&success);
+    // Compute new value of character position after the matched part.
+    __ SubP(current_input_offset(), r4, end_of_input_address());
+    if (read_backward) {
+      __ LoadP(r2, register_location(start_reg));  // Index of start of capture
+      __ LoadP(r3,
+               register_location(start_reg + 1));  // Index of end of capture
+      __ AddP(current_input_offset(), current_input_offset(), r2);
+      __ SubP(current_input_offset(), current_input_offset(), r3);
+    }
+    __ AddP(current_input_offset(), r1);
+  } else {
+    DCHECK(mode_ == UC16);
+    int argument_count = 4;
+    __ PrepareCallCFunction(argument_count, r4);
+
+    // r2 - offset of start of capture
+    // r3 - length of capture
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   r2: Address byte_offset1 - Address captured substring's start.
+    //   r3: Address byte_offset2 - Address of current character position.
+    //   r4: size_t byte_length - length of capture in bytes(!)
+    //   r5: Isolate* isolate or 0 if unicode flag.
+
+    // Address of start of capture.
+    __ AddP(r2, end_of_input_address());
+    // Length of capture.
+    __ LoadRR(r4, r3);
+    // Save length in callee-save register for use on return.
+    __ LoadRR(r6, r3);
+    // Address of current input position.
+    __ AddP(r3, current_input_offset(), end_of_input_address());
+    if (read_backward) {
+      __ SubP(r3, r3, r6);
+    }
+// Isolate.
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ LoadImmP(r5, Operand::Zero());
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+    }
+
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(isolate());
+      __ CallCFunction(function, argument_count);
+    }
+
+    // Check if function returned non-zero for success or zero for failure.
+    __ CmpP(r2, Operand::Zero());
+    BranchOrBacktrack(eq, on_no_match);
+
+    // On success, advance position by length of capture.
+    if (read_backward) {
+      __ SubP(current_input_offset(), current_input_offset(), r6);
+    } else {
+      __ AddP(current_input_offset(), current_input_offset(), r6);
+    }
+  }
+
+  __ bind(&fallthrough);
+}
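// Illustrative sketch: the LATIN1 path above folds case by OR-ing 0x20 into
// both characters and then verifying the folded value really is a letter
// ('a'..'z', or Latin-1 224..254 excluding 247, the division sign). The
// host-side predicate below mirrors that control flow, outside the generated
// assembly.
#include <cstdint>

bool ToyLatin1CharsMatchIgnoringCase(uint8_t a, uint8_t b) {
  if (a == b) return true;   // exact match, no folding needed
  uint8_t fa = a | 0x20;     // fold capture character to lower case
  uint8_t fb = b | 0x20;     // fold input character to lower case
  if (fa != fb) return false;
  if (fa >= 'a' && fa <= 'z') return true;      // ASCII letters
  return fa >= 224 && fa <= 254 && fa != 247;   // Latin-1 letters, not 0xF7
}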
+
+void RegExpMacroAssemblerS390::CheckNotBackReference(int start_reg,
+                                                     bool read_backward,
+                                                     Label* on_no_match) {
+  Label fallthrough;
+  Label success;
+
+  // Find length of back-referenced capture.
+  __ LoadP(r2, register_location(start_reg));
+  __ LoadP(r3, register_location(start_reg + 1));
+  __ SubP(r3, r3, r2);  // Length to check.
+
+  // At this point, the capture registers are either both set or both cleared.
+  // If the capture length is zero, then the capture is either empty or cleared.
+  // Fall through in both cases.
+  __ beq(&fallthrough);
+
+  // Check that there are enough characters left in the input.
+  if (read_backward) {
+    __ LoadP(r5, MemOperand(frame_pointer(), kStringStartMinusOne));
+    __ AddP(r5, r5, r3);
+    __ CmpP(current_input_offset(), r5);
+    BranchOrBacktrack(lt, on_no_match);
+  } else {
+    __ AddP(r0, r3, current_input_offset());
+    BranchOrBacktrack(gt, on_no_match, cr0);
+  }
+
+  // r2 - offset of start of capture
+  // r3 - length of capture
+  __ la(r2, MemOperand(r2, end_of_input_address()));
+  __ la(r4, MemOperand(current_input_offset(), end_of_input_address()));
+  if (read_backward) {
+    __ SubP(r4, r4, r3);  // Offset by length when matching backwards.
+  }
+  __ mov(r1, Operand::Zero());
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == LATIN1) {
+    __ LoadlB(r5, MemOperand(r2, r1));
+    __ LoadlB(r6, MemOperand(r4, r1));
+  } else {
+    DCHECK(mode_ == UC16);
+    __ LoadLogicalHalfWordP(r5, MemOperand(r2, r1));
+    __ LoadLogicalHalfWordP(r6, MemOperand(r4, r1));
+  }
+  __ la(r1, MemOperand(r1, char_size()));
+  __ CmpP(r5, r6);
+  BranchOrBacktrack(ne, on_no_match);
+  __ CmpP(r1, r3);
+  __ blt(&loop);
+
+  // Move current character position to position after match.
+  __ SubP(current_input_offset(), r4, end_of_input_address());
+  if (read_backward) {
+    __ LoadP(r2, register_location(start_reg));  // Index of start of capture
+    __ LoadP(r3, register_location(start_reg + 1));  // Index of end of capture
+    __ AddP(current_input_offset(), current_input_offset(), r2);
+    __ SubP(current_input_offset(), current_input_offset(), r3);
+  }
+  __ AddP(current_input_offset(), r1);
+
+  __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerS390::CheckNotCharacter(unsigned c,
+                                                 Label* on_not_equal) {
+  __ CmpLogicalP(current_character(), Operand(c));
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+                                                      Label* on_equal) {
+  __ AndP(r2, current_character(), Operand(mask));
+  if (c != 0) {
+    __ CmpLogicalP(r2, Operand(c));
+  }
+  BranchOrBacktrack(eq, on_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckNotCharacterAfterAnd(unsigned c,
+                                                         unsigned mask,
+                                                         Label* on_not_equal) {
+  __ AndP(r2, current_character(), Operand(mask));
+  if (c != 0) {
+    __ CmpLogicalP(r2, Operand(c));
+  }
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckNotCharacterAfterMinusAnd(
+    uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
+  __ lay(r2, MemOperand(current_character(), -minus));
+  __ And(r2, Operand(mask));
+  if (c != 0) {
+    __ CmpLogicalP(r2, Operand(c));
+  }
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterInRange(uc16 from, uc16 to,
+                                                     Label* on_in_range) {
+  __ lay(r2, MemOperand(current_character(), -from));
+  __ CmpLogicalP(r2, Operand(to - from));
+  BranchOrBacktrack(le, on_in_range);  // Unsigned lower-or-same condition.
+}
+
+void RegExpMacroAssemblerS390::CheckCharacterNotInRange(
+    uc16 from, uc16 to, Label* on_not_in_range) {
+  __ lay(r2, MemOperand(current_character(), -from));
+  __ CmpLogicalP(r2, Operand(to - from));
+  BranchOrBacktrack(gt, on_not_in_range);  // Unsigned higher condition.
+}
+
+void RegExpMacroAssemblerS390::CheckBitInTable(Handle<ByteArray> table,
+                                               Label* on_bit_set) {
+  __ mov(r2, Operand(table));
+  Register index = current_character();
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+    __ AndP(r3, current_character(), Operand(kTableSize - 1));
+    index = r3;
+  }
+  __ LoadlB(r2,
+            MemOperand(r2, index, (ByteArray::kHeaderSize - kHeapObjectTag)));
+  __ CmpP(r2, Operand::Zero());
+  BranchOrBacktrack(ne, on_bit_set);
+}
+
+bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
+                                                          Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check
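+  // (e.g. for 'd' below, (c - '0') <= ('9' - '0') holds as an unsigned
+  // comparison exactly when c is an ASCII digit).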
+  switch (type) {
+    case 's':
+      // Match space-characters
+      if (mode_ == LATIN1) {
+        // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+        Label success;
+        __ CmpP(current_character(), Operand(' '));
+        __ beq(&success);
+        // Check range 0x09..0x0d
+        __ SubP(r2, current_character(), Operand('\t'));
+        __ CmpLogicalP(r2, Operand('\r' - '\t'));
+        __ ble(&success);
+        // \u00a0 (NBSP).
+        __ CmpLogicalP(r2, Operand(0x00a0 - '\t'));
+        BranchOrBacktrack(ne, on_no_match);
+        __ bind(&success);
+        return true;
+      }
+      return false;
+    case 'S':
+      // The emitted code for generic character classes is good enough.
+      return false;
+    case 'd':
+      // Match ASCII digits ('0'..'9')
+      __ SubP(r2, current_character(), Operand('0'));
+      __ CmpLogicalP(r2, Operand('9' - '0'));
+      BranchOrBacktrack(gt, on_no_match);
+      return true;
+    case 'D':
+      // Match anything but ASCII digits ('0'..'9')
+      __ SubP(r2, current_character(), Operand('0'));
+      __ CmpLogicalP(r2, Operand('9' - '0'));
+      BranchOrBacktrack(le, on_no_match);
+      return true;
+    case '.': {
+      // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+      __ XorP(r2, current_character(), Operand(0x01));
+      // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+      __ SubP(r2, Operand(0x0b));
+      __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+      BranchOrBacktrack(le, on_no_match);
+      if (mode_ == UC16) {
+        // Compare original value to 0x2028 and 0x2029, using the already
+        // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+        // 0x201d (0x2028 - 0x0b) or 0x201e.
+        __ SubP(r2, Operand(0x2028 - 0x0b));
+        __ CmpLogicalP(r2, Operand(1));
+        BranchOrBacktrack(le, on_no_match);
+      }
+      return true;
+    }
+    case 'n': {
+      // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+      __ XorP(r2, current_character(), Operand(0x01));
+      // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+      __ SubP(r2, Operand(0x0b));
+      __ CmpLogicalP(r2, Operand(0x0c - 0x0b));
+      if (mode_ == LATIN1) {
+        BranchOrBacktrack(gt, on_no_match);
+      } else {
+        Label done;
+        __ ble(&done);
+        // Compare original value to 0x2028 and 0x2029, using the already
+        // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+        // 0x201d (0x2028 - 0x0b) or 0x201e.
+        __ SubP(r2, Operand(0x2028 - 0x0b));
+        __ CmpLogicalP(r2, Operand(1));
+        BranchOrBacktrack(gt, on_no_match);
+        __ bind(&done);
+      }
+      return true;
+    }
+    case 'w': {
+      if (mode_ != LATIN1) {
+        // Table is 256 entries, so all Latin-1 characters can be tested.
+        __ CmpP(current_character(), Operand('z'));
+        BranchOrBacktrack(gt, on_no_match);
+      }
+      ExternalReference map = ExternalReference::re_word_character_map();
+      __ mov(r2, Operand(map));
+      __ LoadlB(r2, MemOperand(r2, current_character()));
+      __ CmpLogicalP(r2, Operand::Zero());
+      BranchOrBacktrack(eq, on_no_match);
+      return true;
+    }
+    case 'W': {
+      Label done;
+      if (mode_ != LATIN1) {
+        // Table is 256 entries, so all Latin-1 characters can be tested.
+        __ CmpLogicalP(current_character(), Operand('z'));
+        __ bgt(&done);
+      }
+      ExternalReference map = ExternalReference::re_word_character_map();
+      __ mov(r2, Operand(map));
+      __ LoadlB(r2, MemOperand(r2, current_character()));
+      __ CmpLogicalP(r2, Operand::Zero());
+      BranchOrBacktrack(ne, on_no_match);
+      if (mode_ != LATIN1) {
+        __ bind(&done);
+      }
+      return true;
+    }
+    case '*':
+      // Match any character.
+      return true;
+    // No custom implementation (yet): s(UC16), S(UC16).
+    default:
+      return false;
+  }
+}
+
+void RegExpMacroAssemblerS390::Fail() {
+  __ LoadImmP(r2, Operand(FAILURE));
+  __ b(&exit_label_);
+}
+
+Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
+  Label return_r2;
+
+  // Finalize code - write the entry point code now we know how many
+  // registers we need.
+
+  // Entry code:
+  __ bind(&entry_label_);
+
+  // Tell the system that we have a stack frame.  Because the type
+  // is MANUAL, no code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Ensure register assignments are consistent with the callee-save mask
+  DCHECK(r6.bit() & kRegExpCalleeSaved);
+  DCHECK(code_pointer().bit() & kRegExpCalleeSaved);
+  DCHECK(current_input_offset().bit() & kRegExpCalleeSaved);
+  DCHECK(current_character().bit() & kRegExpCalleeSaved);
+  DCHECK(backtrack_stackpointer().bit() & kRegExpCalleeSaved);
+  DCHECK(end_of_input_address().bit() & kRegExpCalleeSaved);
+  DCHECK(frame_pointer().bit() & kRegExpCalleeSaved);
+
+  // zLinux ABI
+  //    Incoming parameters:
+  //          r2: input_string
+  //          r3: start_index
+  //          r4: start addr
+  //          r5: end addr
+  //          r6: capture output array
+  //    Requires us to save the callee-preserved registers r6-r13
+  //    General convention is to also save r14 (return addr) and
+  //    sp/r15 in a single STM/STMG
+  __ StoreMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
+
+  // Load stack parameters from caller stack frame
+  __ LoadMultipleP(r7, r9,
+                   MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+  // r7 = capture array size
+  // r8 = stack area base
+  // r9 = direct call
+
+  // Actually emit code to start a new stack frame.
+  // Push arguments
+  // Save callee-save registers.
+  // Start new stack frame.
+  // Store link register in existing stack-cell.
+  // Order here should correspond to order of offset constants in header file.
+  //
+  // Set frame pointer in space for it if this is not a direct call
+  // from generated code.
+  __ LoadRR(frame_pointer(), sp);
+  __ lay(sp, MemOperand(sp, -10 * kPointerSize));
+  __ mov(r1, Operand::Zero());  // success counter
+  __ LoadRR(r0, r1);            // offset of location
+  __ StoreMultipleP(r0, r9, MemOperand(sp, 0));
+
+  // Check if we have space on the stack for registers.
+  Label stack_limit_hit;
+  Label stack_ok;
+
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(isolate());
+  __ mov(r2, Operand(stack_limit));
+  __ LoadP(r2, MemOperand(r2));
+  __ SubP(r2, sp, r2);
+  // Handle it if the stack pointer is already below the stack limit.
+  __ ble(&stack_limit_hit);
+  // Check if there is room for the variable number of registers above
+  // the stack limit.
+  __ CmpLogicalP(r2, Operand(num_registers_ * kPointerSize));
+  __ bge(&stack_ok);
+  // Exit with OutOfMemory exception. There is not enough space on the stack
+  // for our working registers.
+  __ mov(r2, Operand(EXCEPTION));
+  __ b(&return_r2);
+
+  __ bind(&stack_limit_hit);
+  CallCheckStackGuardState(r2);
+  __ CmpP(r2, Operand::Zero());
+  // If returned value is non-zero, we exit with the returned value as result.
+  __ bne(&return_r2);
+
+  __ bind(&stack_ok);
+
+  // Allocate space on stack for registers.
+  __ lay(sp, MemOperand(sp, (-num_registers_ * kPointerSize)));
+  // Load string end.
+  __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+  // Load input start.
+  __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+  // Find negative length (offset of start relative to end).
+  __ SubP(current_input_offset(), r4, end_of_input_address());
+  __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
+  // Set r1 to address of char before start of the input string
+  // (effectively string position -1).
+  __ LoadRR(r1, r4);
+  __ SubP(r1, current_input_offset(), Operand(char_size()));
+  if (mode_ == UC16) {
+    __ ShiftLeftP(r0, r3, Operand(1));
+    __ SubP(r1, r1, r0);
+  } else {
+    __ SubP(r1, r1, r3);
+  }
+  // Store this value in a local variable, for use when clearing
+  // position registers.
+  __ StoreP(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+  // Initialize code pointer register
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+
+  Label load_char_start_regexp, start_regexp;
+  // Load newline if index is at start, previous character otherwise.
+  __ CmpP(r3, Operand::Zero());
+  __ bne(&load_char_start_regexp);
+  __ mov(current_character(), Operand('\n'));
+  __ b(&start_regexp);
+
+  // Global regexp restarts matching here.
+  __ bind(&load_char_start_regexp);
+  // Load previous char as initial value of current character register.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&start_regexp);
+
+  // Initialize on-stack registers.
+  if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+    // Fill saved registers with initial value = start offset - 1
+    if (num_saved_registers_ > 8) {
+      // One slot beyond address of register 0.
+      __ lay(r3, MemOperand(frame_pointer(), kRegisterZero + kPointerSize));
+      __ LoadImmP(r4, Operand(num_saved_registers_));
+      Label init_loop;
+      __ bind(&init_loop);
+      __ StoreP(r1, MemOperand(r3, -kPointerSize));
+      __ lay(r3, MemOperand(r3, -kPointerSize));
+      __ BranchOnCount(r4, &init_loop);
+    } else {
+      for (int i = 0; i < num_saved_registers_; i++) {
+        __ StoreP(r1, register_location(i));
+      }
+    }
+  }
+
+  // Initialize backtrack stack pointer.
+  __ LoadP(backtrack_stackpointer(),
+           MemOperand(frame_pointer(), kStackHighEnd));
+
+  __ b(&start_label_);
+
+  // Exit code:
+  if (success_label_.is_linked()) {
+    // Save captures when successful.
+    __ bind(&success_label_);
+    if (num_saved_registers_ > 0) {
+      // copy captures to output
+      __ LoadP(r0, MemOperand(frame_pointer(), kInputStart));
+      __ LoadP(r2, MemOperand(frame_pointer(), kRegisterOutput));
+      __ LoadP(r4, MemOperand(frame_pointer(), kStartIndex));
+      __ SubP(r0, end_of_input_address(), r0);
+      // r0 is length of input in bytes.
+      if (mode_ == UC16) {
+        __ ShiftRightP(r0, r0, Operand(1));
+      }
+      // r0 is length of input in characters.
+      __ AddP(r0, r4);
+      // r0 is length of string in characters.
+
+      DCHECK_EQ(0, num_saved_registers_ % 2);
+      // Always an even number of capture registers. This allows us to
+      // unroll the loop once to add an operation between a load of a register
+      // and the following use of that register.
+      __ lay(r2, MemOperand(r2, num_saved_registers_ * kIntSize));
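+      // Advance r2 past the output area so the captures below can be
+      // stored at negative offsets from it.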
+      for (int i = 0; i < num_saved_registers_;) {
+        if (false && i < num_saved_registers_ - 4) {
+          // TODO(john.yan): Can be optimized by SIMD instructions
+          __ LoadMultipleP(r3, r6, register_location(i + 3));
+          if (mode_ == UC16) {
+            __ ShiftRightArithP(r3, r3, Operand(1));
+            __ ShiftRightArithP(r4, r4, Operand(1));
+            __ ShiftRightArithP(r5, r5, Operand(1));
+            __ ShiftRightArithP(r6, r6, Operand(1));
+          }
+          __ AddP(r3, r0);
+          __ AddP(r4, r0);
+          __ AddP(r5, r0);
+          __ AddP(r6, r0);
+          __ StoreW(r3,
+                    MemOperand(r2, -(num_saved_registers_ - i - 3) * kIntSize));
+          __ StoreW(r4,
+                    MemOperand(r2, -(num_saved_registers_ - i - 2) * kIntSize));
+          __ StoreW(r5,
+                    MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
+          __ StoreW(r6, MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
+          i += 4;
+        } else {
+          __ LoadMultipleP(r3, r4, register_location(i + 1));
+          if (mode_ == UC16) {
+            __ ShiftRightArithP(r3, r3, Operand(1));
+            __ ShiftRightArithP(r4, r4, Operand(1));
+          }
+          __ AddP(r3, r0);
+          __ AddP(r4, r0);
+          __ StoreW(r3,
+                    MemOperand(r2, -(num_saved_registers_ - i - 1) * kIntSize));
+          __ StoreW(r4, MemOperand(r2, -(num_saved_registers_ - i) * kIntSize));
+          i += 2;
+        }
+      }
+      if (global_with_zero_length_check()) {
+        // Keep capture start in r6 for the zero-length check later.
+        __ LoadP(r6, register_location(0));
+      }
+    }
+
+    if (global()) {
+      // Restart matching if the regular expression is flagged as global.
+      __ LoadP(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+      __ LoadP(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
+      __ LoadP(r4, MemOperand(frame_pointer(), kRegisterOutput));
+      // Increment success counter.
+      __ AddP(r2, Operand(1));
+      __ StoreP(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+      // Capture results have been stored, so the number of remaining global
+      // output registers is reduced by the number of stored captures.
+      __ SubP(r3, Operand(num_saved_registers_));
+      // Check whether we have enough room for another set of capture results.
+      __ CmpP(r3, Operand(num_saved_registers_));
+      __ blt(&return_r2);
+
+      __ StoreP(r3, MemOperand(frame_pointer(), kNumOutputRegisters));
+      // Advance the location for output.
+      __ AddP(r4, Operand(num_saved_registers_ * kIntSize));
+      __ StoreP(r4, MemOperand(frame_pointer(), kRegisterOutput));
+
+      // Prepare r2 to initialize registers with its value in the next run.
+      __ LoadP(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+      if (global_with_zero_length_check()) {
+        // Special case for zero-length matches.
+        // r6: capture start index
+        __ CmpP(current_input_offset(), r6);
+        // Not a zero-length match, restart.
+        __ bne(&load_char_start_regexp);
+        // Offset from the end is zero if we already reached the end.
+        __ CmpP(current_input_offset(), Operand::Zero());
+        __ beq(&exit_label_);
+        // Advance current position after a zero-length match.
+        Label advance;
+        __ bind(&advance);
+        __ AddP(current_input_offset(), Operand((mode_ == UC16) ? 2 : 1));
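+        // In unicode mode, advance again if the new position falls between
+        // the two halves of a surrogate pair.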
+        if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
+      }
+
+      __ b(&load_char_start_regexp);
+    } else {
+      __ LoadImmP(r2, Operand(SUCCESS));
+    }
+  }
+
+  // Exit and return r2
+  __ bind(&exit_label_);
+  if (global()) {
+    __ LoadP(r2, MemOperand(frame_pointer(), kSuccessfulCaptures));
+  }
+
+  __ bind(&return_r2);
+  // Skip sp past regexp registers and local variables.
+  __ LoadRR(sp, frame_pointer());
+  // Restore registers r6..r15.
+  __ LoadMultipleP(r6, sp, MemOperand(sp, 6 * kPointerSize));
+
+  __ b(r14);
+
+  // Backtrack code (branch target for conditional backtracks).
+  if (backtrack_label_.is_linked()) {
+    __ bind(&backtrack_label_);
+    Backtrack();
+  }
+
+  Label exit_with_exception;
+
+  // Preempt-code
+  if (check_preempt_label_.is_linked()) {
+    SafeCallTarget(&check_preempt_label_);
+
+    CallCheckStackGuardState(r2);
+    __ CmpP(r2, Operand::Zero());
+    // If returning non-zero, we should end execution with the given
+    // result as return value.
+    __ bne(&return_r2);
+
+    // String might have moved: Reload end of string from frame.
+    __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+    SafeReturn();
+  }
+
+  // Backtrack stack overflow code.
+  if (stack_overflow_label_.is_linked()) {
+    SafeCallTarget(&stack_overflow_label_);
+    // Reached if the backtrack-stack limit has been hit.
+    Label grow_failed;
+
+    // Call GrowStack(backtrack_stackpointer(), &stack_base)
+    static const int num_arguments = 3;
+    __ PrepareCallCFunction(num_arguments, r2);
+    __ LoadRR(r2, backtrack_stackpointer());
+    __ AddP(r3, frame_pointer(), Operand(kStackHighEnd));
+    __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+    ExternalReference grow_stack = ExternalReference::re_grow_stack(isolate());
+    __ CallCFunction(grow_stack, num_arguments);
+    // If the call returned NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&exit_with_exception);
+    // Otherwise use return value as new stack pointer.
+    __ LoadRR(backtrack_stackpointer(), r2);
+    // Restore saved registers and continue.
+    SafeReturn();
+  }
+
+  if (exit_with_exception.is_linked()) {
+    // If any of the code above needed to exit with an exception.
+    __ bind(&exit_with_exception);
+    // Exit with Result EXCEPTION(-1) to signal thrown exception.
+    __ LoadImmP(r2, Operand(EXCEPTION));
+    __ b(&return_r2);
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = isolate()->factory()->NewCode(
+      code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+  PROFILE(masm_->isolate(),
+          RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
+  return Handle<HeapObject>::cast(code);
+}
+
+void RegExpMacroAssemblerS390::GoTo(Label* to) { BranchOrBacktrack(al, to); }
+
+void RegExpMacroAssemblerS390::IfRegisterGE(int reg, int comparand,
+                                            Label* if_ge) {
+  __ LoadP(r2, register_location(reg), r0);
+  __ CmpP(r2, Operand(comparand));
+  BranchOrBacktrack(ge, if_ge);
+}
+
+void RegExpMacroAssemblerS390::IfRegisterLT(int reg, int comparand,
+                                            Label* if_lt) {
+  __ LoadP(r2, register_location(reg), r0);
+  __ CmpP(r2, Operand(comparand));
+  BranchOrBacktrack(lt, if_lt);
+}
+
+void RegExpMacroAssemblerS390::IfRegisterEqPos(int reg, Label* if_eq) {
+  __ LoadP(r2, register_location(reg), r0);
+  __ CmpP(r2, current_input_offset());
+  BranchOrBacktrack(eq, if_eq);
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerS390::Implementation() {
+  return kS390Implementation;
+}
+
+void RegExpMacroAssemblerS390::LoadCurrentCharacter(int cp_offset,
+                                                    Label* on_end_of_input,
+                                                    bool check_bounds,
+                                                    int characters) {
+  DCHECK(cp_offset < (1 << 30));  // Be sane! (And ensure negation works)
+  if (check_bounds) {
+    if (cp_offset >= 0) {
+      CheckPosition(cp_offset + characters - 1, on_end_of_input);
+    } else {
+      CheckPosition(cp_offset, on_end_of_input);
+    }
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+void RegExpMacroAssemblerS390::PopCurrentPosition() {
+  Pop(current_input_offset());
+}
+
+void RegExpMacroAssemblerS390::PopRegister(int register_index) {
+  Pop(r2);
+  __ StoreP(r2, register_location(register_index));
+}
+
+void RegExpMacroAssemblerS390::PushBacktrack(Label* label) {
+  if (label->is_bound()) {
+    int target = label->pos();
+    __ mov(r2, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    masm_->load_label_offset(r2, label);
+  }
+  Push(r2);
+  CheckStackLimit();
+}
+
+void RegExpMacroAssemblerS390::PushCurrentPosition() {
+  Push(current_input_offset());
+}
+
+void RegExpMacroAssemblerS390::PushRegister(int register_index,
+                                            StackCheckFlag check_stack_limit) {
+  __ LoadP(r2, register_location(register_index), r0);
+  Push(r2);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+void RegExpMacroAssemblerS390::ReadCurrentPositionFromRegister(int reg) {
+  __ LoadP(current_input_offset(), register_location(reg), r0);
+}
+
+void RegExpMacroAssemblerS390::ReadStackPointerFromRegister(int reg) {
+  __ LoadP(backtrack_stackpointer(), register_location(reg), r0);
+  __ LoadP(r2, MemOperand(frame_pointer(), kStackHighEnd));
+  __ AddP(backtrack_stackpointer(), r2);
+}
+
+void RegExpMacroAssemblerS390::SetCurrentPositionFromEnd(int by) {
+  Label after_position;
+  __ CmpP(current_input_offset(), Operand(-by * char_size()));
+  __ bge(&after_position);
+  __ mov(current_input_offset(), Operand(-by * char_size()));
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
+
+void RegExpMacroAssemblerS390::SetRegister(int register_index, int to) {
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ mov(r2, Operand(to));
+  __ StoreP(r2, register_location(register_index));
+}
+
+bool RegExpMacroAssemblerS390::Succeed() {
+  __ b(&success_label_);
+  return global();
+}
+
+void RegExpMacroAssemblerS390::WriteCurrentPositionToRegister(int reg,
+                                                              int cp_offset) {
+  if (cp_offset == 0) {
+    __ StoreP(current_input_offset(), register_location(reg));
+  } else {
+    __ AddP(r2, current_input_offset(), Operand(cp_offset * char_size()));
+    __ StoreP(r2, register_location(reg));
+  }
+}
+
+void RegExpMacroAssemblerS390::ClearRegisters(int reg_from, int reg_to) {
+  DCHECK(reg_from <= reg_to);
+  __ LoadP(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ StoreP(r2, register_location(reg));
+  }
+}
+
+void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
+  __ LoadP(r3, MemOperand(frame_pointer(), kStackHighEnd));
+  __ SubP(r2, backtrack_stackpointer(), r3);
+  __ StoreP(r2, register_location(reg));
+}
+
+// Private methods:
+
+void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
+  static const int num_arguments = 3;
+  __ PrepareCallCFunction(num_arguments, scratch);
+  // RegExp code frame pointer.
+  __ LoadRR(r4, frame_pointer());
+  // Code* of self.
+  __ mov(r3, Operand(masm_->CodeObject()));
+  // r2 becomes return address pointer.
+  __ lay(r2, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state(isolate());
+  CallCFunctionUsingStub(stack_guard_check, num_arguments);
+}
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  DCHECK(sizeof(T) == kPointerSize);
+#ifdef V8_TARGET_ARCH_S390X
+  return reinterpret_cast<T&>(Memory::uint64_at(re_frame + frame_offset));
+#else
+  return reinterpret_cast<T&>(Memory::uint32_at(re_frame + frame_offset));
+#endif
+}
+
+template <typename T>
+static T* frame_entry_address(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+int RegExpMacroAssemblerS390::CheckStackGuardState(Address* return_address,
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  return NativeRegExpMacroAssembler::CheckStackGuardState(
+      frame_entry<Isolate*>(re_frame, kIsolate),
+      frame_entry<intptr_t>(re_frame, kStartIndex),
+      frame_entry<intptr_t>(re_frame, kDirectCall) == 1, return_address,
+      re_code, frame_entry_address<String*>(re_frame, kInputString),
+      frame_entry_address<const byte*>(re_frame, kInputStart),
+      frame_entry_address<const byte*>(re_frame, kInputEnd));
+}
+
+MemOperand RegExpMacroAssemblerS390::register_location(int register_index) {
+  DCHECK(register_index < (1 << 30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return MemOperand(frame_pointer(),
+                    kRegisterZero - register_index * kPointerSize);
+}
+
+void RegExpMacroAssemblerS390::CheckPosition(int cp_offset,
+                                             Label* on_outside_input) {
+  if (cp_offset >= 0) {
+    __ CmpP(current_input_offset(), Operand(-cp_offset * char_size()));
+    BranchOrBacktrack(ge, on_outside_input);
+  } else {
+    __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+    __ AddP(r2, current_input_offset(), Operand(cp_offset * char_size()));
+    __ CmpP(r2, r3);
+    BranchOrBacktrack(le, on_outside_input);
+  }
+}
+
+void RegExpMacroAssemblerS390::BranchOrBacktrack(Condition condition, Label* to,
+                                                 CRegister cr) {
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ b(to);
+    return;
+  }
+  if (to == NULL) {
+    __ b(condition, &backtrack_label_);
+    return;
+  }
+  __ b(condition, to);
+}
+
+void RegExpMacroAssemblerS390::SafeCall(Label* to, Condition cond,
+                                        CRegister cr) {
+  Label skip;
+  __ b(NegateCondition(cond), &skip);
+  __ b(r14, to);
+  __ bind(&skip);
+}
+
+void RegExpMacroAssemblerS390::SafeReturn() {
+  __ pop(r14);
+  __ mov(ip, Operand(masm_->CodeObject()));
+  __ AddP(r14, ip);
+  __ Ret();
+}
+
+void RegExpMacroAssemblerS390::SafeCallTarget(Label* name) {
+  __ bind(name);
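+  // Convert the return address to an offset from the code object before
+  // pushing it, so it stays valid if the code object moves during GC.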
+  __ CleanseP(r14);
+  __ LoadRR(r0, r14);
+  __ mov(ip, Operand(masm_->CodeObject()));
+  __ SubP(r0, r0, ip);
+  __ push(r0);
+}
+
+void RegExpMacroAssemblerS390::Push(Register source) {
+  DCHECK(!source.is(backtrack_stackpointer()));
+  __ lay(backtrack_stackpointer(),
+         MemOperand(backtrack_stackpointer(), -kPointerSize));
+  __ StoreP(source, MemOperand(backtrack_stackpointer()));
+}
+
+void RegExpMacroAssemblerS390::Pop(Register target) {
+  DCHECK(!target.is(backtrack_stackpointer()));
+  __ LoadP(target, MemOperand(backtrack_stackpointer()));
+  __ la(backtrack_stackpointer(),
+        MemOperand(backtrack_stackpointer(), kPointerSize));
+}
+
+void RegExpMacroAssemblerS390::CheckPreemption() {
+  // Check for preemption.
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(isolate());
+  __ mov(r2, Operand(stack_limit));
+  __ CmpLogicalP(sp, MemOperand(r2));
+  SafeCall(&check_preempt_label_, le);
+}
+
+void RegExpMacroAssemblerS390::CheckStackLimit() {
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit(isolate());
+  __ mov(r2, Operand(stack_limit));
+  __ CmpLogicalP(backtrack_stackpointer(), MemOperand(r2));
+  SafeCall(&stack_overflow_label_, le);
+}
+
+void RegExpMacroAssemblerS390::CallCFunctionUsingStub(
+    ExternalReference function, int num_arguments) {
+  // Must pass all arguments in registers. The stub pushes on the stack.
+  DCHECK(num_arguments <= 8);
+  __ mov(code_pointer(), Operand(function));
+  Label ret;
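+  // Manually set up the return address: point it at the 'ret' label below,
+  // store it in the frame's return-address slot, then branch to the function.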
+  __ larl(r14, &ret);
+  __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+  __ b(code_pointer());
+  __ bind(&ret);
+  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+    __ LoadP(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+  } else {
+    __ la(sp, MemOperand(sp, (kNumRequiredStackFrameSlots * kPointerSize)));
+  }
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+bool RegExpMacroAssemblerS390::CanReadUnaligned() {
+  return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
+}
+
+void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                             int characters) {
+  DCHECK(characters == 1);
+  if (mode_ == LATIN1) {
+    __ LoadlB(current_character(),
+              MemOperand(current_input_offset(), end_of_input_address(),
+                         cp_offset * char_size()));
+  } else {
+    DCHECK(mode_ == UC16);
+    __ LoadLogicalHalfWordP(
+        current_character(),
+        MemOperand(current_input_offset(), end_of_input_address(),
+                   cp_offset * char_size()));
+  }
+}
+
+#undef __
+
+#endif  // V8_INTERPRETED_REGEXP
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/regexp/s390/regexp-macro-assembler-s390.h b/src/regexp/s390/regexp-macro-assembler-s390.h
new file mode 100644
index 0000000..60ca890
--- /dev/null
+++ b/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -0,0 +1,216 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#define V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+
+#include "src/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/s390/assembler-s390.h"
+#include "src/s390/frames-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone, Mode mode,
+                           int registers_to_save);
+  virtual ~RegExpMacroAssemblerS390();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(unsigned c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, bool read_backward,
+                                     Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               bool read_backward, bool unicode,
+                                               Label* on_no_match);
+  virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
+                                              Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<HeapObject> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
+  virtual void SetRegister(int register_index, int to);
+  virtual bool Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+  virtual bool CanReadUnaligned();
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address, Code* re_code,
+                                  Address re_frame);
+
+ private:
+  // Offsets from frame_pointer() of function parameters and stored registers.
+  static const int kFramePointer = 0;
+
+  // Above the frame pointer - Stored registers and stack-passed parameters.
+  // Registers r6-r15 (r15 is sp)
+  static const int kStoredRegisters = kFramePointer;
+  static const int kCallerFrame =
+      kStoredRegisters + kCalleeRegisterSaveAreaSize;
+  // Stack parameters placed by caller.
+  static const int kCaptureArraySize = kCallerFrame;
+  static const int kStackAreaBase = kCallerFrame + kPointerSize;
+  // kDirectCall again
+  static const int kSecondaryReturnAddress = kStackAreaBase + 2 * kPointerSize;
+  static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+  // Below the frame pointer.
+  // Register parameters stored by setup code.
+  static const int kDirectCall = kFramePointer - kPointerSize;
+  static const int kStackHighEnd = kDirectCall - kPointerSize;
+  static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+  static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+  static const int kInputEnd = kRegisterOutput - kPointerSize;
+  static const int kInputStart = kInputEnd - kPointerSize;
+  static const int kStartIndex = kInputStart - kPointerSize;
+  static const int kInputString = kStartIndex - kPointerSize;
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kSuccessfulCaptures = kInputString - kPointerSize;
+  static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+  void CallCFunctionUsingStub(ExternalReference function, int num_arguments);
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The frame-pointer-relative location of a regexp register.
+  MemOperand register_location(int register_index);
+
+  // Register holding the current input position as negative offset from
+  // the end of the string.
+  inline Register current_input_offset() { return r8; }
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return r9; }
+
+  // Register holding address of the end of the input string.
+  inline Register end_of_input_address() { return r10; }
+
+  // Register holding the frame address. Local variables, parameters and
+  // regexp registers are addressed relative to this.
+  inline Register frame_pointer() { return fp; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return r13; }
+
+  // Register holding pointer to the current code object.
+  inline Register code_pointer() { return r7; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument)
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to, CRegister cr = cr7);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to, Condition cond = al, CRegister cr = cr7);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // and increments it by a word size.
+  inline void Pop(Register target);
+
+  Isolate* isolate() const { return masm_->isolate(); }
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (Latin1 or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+  Label internal_failure_label_;
+};
+
+// Set of non-volatile registers saved/restored by generated regexp code.
+const RegList kRegExpCalleeSaved =
+    1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 | 1 << 11 | 1 << 13;
+
+#endif  // V8_INTERPRETED_REGEXP
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
diff --git a/src/regexp/x64/regexp-macro-assembler-x64.cc b/src/regexp/x64/regexp-macro-assembler-x64.cc
index 952034f..5d73b43 100644
--- a/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -1008,7 +1008,7 @@
   Handle<Code> code = isolate->factory()->NewCode(
       code_desc, Code::ComputeFlags(Code::REGEXP),
       masm_.CodeObject());
-  PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
+  PROFILE(isolate, RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/regexp/x87/regexp-macro-assembler-x87.cc b/src/regexp/x87/regexp-macro-assembler-x87.cc
index 6e62092..9f15b1c 100644
--- a/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -510,7 +510,8 @@
     __ and_(ebx, current_character());
     index = ebx;
   }
-  __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
+  __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize),
+          Immediate(0));
   BranchOrBacktrack(not_equal, on_bit_set);
 }
 
@@ -935,7 +936,8 @@
       isolate()->factory()->NewCode(code_desc,
                                     Code::ComputeFlags(Code::REGEXP),
                                     masm_->CodeObject());
-  PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source));
+  PROFILE(masm_->isolate(),
+          RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
   return Handle<HeapObject>::cast(code);
 }
 
diff --git a/src/register-configuration.cc b/src/register-configuration.cc
index 6b1655a..2df825a 100644
--- a/src/register-configuration.cc
+++ b/src/register-configuration.cc
@@ -91,6 +91,10 @@
                               kMaxAllocatableGeneralRegisterCount,
                               kMaxAllocatableDoubleRegisterCount,
                               kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_S390
+                              kMaxAllocatableGeneralRegisterCount,
+                              kMaxAllocatableDoubleRegisterCount,
+                              kMaxAllocatableDoubleRegisterCount,
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index e17cbb1..b76785d 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -90,7 +90,8 @@
 
 
 void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
-  if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
+  if (FLAG_trace_opt &&
+      function->shared()->PassesFilter(FLAG_hydrogen_filter)) {
     PrintF("[marking ");
     function->ShortPrint();
     PrintF(" for recompilation, reason: %s", reason);
diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc
index f651ed4..ab436c2 100644
--- a/src/runtime/runtime-array.cc
+++ b/src/runtime/runtime-array.cc
@@ -5,11 +5,12 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/code-stubs.h"
 #include "src/conversions-inl.h"
 #include "src/elements.h"
 #include "src/factory.h"
 #include "src/isolate-inl.h"
-#include "src/key-accumulator.h"
+#include "src/keys.h"
 #include "src/messages.h"
 #include "src/prototype.h"
 
@@ -29,17 +30,20 @@
   return Smi::FromInt(0);
 }
 
-
-static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
-                           const char* name, Builtins::Name builtin_name) {
+static void InstallCode(Isolate* isolate, Handle<JSObject> holder,
+                        const char* name, Handle<Code> code) {
   Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
-  Handle<Code> code(isolate->builtins()->builtin(builtin_name));
   Handle<JSFunction> optimized =
       isolate->factory()->NewFunctionWithoutPrototype(key, code);
   optimized->shared()->DontAdaptArguments();
   JSObject::AddProperty(holder, key, optimized, NONE);
 }
 
+static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
+                           const char* name, Builtins::Name builtin_name) {
+  InstallCode(isolate, holder, name,
+              handle(isolate->builtins()->builtin(builtin_name), isolate));
+}
 
 RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
@@ -48,7 +52,8 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
-  InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
+  FastArrayPushStub stub(isolate);
+  InstallCode(isolate, holder, "push", stub.GetCode());
   InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
   InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
   InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
@@ -88,29 +93,6 @@
 }
 
 
-// Push an object unto an array of objects if it is not already in the
-// array.  Returns true if the element was pushed on the stack and
-// false otherwise.
-RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, element, 1);
-  RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
-  int length = Smi::cast(array->length())->value();
-  FixedArray* elements = FixedArray::cast(array->elements());
-  for (int i = 0; i < length; i++) {
-    if (elements->get(i) == *element) return isolate->heap()->false_value();
-  }
-
-  // Strict not needed. Used for cycle detection in Array join implementation.
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate, JSObject::AddDataElement(array, length, element, NONE));
-  JSObject::ValidateElements(array);
-  return isolate->heap()->true_value();
-}
-
-
 // Moves all own elements of an object, that are below a limit, to positions
 // starting at zero. All undefined values are placed after non-undefined values,
 // and are followed by non-existing element. Does not change the length
@@ -234,12 +216,19 @@
     JSObject::CollectOwnElementKeys(current, &accumulator, ALL_PROPERTIES);
   }
   // Erase any keys >= length.
-  // TODO(adamk): Remove this step when the contract of %GetArrayKeys
-  // is changed to let this happen on the JS side.
   Handle<FixedArray> keys = accumulator.GetKeys(KEEP_NUMBERS);
+  int j = 0;
   for (int i = 0; i < keys->length(); i++) {
-    if (NumberToUint32(keys->get(i)) >= length) keys->set_undefined(i);
+    if (NumberToUint32(keys->get(i)) >= length) continue;
+    if (i != j) keys->set(j, keys->get(i));
+    j++;
   }
+
+  if (j != keys->length()) {
+    isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+        *keys, keys->length() - j);
+  }
+
   return *isolate->factory()->NewJSArrayWithElements(keys);
 }
 
@@ -383,7 +372,6 @@
                                 caller_args);
 }
 
-
 RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
   HandleScope scope(isolate);
   Arguments empty_args(0, NULL);
diff --git a/src/runtime/runtime-classes.cc b/src/runtime/runtime-classes.cc
index e27685d..3f10225 100644
--- a/src/runtime/runtime-classes.cc
+++ b/src/runtime/runtime-classes.cc
@@ -124,14 +124,6 @@
   Handle<Map> map =
       isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
   map->set_is_prototype_map(true);
-  if (constructor->map()->is_strong()) {
-    map->set_is_strong();
-    if (super_class->IsNull()) {
-      // Strong class is not permitted to extend null.
-      THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kStrongExtendNull),
-                      Object);
-    }
-  }
   Map::SetPrototype(map, prototype_parent);
   map->SetConstructor(*constructor);
   Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
@@ -206,19 +198,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, constructor, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, prototype, 1);
-
   JSObject::MigrateSlowToFast(constructor, 0, "RuntimeToFastProperties");
-
-  if (constructor->map()->is_strong()) {
-    DCHECK(prototype->map()->is_strong());
-    MAYBE_RETURN(JSReceiver::SetIntegrityLevel(prototype, FROZEN,
-                                               Object::THROW_ON_ERROR),
-                 isolate->heap()->exception());
-    MAYBE_RETURN(JSReceiver::SetIntegrityLevel(constructor, FROZEN,
-                                               Object::THROW_ON_ERROR),
-                 isolate->heap()->exception());
-  }
   return *constructor;
 }
 
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index 263c4f9..89a6fa1 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -21,6 +21,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
 #ifdef DEBUG
   if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
     PrintF("[unoptimized: ");
@@ -28,63 +29,28 @@
     PrintF("]\n");
   }
 #endif
+
   StackLimitCheck check(isolate);
   if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
-
-  // Compile the target function.
-  DCHECK(function->shared()->allows_lazy_compilation());
-
-  Handle<Code> code;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
-                                     Compiler::GetLazyCode(function));
-  DCHECK(code->IsJavaScriptCode());
-
-  function->ReplaceCode(*code);
-  return *code;
-}
-
-
-namespace {
-
-Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
-                         Compiler::ConcurrencyMode mode) {
-  StackLimitCheck check(isolate);
-  if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
-
-  Handle<Code> code;
-  if (Compiler::GetOptimizedCode(function, mode).ToHandle(&code)) {
-    // Optimization succeeded, return optimized code.
-    function->ReplaceCode(*code);
-  } else {
-    // Optimization failed, get unoptimized code.
-    if (isolate->has_pending_exception()) {  // Possible stack overflow.
-      return isolate->heap()->exception();
-    }
-    code = Handle<Code>(function->shared()->code(), isolate);
-    if (code->kind() != Code::FUNCTION &&
-        code->kind() != Code::OPTIMIZED_FUNCTION) {
-      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-          isolate, code, Compiler::GetUnoptimizedCode(function));
-    }
-    function->ReplaceCode(*code);
+  if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
+    return isolate->heap()->exception();
   }
-
-  DCHECK(function->code()->kind() == Code::FUNCTION ||
-         function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
-         (function->code()->is_interpreter_entry_trampoline() &&
-          function->shared()->HasBytecodeArray()) ||
-         function->IsInOptimizationQueue());
+  DCHECK(function->is_compiled());
   return function->code();
 }
 
-}  // namespace
-
 
 RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  return CompileOptimized(isolate, function, Compiler::CONCURRENT);
+  StackLimitCheck check(isolate);
+  if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+  if (!Compiler::CompileOptimized(function, Compiler::CONCURRENT)) {
+    return isolate->heap()->exception();
+  }
+  DCHECK(function->is_compiled());
+  return function->code();
 }
 
 
@@ -92,7 +58,13 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  return CompileOptimized(isolate, function, Compiler::NOT_CONCURRENT);
+  StackLimitCheck check(isolate);
+  if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+  if (!Compiler::CompileOptimized(function, Compiler::NOT_CONCURRENT)) {
+    return isolate->heap()->exception();
+  }
+  DCHECK(function->is_compiled());
+  return function->code();
 }
 
 
@@ -150,10 +122,6 @@
   deoptimizer->MaterializeHeapObjects(&it);
   delete deoptimizer;
 
-  JavaScriptFrame* frame = it.frame();
-  RUNTIME_ASSERT(frame->function()->IsJSFunction());
-  DCHECK(frame->function() == *function);
-
   // Ensure the context register is updated for materialized objects.
   JavaScriptFrameIterator top_it(isolate);
   JavaScriptFrame* top_frame = top_it.frame();
@@ -163,7 +131,10 @@
     return isolate->heap()->undefined_value();
   }
 
-  // Search for other activations of the same function and code.
+  // Search for other activations of the same optimized code.
+  // At this point {it} is at the topmost frame of all the frames materialized
+  // by the deoptimizer. Note that this frame does not necessarily represent
+  // an activation of {function} because of potential inlined tail-calls.
   ActivationsFinder activations_finder(*optimized_code);
   activations_finder.VisitFrames(&it);
   isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
@@ -240,59 +211,17 @@
   DCHECK(caller_code->contains(frame->pc()));
 #endif  // DEBUG
 
-
   BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
   DCHECK(!ast_id.IsNone());
 
-  // Disable concurrent OSR for asm.js, to enable frame specialization.
-  Compiler::ConcurrencyMode mode = (isolate->concurrent_osr_enabled() &&
-                                    !function->shared()->asm_function() &&
-                                    function->shared()->ast_node_count() > 512)
-                                       ? Compiler::CONCURRENT
-                                       : Compiler::NOT_CONCURRENT;
-
-  OptimizedCompileJob* job = NULL;
-  if (mode == Compiler::CONCURRENT) {
-    // Gate the OSR entry with a stack check.
-    BackEdgeTable::AddStackCheck(caller_code, pc_offset);
-    // Poll already queued compilation jobs.
-    OptimizingCompileDispatcher* dispatcher =
-        isolate->optimizing_compile_dispatcher();
-    if (dispatcher->IsQueuedForOSR(function, ast_id)) {
-      if (FLAG_trace_osr) {
-        PrintF("[OSR - Still waiting for queued: ");
-        function->PrintName();
-        PrintF(" at AST id %d]\n", ast_id.ToInt());
-      }
-      return NULL;
-    }
-
-    job = dispatcher->FindReadyOSRCandidate(function, ast_id);
-  }
-
   MaybeHandle<Code> maybe_result;
-  if (job != NULL) {
-    if (FLAG_trace_osr) {
-      PrintF("[OSR - Found ready: ");
-      function->PrintName();
-      PrintF(" at AST id %d]\n", ast_id.ToInt());
-    }
-    maybe_result = Compiler::GetConcurrentlyOptimizedCode(job);
-  } else if (IsSuitableForOnStackReplacement(isolate, function)) {
+  if (IsSuitableForOnStackReplacement(isolate, function)) {
     if (FLAG_trace_osr) {
       PrintF("[OSR - Compiling: ");
       function->PrintName();
       PrintF(" at AST id %d]\n", ast_id.ToInt());
     }
-    maybe_result = Compiler::GetOptimizedCode(
-        function, mode, ast_id,
-        (mode == Compiler::NOT_CONCURRENT) ? frame : nullptr);
-    Handle<Code> result;
-    if (maybe_result.ToHandle(&result) &&
-        result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
-      // Optimization is queued.  Return to check later.
-      return NULL;
-    }
+    maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
   }
 
   // Revert the patched back edge table, regardless of whether OSR succeeds.
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index c29ea9a..ad8375a 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -5,11 +5,13 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
-#include "src/debug/debug.h"
 #include "src/debug/debug-evaluate.h"
 #include "src/debug/debug-frames.h"
 #include "src/debug/debug-scopes.h"
+#include "src/debug/debug.h"
 #include "src/frames-inl.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
 #include "src/runtime/runtime.h"
 
@@ -18,11 +20,39 @@
 
 RUNTIME_FUNCTION(Runtime_DebugBreak) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK(args.length() == 1);
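+  // Remember the value at the break site; whatever the debugger leaves in the
+  // return value slot is handed back as the result below.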
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+  isolate->debug()->set_return_value(value);
+
   // Get the top-most JavaScript frame.
   JavaScriptFrameIterator it(isolate);
-  isolate->debug()->Break(args, it.frame());
-  return isolate->debug()->SetAfterBreakTarget(it.frame());
+  isolate->debug()->Break(it.frame());
+
+  isolate->debug()->SetAfterBreakTarget(it.frame());
+  return *isolate->debug()->return_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+  isolate->debug()->set_return_value(value);
+
+  // Get the top-most JavaScript frame.
+  JavaScriptFrameIterator it(isolate);
+  isolate->debug()->Break(it.frame());
+
+  // Return the handler from the original bytecode array.
+  DCHECK(it.frame()->is_interpreted());
+  InterpretedFrame* interpreted_frame =
+      reinterpret_cast<InterpretedFrame*>(it.frame());
+  SharedFunctionInfo* shared = interpreted_frame->function()->shared();
+  BytecodeArray* bytecode_array = shared->bytecode_array();
+  int bytecode_offset = interpreted_frame->GetBytecodeOffset();
+  interpreter::Bytecode bytecode =
+      interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
+  return isolate->interpreter()->GetBytecodeHandler(
+      bytecode, interpreter::OperandScale::kSingle);
 }
 
 
@@ -302,8 +332,8 @@
   if (name->AsArrayIndex(&index)) {
     Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
     Handle<Object> element_or_char;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_or_char,
-                                       Object::GetElement(isolate, obj, index));
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, element_or_char, JSReceiver::GetElement(isolate, obj, index));
     details->set(0, *element_or_char);
     details->set(1, PropertyDetails::Empty().AsSmi());
     return *isolate->factory()->NewJSArrayWithElements(details);
@@ -418,8 +448,8 @@
   RUNTIME_ASSERT(obj->HasIndexedInterceptor());
   CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
   Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     Object::GetElement(isolate, obj, index));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, JSReceiver::GetElement(isolate, obj, index));
   return *result;
 }
 
@@ -554,7 +584,11 @@
     // Use the value from the stack.
     if (scope_info->LocalIsSynthetic(i)) continue;
     locals->set(local * 2, scope_info->LocalName(i));
-    locals->set(local * 2 + 1, *(frame_inspector.GetExpression(i)));
+    Handle<Object> value = frame_inspector.GetExpression(i);
+    // TODO(yangguo): We convert optimized out values to {undefined} when they
+    // are passed to the debugger. Eventually we should handle them somehow.
+    if (value->IsOptimizedOut()) value = isolate->factory()->undefined_value();
+    locals->set(local * 2 + 1, *value);
     local++;
   }
   if (local < local_count) {
@@ -587,31 +621,7 @@
   // to the frame information.
   Handle<Object> return_value = isolate->factory()->undefined_value();
   if (at_return) {
-    StackFrameIterator it2(isolate);
-    Address internal_frame_sp = NULL;
-    while (!it2.done()) {
-      if (it2.frame()->is_internal()) {
-        internal_frame_sp = it2.frame()->sp();
-      } else {
-        if (it2.frame()->is_java_script()) {
-          if (it2.frame()->id() == it.frame()->id()) {
-            // The internal frame just before the JavaScript frame contains the
-            // value to return on top. A debug break at return will create an
-            // internal frame to store the return value (eax/rax/r0) before
-            // entering the debug break exit frame.
-            if (internal_frame_sp != NULL) {
-              return_value =
-                  Handle<Object>(Memory::Object_at(internal_frame_sp), isolate);
-              break;
-            }
-          }
-        }
-
-        // Indicate that the previous frame was not an internal frame.
-        internal_frame_sp = NULL;
-      }
-      it2.Advance();
-    }
+    return_value = isolate->debug()->return_value();
   }
 
   // Now advance to the arguments adapter frame (if any). It contains all
@@ -740,33 +750,6 @@
 }
 
 
-// Returns the list of step-in positions (text offset) in a function of the
-// stack frame in a range from the current debug break position to the end
-// of the corresponding statement.
-RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
-  RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
-
-  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
-
-  // Get the frame where the debugging is performed.
-  StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
-  JavaScriptFrameIterator frame_it(isolate, id);
-  RUNTIME_ASSERT(!frame_it.done());
-
-  List<int> positions;
-  isolate->debug()->GetStepinPositions(frame_it.frame(), id, &positions);
-  Factory* factory = isolate->factory();
-  Handle<FixedArray> array = factory->NewFixedArray(positions.length());
-  for (int i = 0; i < positions.length(); ++i) {
-    array->set(i, Smi::FromInt(positions[i]));
-  }
-  return *factory->NewJSArrayWithElements(array, FAST_SMI_ELEMENTS);
-}
-
-
 // Return an array with scope details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -1652,15 +1635,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_DebugPromiseEvent) {
-  DCHECK(args.length() == 1);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0);
-  isolate->debug()->OnPromiseEvent(data);
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
   DCHECK(args.length() == 1);
   HandleScope scope(isolate);
diff --git a/src/runtime/runtime-forin.cc b/src/runtime/runtime-forin.cc
index c44945c..4b558d1 100644
--- a/src/runtime/runtime-forin.cc
+++ b/src/runtime/runtime-forin.cc
@@ -5,8 +5,10 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/elements.h"
 #include "src/factory.h"
 #include "src/isolate-inl.h"
+#include "src/keys.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
@@ -20,30 +22,82 @@
 // deletions during a for-in.
 MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
   Isolate* const isolate = receiver->GetIsolate();
+  FastKeyAccumulator accumulator(isolate, receiver, INCLUDE_PROTOS,
+                                 ENUMERABLE_STRINGS);
+  accumulator.set_filter_proxy_keys(false);
   // Test if we have an enum cache for {receiver}.
-  if (!receiver->IsSimpleEnum()) {
+  if (!accumulator.is_receiver_simple_enum()) {
     Handle<FixedArray> keys;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, keys,
-        JSReceiver::GetKeys(receiver, INCLUDE_PROTOS, ENUMERABLE_STRINGS),
-        HeapObject);
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, keys, accumulator.GetKeys(KEEP_NUMBERS),
+                               HeapObject);
     // Test again, since cache may have been built by GetKeys() calls above.
-    if (!receiver->IsSimpleEnum()) return keys;
+    if (!accumulator.is_receiver_simple_enum()) return keys;
   }
   return handle(receiver->map(), isolate);
 }
 
+// This is a slight modification of JSReceiver::HasProperty, dealing with
+// the oddities of JSProxy in the for-in filter.
+MaybeHandle<Object> HasEnumerableProperty(Isolate* isolate,
+                                          Handle<JSReceiver> receiver,
+                                          Handle<Object> key) {
+  bool success = false;
+  Maybe<PropertyAttributes> result = Just(ABSENT);
+  LookupIterator it =
+      LookupIterator::PropertyOrElement(isolate, receiver, key, &success);
+  if (!success) return isolate->factory()->undefined_value();
+  for (; it.IsFound(); it.Next()) {
+    switch (it.state()) {
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+      case LookupIterator::JSPROXY: {
+        // For proxies we have to invoke the [[GetOwnProperty]] trap.
+        result = JSProxy::GetPropertyAttributes(&it);
+        if (result.IsNothing()) return MaybeHandle<Object>();
+        if (result.FromJust() == ABSENT) {
+          // Continue lookup on the proxy's prototype.
+          Handle<JSProxy> proxy = it.GetHolder<JSProxy>();
+          Handle<Object> prototype;
+          ASSIGN_RETURN_ON_EXCEPTION(isolate, prototype,
+                                     JSProxy::GetPrototype(proxy), Object);
+          if (prototype->IsNull()) break;
+          // We already have a stack-check in JSProxy::GetPrototype.
+          return HasEnumerableProperty(
+              isolate, Handle<JSReceiver>::cast(prototype), key);
+        } else if (result.FromJust() & DONT_ENUM) {
+          return isolate->factory()->undefined_value();
+        } else {
+          return it.GetName();
+        }
+      }
+      case LookupIterator::INTERCEPTOR: {
+        result = JSObject::GetPropertyAttributesWithInterceptor(&it);
+        if (result.IsNothing()) return MaybeHandle<Object>();
+        if (result.FromJust() != ABSENT) return it.GetName();
+        continue;
+      }
+      case LookupIterator::ACCESS_CHECK: {
+        if (it.HasAccess()) continue;
+        result = JSObject::GetPropertyAttributesWithFailedAccessCheck(&it);
+        if (result.IsNothing()) return MaybeHandle<Object>();
+        if (result.FromJust() != ABSENT) return it.GetName();
+        return isolate->factory()->undefined_value();
+      }
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
+        // TypedArray out-of-bounds access.
+        return isolate->factory()->undefined_value();
+      case LookupIterator::ACCESSOR:
+      case LookupIterator::DATA:
+        return it.GetName();
+    }
+  }
+  return isolate->factory()->undefined_value();
+}
 
 MaybeHandle<Object> Filter(Handle<JSReceiver> receiver, Handle<Object> key) {
   Isolate* const isolate = receiver->GetIsolate();
-  // TODO(turbofan): Fast case for array indices.
-  Handle<Name> name;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
-                             Object);
-  Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
-  MAYBE_RETURN_NULL(result);
-  if (result.FromJust()) return name;
-  return isolate->factory()->undefined_value();
+  return HasEnumerableProperty(isolate, receiver, key);
 }
 
 }  // namespace
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index d424a9e..011f9ff 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -16,11 +16,20 @@
 namespace internal {
 
 RUNTIME_FUNCTION(Runtime_FunctionGetName) {
-  SealHandleScope shs(isolate);
+  HandleScope scope(isolate);
   DCHECK(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
-  return f->shared()->name();
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+  if (function->IsJSBoundFunction()) {
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, JSBoundFunction::GetName(
+                             isolate, Handle<JSBoundFunction>::cast(function)));
+    return *result;
+  } else {
+    RUNTIME_ASSERT(function->IsJSFunction());
+    return Handle<JSFunction>::cast(function)->shared()->name();
+  }
 }
 
 
@@ -96,6 +105,14 @@
   return Smi::FromInt(abstract_code->SourcePosition(offset));
 }
 
+RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  FixedArray* array = fun->native_context()->embedder_data();
+  return array->get(v8::Context::kDebugIdIndex);
+}
 
 RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
   SealHandleScope shs(isolate);
@@ -153,7 +170,7 @@
   Handle<SharedFunctionInfo> target_shared(target->shared());
   Handle<SharedFunctionInfo> source_shared(source->shared());
 
-  if (!Compiler::Compile(source, KEEP_EXCEPTION)) {
+  if (!Compiler::Compile(source, Compiler::KEEP_EXCEPTION)) {
     return isolate->heap()->exception();
   }
 
@@ -168,7 +185,7 @@
   // of the target shared function info.
   target_shared->ReplaceCode(source_shared->code());
   if (source_shared->HasBytecodeArray()) {
-    target_shared->set_function_data(source_shared->bytecode_array());
+    target_shared->set_bytecode_array(source_shared->bytecode_array());
   }
   target_shared->set_scope_info(source_shared->scope_info());
   target_shared->set_length(source_shared->length());
@@ -204,8 +221,8 @@
 
   if (isolate->logger()->is_logging_code_events() ||
       isolate->cpu_profiler()->is_profiling()) {
-    isolate->logger()->LogExistingFunction(source_shared,
-                                           Handle<Code>(source_shared->code()));
+    isolate->logger()->LogExistingFunction(
+        source_shared, Handle<AbstractCode>(source_shared->abstract_code()));
   }
 
   return *target;
@@ -253,7 +270,7 @@
   HandleScope scope(isolate);
   DCHECK_LE(2, args.length());
   int const argc = args.length() - 2;
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, target, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
   ScopedVector<Handle<Object>> argv(argc);
   for (int i = 0; i < argc; ++i) {
@@ -267,61 +284,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_TailCall) {
-  HandleScope scope(isolate);
-  DCHECK_LE(2, args.length());
-  int const argc = args.length() - 2;
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
-  ScopedVector<Handle<Object>> argv(argc);
-  for (int i = 0; i < argc; ++i) {
-    argv[i] = args.at<Object>(2 + i);
-  }
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Execution::Call(isolate, target, receiver, argc, argv.start()));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_Apply) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 5);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
-  CONVERT_INT32_ARG_CHECKED(offset, 3);
-  CONVERT_INT32_ARG_CHECKED(argc, 4);
-  RUNTIME_ASSERT(offset >= 0);
-  // Loose upper bound to allow fuzzing. We'll most likely run out of
-  // stack space before hitting this limit.
-  static int kMaxArgc = 1000000;
-  RUNTIME_ASSERT(argc >= 0 && argc <= kMaxArgc);
-
-  // If there are too many arguments, allocate argv via malloc.
-  const int argv_small_size = 10;
-  Handle<Object> argv_small_buffer[argv_small_size];
-  base::SmartArrayPointer<Handle<Object> > argv_large_buffer;
-  Handle<Object>* argv = argv_small_buffer;
-  if (argc > argv_small_size) {
-    argv = new Handle<Object>[argc];
-    if (argv == NULL) return isolate->StackOverflow();
-    argv_large_buffer = base::SmartArrayPointer<Handle<Object> >(argv);
-  }
-
-  for (int i = 0; i < argc; ++i) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, argv[i], Object::GetElement(isolate, arguments, offset + i));
-  }
-
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Execution::Call(isolate, fun, receiver, argc, argv));
-  return *result;
-}
-
-
 // ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
 RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
   HandleScope scope(isolate);
@@ -342,14 +304,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ThrowStrongModeTooFewArguments) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  THROW_NEW_ERROR_RETURN_FAILURE(isolate,
-                                 NewTypeError(MessageTemplate::kStrongArity));
-}
-
-
 RUNTIME_FUNCTION(Runtime_FunctionToString) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
diff --git a/src/runtime/runtime-generator.cc b/src/runtime/runtime-generator.cc
index dab0621..181b5f9 100644
--- a/src/runtime/runtime-generator.cc
+++ b/src/runtime/runtime-generator.cc
@@ -43,6 +43,8 @@
   JavaScriptFrame* frame = stack_iterator.frame();
   RUNTIME_ASSERT(frame->function()->shared()->is_generator());
   DCHECK_EQ(frame->function(), generator_object->function());
+  DCHECK(frame->function()->shared()->is_compiled());
+  DCHECK(!frame->function()->IsOptimized());
 
   // The caller should have saved the context and continuation already.
   DCHECK_EQ(generator_object->context(), Context::cast(frame->context()));
@@ -88,18 +90,18 @@
   JavaScriptFrame* frame = stack_iterator.frame();
 
   DCHECK_EQ(frame->function(), generator_object->function());
-  DCHECK(frame->function()->is_compiled());
+  DCHECK(frame->function()->shared()->is_compiled());
+  DCHECK(!frame->function()->IsOptimized());
 
   STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
   STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
 
-  Address pc = generator_object->function()->code()->instruction_start();
+  Code* code = generator_object->function()->shared()->code();
   int offset = generator_object->continuation();
-  DCHECK(offset > 0);
-  frame->set_pc(pc + offset);
+  DCHECK_GT(offset, 0);
+  frame->set_pc(code->instruction_start() + offset);
   if (FLAG_enable_embedded_constant_pool) {
-    frame->set_constant_pool(
-        generator_object->function()->code()->constant_pool());
+    frame->set_constant_pool(code->constant_pool());
   }
   generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
 
@@ -148,16 +150,6 @@
 }
 
 
-// Returns context of generator activation.
-RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
-  return generator->context();
-}
-
-
 // Returns receiver of generator activation.
 RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
   HandleScope scope(isolate);
@@ -203,26 +195,23 @@
   return isolate->heap()->undefined_value();
 }
 
-
-// Optimization for the following three functions is disabled in
-// js/generator.js and compiler/ast-graph-builder.cc.
-
+// Optimization for builtins calling any of the following three functions is
+// disabled in js/generator.js and compiler.cc, hence they are unreachable.
 
 RUNTIME_FUNCTION(Runtime_GeneratorNext) {
   UNREACHABLE();
   return nullptr;
 }
 
-
 RUNTIME_FUNCTION(Runtime_GeneratorReturn) {
   UNREACHABLE();
   return nullptr;
 }
 
-
 RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
   UNREACHABLE();
   return nullptr;
 }
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-i18n.cc b/src/runtime/runtime-i18n.cc
index e57f8d3..27f970b 100644
--- a/src/runtime/runtime-i18n.cc
+++ b/src/runtime/runtime-i18n.cc
@@ -158,8 +158,8 @@
   Handle<Name> base = factory->NewStringFromStaticChars("base");
   for (unsigned int i = 0; i < length; ++i) {
     Handle<Object> locale_id;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, locale_id,
-                                       Object::GetElement(isolate, input, i));
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, locale_id, JSReceiver::GetElement(isolate, input, i));
     if (!locale_id->IsString()) {
       return isolate->Throw(*factory->illegal_argument_string());
     }
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc
index 0ca2e84..d871fc7 100644
--- a/src/runtime/runtime-internal.cc
+++ b/src/runtime/runtime-internal.cc
@@ -171,14 +171,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ThrowStrongModeImplicitConversion) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  THROW_NEW_ERROR_RETURN_FAILURE(
-      isolate, NewTypeError(MessageTemplate::kStrongImplicitConversion));
-}
-
-
 RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -271,7 +263,7 @@
 RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, error_object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, caller, 1);
 
   if (!isolate->bootstrapper()->IsActive()) {
@@ -317,7 +309,6 @@
   return *result;
 }
 
-
 #define CALLSITE_GET(NAME, RETURN)                          \
   RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) {            \
     HandleScope scope(isolate);                             \
@@ -325,7 +316,7 @@
     CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
     Handle<String> result;                                  \
     CallSite call_site(isolate, call_site_obj);             \
-    RUNTIME_ASSERT(call_site.IsValid())                     \
+    RUNTIME_ASSERT(call_site.IsValid());                    \
     return RETURN(call_site.NAME(), isolate);               \
   }
 
@@ -366,18 +357,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(String, name, 0);
-
-  if (FLAG_native_code_counters) {
-    StatsCounter(isolate, name->ToCString().get()).Increment();
-  }
-  return isolate->heap()->undefined_value();
-}
-
-
 namespace {
 
 bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
@@ -407,7 +386,7 @@
 Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
   MessageLocation location;
   if (ComputeLocation(isolate, &location)) {
-    Zone zone;
+    Zone zone(isolate->allocator());
     base::SmartPointer<ParseInfo> info(
         location.function()->shared()->is_function()
             ? new ParseInfo(&zone, location.function())
@@ -477,6 +456,12 @@
   return isolate->heap()->undefined_value();
 }
 
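+// ES6 7.3.19: returns the OrdinaryHasInstance function from the current
+// native context.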
+RUNTIME_FUNCTION(Runtime_GetOrdinaryHasInstance) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+
+  return isolate->native_context()->ordinary_has_instance();
+}
 
 RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
   HandleScope scope(isolate);
diff --git a/src/runtime/runtime-interpreter.cc b/src/runtime/runtime-interpreter.cc
index 7150a8b..22ae911 100644
--- a/src/runtime/runtime-interpreter.cc
+++ b/src/runtime/runtime-interpreter.cc
@@ -16,30 +16,6 @@
 namespace v8 {
 namespace internal {
 
-RUNTIME_FUNCTION(Runtime_InterpreterToBoolean) {
-  SealHandleScope shs(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_CHECKED(Object, x, 0);
-  return isolate->heap()->ToBoolean(x->BooleanValue());
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterLogicalNot) {
-  SealHandleScope shs(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_CHECKED(Object, x, 0);
-  return isolate->heap()->ToBoolean(!x->BooleanValue());
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterTypeOf) {
-  HandleScope shs(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  return Object::cast(*Object::TypeOf(isolate, x));
-}
-
-
 RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -52,10 +28,24 @@
 
 namespace {
 
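+// Advances {bytecode_iterator} to the bytecode covering {offset}. For widened
+// bytecodes the iterator stops at the operand-scale prefix, so its reported
+// offset may be one less than the requested offset.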
+void AdvanceToOffsetForTracing(
+    interpreter::BytecodeArrayIterator& bytecode_iterator, int offset) {
+  while (bytecode_iterator.current_offset() +
+             bytecode_iterator.current_bytecode_size() <=
+         offset) {
+    bytecode_iterator.Advance();
+  }
+  DCHECK(bytecode_iterator.current_offset() == offset ||
+         ((bytecode_iterator.current_offset() + 1) == offset &&
+          bytecode_iterator.current_operand_scale() >
+              interpreter::OperandScale::kSingle));
+}
+
 void PrintRegisters(std::ostream& os, bool is_input,
-                    Handle<BytecodeArray> bytecode_array, int bytecode_offset,
+                    interpreter::BytecodeArrayIterator& bytecode_iterator,
                     Handle<Object> accumulator) {
-  static const int kRegFieldWidth = static_cast<int>(strlen("accumulator"));
+  static const char kAccumulator[] = "accumulator";
+  static const int kRegFieldWidth = static_cast<int>(sizeof(kAccumulator) - 1);
   static const char* kInputColourCode = "\033[0;36m";
   static const char* kOutputColourCode = "\033[0;35m";
   static const char* kNormalColourCode = "\033[0;m";
@@ -64,22 +54,24 @@
     os << (is_input ? kInputColourCode : kOutputColourCode);
   }
 
+  interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+
   // Print accumulator.
-  os << "      [ accumulator" << kArrowDirection;
-  accumulator->ShortPrint();
-  os << " ]" << std::endl;
+  if ((is_input && interpreter::Bytecodes::ReadsAccumulator(bytecode)) ||
+      (!is_input && interpreter::Bytecodes::WritesAccumulator(bytecode))) {
+    os << "      [ " << kAccumulator << kArrowDirection;
+    accumulator->ShortPrint();
+    os << " ]" << std::endl;
+  }
 
   // Find the location of the register file.
-  JavaScriptFrameIterator frame_iterator(bytecode_array->GetIsolate());
+  JavaScriptFrameIterator frame_iterator(
+      bytecode_iterator.bytecode_array()->GetIsolate());
   JavaScriptFrame* frame = frame_iterator.frame();
   Address register_file =
       frame->fp() + InterpreterFrameConstants::kRegisterFilePointerFromFp;
 
   // Print the registers.
-  interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
-  bytecode_iterator.set_current_offset(
-      bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag);
-  interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
   int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
   for (int operand_index = 0; operand_index < operand_count; operand_index++) {
     interpreter::OperandType operand_type =
@@ -98,7 +90,7 @@
         Object* reg_object = Memory::Object_at(reg_location);
         os << "      [ " << std::setw(kRegFieldWidth)
            << interpreter::Register(reg_index).ToString(
-                  bytecode_array->parameter_count())
+                  bytecode_iterator.bytecode_array()->parameter_count())
            << kArrowDirection;
         reg_object->ShortPrint(os);
         os << " ]" << std::endl;
@@ -120,20 +112,23 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
   OFStream os(stdout);
 
-  // Print bytecode.
-  const uint8_t* bytecode_address =
-      reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
-  Vector<char> buf = Vector<char>::New(50);
-  SNPrintF(buf, "%p", bytecode_address);
-  os << " -> " << buf.start() << " (" << bytecode_offset << ") : ";
-  interpreter::Bytecodes::Decode(os, bytecode_address,
-                                 bytecode_array->parameter_count());
-  os << std::endl;
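+  // The runtime is handed an offset relative to the tagged BytecodeArray
+  // object; translate it into an offset into the bytecode stream.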
+  int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+  interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
+  AdvanceToOffsetForTracing(bytecode_iterator, offset);
+  if (offset == bytecode_iterator.current_offset()) {
+    // Print bytecode.
+    const uint8_t* bytecode_address =
+        reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
+    os << " -> " << static_cast<const void*>(bytecode_address)
+       << " (" << bytecode_offset << ") : ";
+    interpreter::Bytecodes::Decode(os, bytecode_address,
+                                   bytecode_array->parameter_count());
+    os << std::endl;
+    // Print all input registers and accumulator.
+    PrintRegisters(os, true, bytecode_iterator, accumulator);
 
-  // Print all input registers and accumulator.
-  PrintRegisters(os, true, bytecode_array, bytecode_offset, accumulator);
-
-  os << std::flush;
+    os << std::flush;
+  }
   return isolate->heap()->undefined_value();
 }
 
@@ -143,11 +138,21 @@
   CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
   CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
-  OFStream os(stdout);
 
-  // Print all output registers and accumulator.
-  PrintRegisters(os, false, bytecode_array, bytecode_offset, accumulator);
-  os << std::flush;
+  int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+  interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
+  AdvanceToOffsetForTracing(bytecode_iterator, offset);
+  // The offset comparison here ensures registers are only printed once the
+  // (potentially) widened bytecode has completed. The iterator reports the
+  // offset of the prefix bytecode, not of the widened bytecode itself.
+  if (bytecode_iterator.current_operand_scale() ==
+          interpreter::OperandScale::kSingle ||
+      offset > bytecode_iterator.current_offset()) {
+    OFStream os(stdout);
+    // Print all output registers and accumulator.
+    PrintRegisters(os, false, bytecode_iterator, accumulator);
+    os << std::flush;
+  }
   return isolate->heap()->undefined_value();
 }
 
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index e730957..f14a7cf 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -16,7 +16,7 @@
 
 static Handle<Map> ComputeObjectLiteralMap(
     Handle<Context> context, Handle<FixedArray> constant_properties,
-    bool is_strong, bool* is_result_from_cache) {
+    bool* is_result_from_cache) {
   int properties_length = constant_properties->length();
   int number_of_properties = properties_length / 2;
 
@@ -30,18 +30,16 @@
   }
   Isolate* isolate = context->GetIsolate();
   return isolate->factory()->ObjectLiteralMapFromCache(
-      context, number_of_properties, is_strong, is_result_from_cache);
+      context, number_of_properties, is_result_from_cache);
 }
 
 MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
     Isolate* isolate, Handle<LiteralsArray> literals,
-    Handle<FixedArray> constant_properties, bool is_strong);
-
+    Handle<FixedArray> constant_properties);
 
 MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
     Isolate* isolate, Handle<LiteralsArray> literals,
-    Handle<FixedArray> constant_properties, bool should_have_fast_elements,
-    bool has_function_literal, bool is_strong) {
+    Handle<FixedArray> constant_properties, bool should_have_fast_elements) {
   Handle<Context> context = isolate->native_context();
 
   // In case we have function literals, we want the object to be in
@@ -49,12 +47,8 @@
   // maps with constant functions can't be shared if the functions are
   // not the same (which is the common case).
   bool is_result_from_cache = false;
-  Handle<Map> map = has_function_literal
-      ? Handle<Map>(is_strong
-                        ? context->js_object_strong_map()
-                        : context->object_function()->initial_map())
-      : ComputeObjectLiteralMap(context, constant_properties, is_strong,
-                                &is_result_from_cache);
+  Handle<Map> map = ComputeObjectLiteralMap(context, constant_properties,
+                                            &is_result_from_cache);
 
   PretenureFlag pretenure_flag =
       isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
@@ -69,7 +63,7 @@
   int length = constant_properties->length();
   bool should_transform =
       !is_result_from_cache && boilerplate->HasFastProperties();
-  bool should_normalize = should_transform || has_function_literal;
+  bool should_normalize = should_transform;
   if (should_normalize) {
     // TODO(verwaest): We might not want to ever normalize here.
     JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES,
@@ -84,44 +78,22 @@
       // simple object or array literal.
       Handle<FixedArray> array = Handle<FixedArray>::cast(value);
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, value,
-          CreateLiteralBoilerplate(isolate, literals, array, is_strong),
+          isolate, value, CreateLiteralBoilerplate(isolate, literals, array),
           Object);
     }
     MaybeHandle<Object> maybe_result;
     uint32_t element_index = 0;
-    if (key->IsInternalizedString()) {
-      if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
-        // Array index as string (uint32).
-        if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
-        maybe_result = JSObject::SetOwnElementIgnoreAttributes(
-            boilerplate, element_index, value, NONE);
-      } else {
-        Handle<String> name(String::cast(*key));
-        DCHECK(!name->AsArrayIndex(&element_index));
-        maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
-            boilerplate, name, value, NONE);
-      }
-    } else if (key->ToArrayIndex(&element_index)) {
+    if (key->ToArrayIndex(&element_index)) {
       // Array index (uint32).
       if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
       maybe_result = JSObject::SetOwnElementIgnoreAttributes(
           boilerplate, element_index, value, NONE);
     } else {
-      // Non-uint32 number.
-      DCHECK(key->IsNumber());
-      double num = key->Number();
-      char arr[100];
-      Vector<char> buffer(arr, arraysize(arr));
-      const char* str = DoubleToCString(num, buffer);
-      Handle<String> name = isolate->factory()->NewStringFromAsciiChecked(str);
+      Handle<String> name = Handle<String>::cast(key);
+      DCHECK(!name->AsArrayIndex(&element_index));
       maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name,
                                                               value, NONE);
     }
-    // If setting the property on the boilerplate throws an
-    // exception, the exception is converted to an empty handle in
-    // the handle based operations.  In that case, we need to
-    // convert back to an exception.
     RETURN_ON_EXCEPTION(isolate, maybe_result, Object);
   }
 
@@ -129,7 +101,7 @@
   // containing function literals we defer this operation until after all
   // computed properties have been assigned so that we can generate
   // constant function properties.
-  if (should_transform && !has_function_literal) {
+  if (should_transform) {
     JSObject::MigrateSlowToFast(boilerplate,
                                 boilerplate->map()->unused_property_fields(),
                                 "FastLiteral");
@@ -137,10 +109,9 @@
   return boilerplate;
 }
 
-
 MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
     Isolate* isolate, Handle<LiteralsArray> literals,
-    Handle<FixedArray> elements, bool is_strong) {
+    Handle<FixedArray> elements) {
   // Create the JSArray.
   Handle<JSFunction> constructor = isolate->array_function();
 
@@ -159,9 +130,8 @@
     DisallowHeapAllocation no_gc;
     DCHECK(IsFastElementsKind(constant_elements_kind));
     Context* native_context = isolate->context()->native_context();
-    Strength strength = is_strong ? Strength::STRONG : Strength::WEAK;
-    Object* map = native_context->get(
-        Context::ArrayMapIndex(constant_elements_kind, strength));
+    Object* map =
+        native_context->get(Context::ArrayMapIndex(constant_elements_kind));
     object->set_map(Map::cast(map));
   }
 
@@ -188,20 +158,20 @@
       Handle<FixedArray> fixed_array_values_copy =
           isolate->factory()->CopyFixedArray(fixed_array_values);
       copied_elements_values = fixed_array_values_copy;
-      for (int i = 0; i < fixed_array_values->length(); i++) {
-        HandleScope scope(isolate);
-        if (fixed_array_values->get(i)->IsFixedArray()) {
-          // The value contains the constant_properties of a
-          // simple object or array literal.
-          Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
-          Handle<Object> result;
-          ASSIGN_RETURN_ON_EXCEPTION(
-              isolate, result,
-              CreateLiteralBoilerplate(isolate, literals, fa, is_strong),
-              Object);
-          fixed_array_values_copy->set(i, *result);
-        }
-      }
+      FOR_WITH_HANDLE_SCOPE(
+          isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
+            if (fixed_array_values->get(i)->IsFixedArray()) {
+              // The value contains the constant_properties of a
+              // simple object or array literal.
+              Handle<FixedArray> fa(
+                  FixedArray::cast(fixed_array_values->get(i)));
+              Handle<Object> result;
+              ASSIGN_RETURN_ON_EXCEPTION(
+                  isolate, result,
+                  CreateLiteralBoilerplate(isolate, literals, fa), Object);
+              fixed_array_values_copy->set(i, *result);
+            }
+          });
     }
   }
   object->set_elements(*copied_elements_values);
@@ -211,22 +181,18 @@
   return object;
 }
 
-
 MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
-    Isolate* isolate, Handle<LiteralsArray> literals, Handle<FixedArray> array,
-    bool is_strong) {
+    Isolate* isolate, Handle<LiteralsArray> literals,
+    Handle<FixedArray> array) {
   Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
-  const bool kHasNoFunctionLiteral = false;
   switch (CompileTimeValue::GetLiteralType(array)) {
     case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
-      return CreateObjectLiteralBoilerplate(isolate, literals, elements, true,
-                                            kHasNoFunctionLiteral, is_strong);
+      return CreateObjectLiteralBoilerplate(isolate, literals, elements, true);
     case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
-      return CreateObjectLiteralBoilerplate(isolate, literals, elements, false,
-                                            kHasNoFunctionLiteral, is_strong);
+      return CreateObjectLiteralBoilerplate(isolate, literals, elements, false);
     case CompileTimeValue::ARRAY_LITERAL:
       return Runtime::CreateArrayLiteralBoilerplate(isolate, literals,
-                                                    elements, is_strong);
+                                                    elements);
     default:
       UNREACHABLE();
       return MaybeHandle<Object>();
@@ -262,9 +228,7 @@
   CONVERT_SMI_ARG_CHECKED(flags, 3);
   Handle<LiteralsArray> literals(closure->literals(), isolate);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
-  bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
   bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
-  bool is_strong = (flags & ObjectLiteral::kIsStrong) != 0;
 
   RUNTIME_ASSERT(literals_index >= 0 &&
                  literals_index < literals->literals_count());
@@ -278,8 +242,7 @@
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, raw_boilerplate,
         CreateObjectLiteralBoilerplate(isolate, literals, constant_properties,
-                                       should_have_fast_elements,
-                                       has_function_literal, is_strong));
+                                       should_have_fast_elements));
     boilerplate = Handle<JSObject>::cast(raw_boilerplate);
 
     AllocationSiteCreationContext creation_context(isolate);
@@ -306,10 +269,9 @@
   return *copy;
 }
 
-
 MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
     Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
-    Handle<FixedArray> elements, bool is_strong) {
+    Handle<FixedArray> elements) {
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> literal_site(literals->literal(literals_index), isolate);
   Handle<AllocationSite> site;
@@ -318,8 +280,7 @@
     Handle<Object> boilerplate;
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, boilerplate,
-        Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements,
-                                               is_strong),
+        Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements),
         AllocationSite);
 
     AllocationSiteCreationContext creation_context(isolate);
@@ -346,11 +307,9 @@
       literals_index >= 0 && literals_index < literals->literals_count(),
       JSObject);
   Handle<AllocationSite> site;
-  bool is_strong = (flags & ArrayLiteral::kIsStrong) != 0;
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, site,
-      GetLiteralAllocationSite(isolate, literals, literals_index, elements,
-                               is_strong),
+      GetLiteralAllocationSite(isolate, literals, literals_index, elements),
       JSObject);
 
   bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
diff --git a/src/runtime/runtime-liveedit.cc b/src/runtime/runtime-liveedit.cc
index 189ec08..da342de 100644
--- a/src/runtime/runtime-liveedit.cc
+++ b/src/runtime/runtime-liveedit.cc
@@ -186,7 +186,7 @@
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
-  RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array))
+  RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array));
 
   LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
   return isolate->heap()->undefined_value();
@@ -207,19 +207,21 @@
   USE(new_shared_array);
   RUNTIME_ASSERT(old_shared_array->length()->IsSmi());
   RUNTIME_ASSERT(new_shared_array->length() == old_shared_array->length());
-  RUNTIME_ASSERT(old_shared_array->HasFastElements())
-  RUNTIME_ASSERT(new_shared_array->HasFastElements())
+  RUNTIME_ASSERT(old_shared_array->HasFastElements());
+  RUNTIME_ASSERT(new_shared_array->HasFastElements());
   int array_length = Smi::cast(old_shared_array->length())->value();
   for (int i = 0; i < array_length; i++) {
     Handle<Object> old_element;
     Handle<Object> new_element;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, old_element, Object::GetElement(isolate, old_shared_array, i));
+        isolate, old_element,
+        JSReceiver::GetElement(isolate, old_shared_array, i));
     RUNTIME_ASSERT(
         old_element->IsJSValue() &&
         Handle<JSValue>::cast(old_element)->value()->IsSharedFunctionInfo());
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, new_element, Object::GetElement(isolate, new_shared_array, i));
+        isolate, new_element,
+        JSReceiver::GetElement(isolate, new_shared_array, i));
     RUNTIME_ASSERT(
         new_element->IsUndefined() ||
         (new_element->IsJSValue() &&
@@ -242,7 +244,7 @@
   CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
 
   Handle<JSArray> result = LiveEdit::CompareStrings(s1, s2);
-  uint32_t array_length;
+  uint32_t array_length = 0;
   CHECK(result->length()->ToArrayLength(&array_length));
   if (array_length > 0) {
     isolate->debug()->feature_tracker()->Track(DebugFeatureTracker::kLiveEdit);
diff --git a/src/runtime/runtime-maths.cc b/src/runtime/runtime-maths.cc
index 9c4fde1..91b6181 100644
--- a/src/runtime/runtime-maths.cc
+++ b/src/runtime/runtime-maths.cc
@@ -23,9 +23,6 @@
     return *isolate->factory()->NewHeapNumber(std::name(x));   \
   }
 
-RUNTIME_UNARY_MATH(Acos, acos)
-RUNTIME_UNARY_MATH(Asin, asin)
-RUNTIME_UNARY_MATH(Atan, atan)
 RUNTIME_UNARY_MATH(LogRT, log)
 #undef RUNTIME_UNARY_MATH
 
@@ -111,27 +108,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_MathClz32) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  isolate->counters()->math_clz32_runtime()->Increment();
-
-  CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
-  return *isolate->factory()->NewNumberFromUint(
-      base::bits::CountLeadingZeros32(x));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathFloor) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  isolate->counters()->math_floor_runtime()->Increment();
-
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return *isolate->factory()->NewNumber(Floor(x));
-}
-
-
 // Slow version of Math.pow.  We check for fast paths for special cases.
 // Used if VFP3 is not available.
 RUNTIME_FUNCTION(Runtime_MathPow) {
@@ -174,75 +150,21 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_RoundNumber) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
-  isolate->counters()->math_round_runtime()->Increment();
-
-  if (!input->IsHeapNumber()) {
-    DCHECK(input->IsSmi());
-    return *input;
-  }
-
-  Handle<HeapNumber> number = Handle<HeapNumber>::cast(input);
-
-  double value = number->value();
-  int exponent = number->get_exponent();
-  int sign = number->get_sign();
-
-  if (exponent < -1) {
-    // Number in range ]-0.5..0.5[. These always round to +/-zero.
-    if (sign) return isolate->heap()->minus_zero_value();
-    return Smi::FromInt(0);
-  }
-
-  // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
-  // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
-  // argument holds for 32-bit smis).
-  if (!sign && exponent < kSmiValueSize - 2) {
-    return Smi::FromInt(static_cast<int>(value + 0.5));
-  }
-
-  // If the magnitude is big enough, there's no place for fraction part. If we
-  // try to add 0.5 to this number, 1.0 will be added instead.
-  if (exponent >= 52) {
-    return *number;
-  }
-
-  if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
-
-  // Do not call NumberFromDouble() to avoid extra checks.
-  return *isolate->factory()->NewNumber(Floor(value + 0.5));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathSqrt) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  isolate->counters()->math_sqrt_runtime()->Increment();
-
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  lazily_initialize_fast_sqrt(isolate);
-  return *isolate->factory()->NewNumber(fast_sqrt(x, isolate));
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathFround) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  float xf = DoubleToFloat32(x);
-  return *isolate->factory()->NewNumber(xf);
-}
-
-
 RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
-  // Random numbers in the snapshot are not really that random.
-  DCHECK(!isolate->bootstrapper()->IsActive());
+  if (isolate->serializer_enabled()) {
+    // Random numbers in the snapshot are not really that random. And we cannot
+    // return a typed array as it cannot be serialized. To make calling
+    // Math.random possible when creating a custom startup snapshot, we simply
+    // return a normal array with a single random number.
+    Handle<HeapNumber> random_number = isolate->factory()->NewHeapNumber(
+        isolate->random_number_generator()->NextDouble());
+    Handle<FixedArray> array_backing = isolate->factory()->NewFixedArray(1);
+    array_backing->set(0, *random_number);
+    return *isolate->factory()->NewJSArrayWithElements(array_backing);
+  }
+
   static const int kState0Offset = 0;
   static const int kState1Offset = 1;
   static const int kRandomBatchSize = 64;
diff --git a/src/runtime/runtime-numbers.cc b/src/runtime/runtime-numbers.cc
index 46fbff3..efbdeb2 100644
--- a/src/runtime/runtime-numbers.cc
+++ b/src/runtime/runtime-numbers.cc
@@ -208,19 +208,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_NumberImul) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-
-  // We rely on implementation-defined behavior below, but at least not on
-  // undefined behavior.
-  CONVERT_NUMBER_CHECKED(uint32_t, x, Int32, args[0]);
-  CONVERT_NUMBER_CHECKED(uint32_t, y, Int32, args[1]);
-  int32_t product = static_cast<int32_t>(x * y);
-  return *isolate->factory()->NewNumberFromInt(product);
-}
-
-
 // Compare two Smis as if they were converted to strings and then
 // compared lexicographically.
 RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 45a4992..5bdb085 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -125,6 +125,82 @@
   return JSReceiver::DeleteProperty(&it, language_mode);
 }
 
+// ES6 19.1.3.2
+RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
+  HandleScope scope(isolate);
+  Handle<Object> property = args.at<Object>(1);
+
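+  // Convert the property to an array index if possible, otherwise to a Name
+  // (which may still turn out to be an index).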
+  Handle<Name> key;
+  uint32_t index;
+  bool key_is_array_index = property->ToArrayIndex(&index);
+
+  if (!key_is_array_index) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+                                       Object::ToName(isolate, property));
+    key_is_array_index = key->AsArrayIndex(&index);
+  }
+
+  Handle<Object> object = args.at<Object>(0);
+
+  if (object->IsJSObject()) {
+    Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
+    // Fast case: either the key is a real named property or it is not
+    // an array index and there are no interceptors or hidden
+    // prototypes.
+    // TODO(jkummerow): Make JSReceiver::HasOwnProperty fast enough to
+    // handle all cases directly (without this custom fast path).
+    {
+      LookupIterator::Configuration c = LookupIterator::OWN_SKIP_INTERCEPTOR;
+      LookupIterator it =
+          key_is_array_index ? LookupIterator(isolate, js_obj, index, js_obj, c)
+                             : LookupIterator(js_obj, key, js_obj, c);
+      Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+      if (maybe.IsNothing()) return isolate->heap()->exception();
+      DCHECK(!isolate->has_pending_exception());
+      if (maybe.FromJust()) return isolate->heap()->true_value();
+    }
+
+    Map* map = js_obj->map();
+    if (!map->has_hidden_prototype() &&
+        (key_is_array_index ? !map->has_indexed_interceptor()
+                            : !map->has_named_interceptor())) {
+      return isolate->heap()->false_value();
+    }
+
+    // Slow case.
+    LookupIterator::Configuration c = LookupIterator::HIDDEN;
+    LookupIterator it = key_is_array_index
+                            ? LookupIterator(isolate, js_obj, index, js_obj, c)
+                            : LookupIterator(js_obj, key, js_obj, c);
+
+    Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+    if (maybe.IsNothing()) return isolate->heap()->exception();
+    DCHECK(!isolate->has_pending_exception());
+    return isolate->heap()->ToBoolean(maybe.FromJust());
+
+  } else if (object->IsJSProxy()) {
+    if (key.is_null()) {
+      DCHECK(key_is_array_index);
+      key = isolate->factory()->Uint32ToString(index);
+    }
+
+    Maybe<bool> result =
+        JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
+    if (!result.IsJust()) return isolate->heap()->exception();
+    return isolate->heap()->ToBoolean(result.FromJust());
+
+  } else if (object->IsString()) {
+    return isolate->heap()->ToBoolean(
+        key_is_array_index
+            ? index < static_cast<uint32_t>(String::cast(*object)->length())
+            : key->Equals(isolate->heap()->length_string()));
+  } else if (object->IsNull() || object->IsUndefined()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+  }
+
+  return isolate->heap()->false_value();
+}
 
 MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
                                                Handle<Object> object,
@@ -205,7 +281,7 @@
   Factory* factory = isolate->factory();
 
   // Get attributes.
-  LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name,
+  LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name, obj,
                                                         LookupIterator::HIDDEN);
   Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
 
@@ -293,7 +369,7 @@
   Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
   Handle<JSGlobalObject> global_object(script_context->global_object(),
                                        isolate);
-  LookupIterator it(global_object, name, LookupIterator::HIDDEN);
+  LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
 
   // Switch to fast mode only if there is a data property and it's not on
   // a hidden prototype.
@@ -328,7 +404,7 @@
   Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
   Handle<JSGlobalObject> global_object(script_context->global_object(),
                                        isolate);
-  LookupIterator it(global_object, name, LookupIterator::HIDDEN);
+  LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
 
   // Switch to fast mode only if there is a data property and it's not on
   // a hidden prototype.
@@ -413,7 +489,7 @@
 #ifdef DEBUG
   uint32_t index = 0;
   DCHECK(!name->ToArrayIndex(&index));
-  LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   if (!maybe.IsJust()) return isolate->heap()->exception();
   RUNTIME_ASSERT(!it.IsFound());
@@ -441,7 +517,7 @@
   CHECK(key->ToArrayIndex(&index));
 
 #ifdef DEBUG
-  LookupIterator it(isolate, object, index,
+  LookupIterator it(isolate, object, index, object,
                     LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   if (!maybe.IsJust()) return isolate->heap()->exception();
@@ -532,85 +608,6 @@
 }
 
 
-static Object* HasOwnPropertyImplementation(Isolate* isolate,
-                                            Handle<JSObject> object,
-                                            Handle<Name> key) {
-  Maybe<bool> maybe = JSReceiver::HasOwnProperty(object, key);
-  if (!maybe.IsJust()) return isolate->heap()->exception();
-  if (maybe.FromJust()) return isolate->heap()->true_value();
-  // Handle hidden prototypes.  If there's a hidden prototype above this thing
-  // then we have to check it for properties, because they are supposed to
-  // look like they are on this object.
-  if (object->map()->has_hidden_prototype()) {
-    PrototypeIterator iter(isolate, object);
-    DCHECK(!iter.IsAtEnd());
-
-    // TODO(verwaest): The recursion is not necessary for keys that are array
-    // indices. Removing this.
-    // Casting to JSObject is fine because JSProxies are never used as
-    // hidden prototypes.
-    return HasOwnPropertyImplementation(
-        isolate, PrototypeIterator::GetCurrent<JSObject>(iter), key);
-  }
-  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0)
-  CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
-
-  uint32_t index;
-  const bool key_is_array_index = key->AsArrayIndex(&index);
-
-  // Only JS objects can have properties.
-  if (object->IsJSObject()) {
-    Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
-    // Fast case: either the key is a real named property or it is not
-    // an array index and there are no interceptors or hidden
-    // prototypes.
-    // TODO(jkummerow): Make JSReceiver::HasOwnProperty fast enough to
-    // handle all cases directly (without this custom fast path).
-    Maybe<bool> maybe = Nothing<bool>();
-    if (key_is_array_index) {
-      LookupIterator it(js_obj->GetIsolate(), js_obj, index,
-                        LookupIterator::HIDDEN);
-      maybe = JSReceiver::HasProperty(&it);
-    } else {
-      maybe = JSObject::HasRealNamedProperty(js_obj, key);
-    }
-    if (!maybe.IsJust()) return isolate->heap()->exception();
-    DCHECK(!isolate->has_pending_exception());
-    if (maybe.FromJust()) {
-      return isolate->heap()->true_value();
-    }
-    Map* map = js_obj->map();
-    if (!key_is_array_index && !map->has_named_interceptor() &&
-        !map->has_hidden_prototype()) {
-      return isolate->heap()->false_value();
-    }
-    // Slow case.
-    return HasOwnPropertyImplementation(isolate, Handle<JSObject>(js_obj),
-                                        Handle<Name>(key));
-  } else if (object->IsString() && key_is_array_index) {
-    // Well, there is one exception:  Handle [] on strings.
-    Handle<String> string = Handle<String>::cast(object);
-    if (index < static_cast<uint32_t>(string->length())) {
-      return isolate->heap()->true_value();
-    }
-  } else if (object->IsJSProxy()) {
-    Maybe<bool> result =
-        JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
-    if (!result.IsJust()) return isolate->heap()->exception();
-    return isolate->heap()->ToBoolean(result.FromJust());
-  }
-  return isolate->heap()->false_value();
-}
-
-
 // ES6 section 12.9.3, operator in.
 RUNTIME_FUNCTION(Runtime_HasProperty) {
   HandleScope scope(isolate);
@@ -840,8 +837,8 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
   CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
 
-  LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
-                                                        LookupIterator::OWN);
+  LookupIterator it = LookupIterator::PropertyOrElement(
+      isolate, object, name, object, LookupIterator::OWN);
   if (it.state() == LookupIterator::ACCESS_CHECK && !it.HasAccess()) {
     return isolate->heap()->undefined_value();
   }
@@ -869,8 +866,8 @@
                         isolate->factory()->empty_string());
   }
 
-  LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
-                                                        LookupIterator::OWN);
+  LookupIterator it = LookupIterator::PropertyOrElement(
+      isolate, object, name, object, LookupIterator::OWN);
   // Cannot fail since this should only be called when
   // creating an object literal.
   CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs,
@@ -915,15 +912,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_IsStrong) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(Object, obj, 0);
-  return isolate->heap()->ToBoolean(obj->IsJSReceiver() &&
-                                    JSReceiver::cast(obj)->map()->is_strong());
-}
-
-
 RUNTIME_FUNCTION(Runtime_ClassOf) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
@@ -1118,6 +1106,9 @@
 
 
 RUNTIME_FUNCTION(Runtime_InstanceOf) {
+  // TODO(4447): Remove this function when ES6 instanceof ships for good.
+  DCHECK(!FLAG_harmony_instanceof);
+
   // ECMA-262, section 11.8.6, page 54.
   HandleScope shs(isolate);
   DCHECK_EQ(2, args.length());
@@ -1146,7 +1137,50 @@
   Handle<Object> prototype;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, prototype,
-      Object::GetProperty(callable, isolate->factory()->prototype_string()));
+      JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
+                              isolate->factory()->prototype_string()));
+  if (!prototype->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
+  }
+  // Return whether or not {prototype} is in the prototype chain of {object}.
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+  Maybe<bool> result =
+      JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
+  // ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
+  HandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
+  // {callable} must have a [[Call]] internal method.
+  if (!callable->IsCallable()) {
+    return isolate->heap()->false_value();
+  }
+  // If {object} is not a receiver, return false.
+  if (!object->IsJSReceiver()) {
+    return isolate->heap()->false_value();
+  }
+  // Check if {callable} is bound; if so, get [[BoundTargetFunction]] from it
+  // and use that instead of {callable}.
+  while (callable->IsJSBoundFunction()) {
+    callable =
+        handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+               isolate);
+  }
+  DCHECK(callable->IsCallable());
+  // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+  Handle<Object> prototype;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, prototype,
+      JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
+                              isolate->factory()->prototype_string()));
   if (!prototype->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate,
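
For illustration (not part of the patch): the new Runtime_OrdinaryHasInstance above follows ES6 19.2.3.6 — unwrap bound functions, read the callable's "prototype", then walk the receiver's prototype chain. Below is a minimal standalone C++ sketch of that shape; the Obj struct and its field names are invented for the example and are not V8 types.

// Standalone sketch of the OrdinaryHasInstance shape; Obj stands in for
// V8's JSReceiver/JSBoundFunction and is not a real V8 type.
#include <cassert>

struct Obj {
  const Obj* prototype = nullptr;     // [[Prototype]] link of an instance
  const Obj* bound_target = nullptr;  // set for "bound function" wrappers
  const Obj* fn_prototype = nullptr;  // the callable's "prototype" property
};

static bool OrdinaryHasInstance(const Obj* callable, const Obj* object) {
  if (callable == nullptr || object == nullptr) return false;
  // Unwrap bound functions, as the runtime function does for JSBoundFunction.
  while (callable->bound_target != nullptr) callable = callable->bound_target;
  const Obj* proto = callable->fn_prototype;
  if (proto == nullptr) return false;  // V8 instead throws kInstanceofNonobjectProto.
  // Walk the prototype chain of {object}.
  for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == proto) return true;
  }
  return false;
}

int main() {
  Obj proto;                                // plays the role of Function.prototype
  Obj fn;     fn.fn_prototype = &proto;
  Obj bound;  bound.bound_target = &fn;     // bound wrapper around fn
  Obj inst;   inst.prototype = &proto;
  assert(OrdinaryHasInstance(&fn, &inst));
  assert(OrdinaryHasInstance(&bound, &inst));  // wrapper is unwrapped first
  Obj other;
  assert(!OrdinaryHasInstance(&fn, &other));
  return 0;
}
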
diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc
index df86aa8..aead017 100644
--- a/src/runtime/runtime-regexp.cc
+++ b/src/runtime/runtime-regexp.cc
@@ -642,7 +642,7 @@
   // TODO(hpayer): We should shrink the large object page if the size
   // of the object changed significantly.
   if (!heap->lo_space()->Contains(*answer)) {
-    heap->CreateFillerObjectAt(end_of_string, delta);
+    heap->CreateFillerObjectAt(end_of_string, delta, ClearRecordedSlots::kNo);
   }
   heap->AdjustLiveBytes(*answer, -delta, Heap::CONCURRENT_TO_SWEEPER);
   return *answer;
@@ -734,9 +734,9 @@
   // Create JSArray of substrings separated by separator.
   int part_count = indices.length();
 
-  Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
-  JSObject::EnsureCanContainHeapObjectElements(result);
-  result->set_length(Smi::FromInt(part_count));
+  Handle<JSArray> result =
+      isolate->factory()->NewJSArray(FAST_ELEMENTS, part_count, part_count,
+                                     INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
 
   DCHECK(result->HasFastObjectElements());
 
@@ -746,14 +746,13 @@
     elements->set(0, *subject);
   } else {
     int part_start = 0;
-    for (int i = 0; i < part_count; i++) {
-      HandleScope local_loop_handle(isolate);
+    FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < part_count, i++, {
       int part_end = indices.at(i);
       Handle<String> substring =
           isolate->factory()->NewProperSubString(subject, part_start, part_end);
       elements->set(i, *substring);
       part_start = part_end + pattern_length;
-    }
+    });
   }
 
   if (limit == 0xffffffffu) {
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index a8f3a74..de0d66a 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -44,7 +44,8 @@
   }
 
   // Do the lookup own properties only, see ES5 erratum.
-  LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+  LookupIterator it(global, name, global,
+                    LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   if (!maybe.IsJust()) return isolate->heap()->exception();
 
@@ -102,8 +103,7 @@
 
   // Traverse the name/value pairs and set the properties.
   int length = pairs->length();
-  for (int i = 0; i < length; i += 2) {
-    HandleScope scope(isolate);
+  FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 2, {
     Handle<String> name(String::cast(pairs->get(i)));
     Handle<Object> initial_value(pairs->get(i + 1), isolate);
 
@@ -142,7 +142,7 @@
                                     static_cast<PropertyAttributes>(attr),
                                     is_var, is_const, is_function);
     if (isolate->has_pending_exception()) return result;
-  }
+  });
 
   return isolate->heap()->undefined_value();
 }
@@ -182,7 +182,8 @@
   Handle<JSGlobalObject> global = isolate->global_object();
 
   // Lookup the property as own on the global object.
-  LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+  LookupIterator it(global, name, global,
+                    LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   DCHECK(maybe.IsJust());
   PropertyAttributes old_attributes = maybe.FromJust();
@@ -394,7 +395,8 @@
     // code can run in between that modifies the declared property.
     DCHECK(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject());
 
-    LookupIterator it(holder, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+    LookupIterator it(holder, name, Handle<JSReceiver>::cast(holder),
+                      LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
     Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
     if (!maybe.IsJust()) return isolate->heap()->exception();
     PropertyAttributes old_attributes = maybe.FromJust();
@@ -640,9 +642,9 @@
   base::SmartArrayPointer<Handle<Object>> arguments =
       GetCallerArguments(isolate, &argument_count);
   int num_elements = std::max(0, argument_count - start_index);
-  Handle<JSObject> result = isolate->factory()->NewJSArray(
-      FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
-      DONT_INITIALIZE_ARRAY_ELEMENTS);
+  Handle<JSObject> result =
+      isolate->factory()->NewJSArray(FAST_ELEMENTS, num_elements, num_elements,
+                                     DONT_INITIALIZE_ARRAY_ELEMENTS);
   {
     DisallowHeapAllocation no_gc;
     FixedArray* elements = FixedArray::cast(result->elements());
@@ -708,7 +710,7 @@
     }
 
     if (IsLexicalVariableMode(mode)) {
-      LookupIterator it(global_object, name,
+      LookupIterator it(global_object, name, global_object,
                         LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
       Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
       if (!maybe.IsJust()) return isolate->heap()->exception();
diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc
index fcec47d..6786fa9 100644
--- a/src/runtime/runtime-strings.cc
+++ b/src/runtime/runtime-strings.cc
@@ -371,14 +371,13 @@
   Handle<String> substring =
       isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
   elements->set(0, *substring);
-  for (int i = 1; i < matches; i++) {
-    HandleScope temp_scope(isolate);
+  FOR_WITH_HANDLE_SCOPE(isolate, int, i = 1, i, i < matches, i++, {
     int from = offsets.at(i * 2);
     int to = offsets.at(i * 2 + 1);
     Handle<String> substring =
         isolate->factory()->NewProperSubString(subject, from, to);
     elements->set(i, *substring);
-  }
+  });
   Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
   result->set_length(Smi::FromInt(matches));
   return *result;
@@ -557,6 +556,7 @@
   RUNTIME_ASSERT(fixed_array->get(0)->IsString());
   String* first = String::cast(fixed_array->get(0));
   String* separator_raw = *separator;
+
   int first_length = first->length();
   String::WriteToFlat(first, sink, 0, first_length);
   sink += first_length;
@@ -580,6 +580,26 @@
   return *answer;
 }
 
+template <typename sinkchar>
+static void WriteRepeatToFlat(String* src, Vector<sinkchar> buffer, int cursor,
+                              int repeat, int length) {
+  if (repeat == 0) return;
+
+  sinkchar* start = &buffer[cursor];
+  String::WriteToFlat<sinkchar>(src, start, 0, length);
+
+  int done = 1;
+  sinkchar* next = start + length;
+
+  while (done < repeat) {
+    int block = Min(done, repeat - done);
+    int block_chars = block * length;
+    CopyChars(next, start, block_chars);
+    next += block_chars;
+    done += block;
+  }
+}
+
 template <typename Char>
 static void JoinSparseArrayWithSeparator(FixedArray* elements,
                                          int elements_length,
@@ -589,34 +609,30 @@
   DisallowHeapAllocation no_gc;
   int previous_separator_position = 0;
   int separator_length = separator->length();
+  DCHECK_LT(0, separator_length);
   int cursor = 0;
   for (int i = 0; i < elements_length; i += 2) {
     int position = NumberToInt32(elements->get(i));
     String* string = String::cast(elements->get(i + 1));
     int string_length = string->length();
     if (string->length() > 0) {
-      while (previous_separator_position < position) {
-        String::WriteToFlat<Char>(separator, &buffer[cursor], 0,
-                                  separator_length);
-        cursor += separator_length;
-        previous_separator_position++;
-      }
+      int repeat = position - previous_separator_position;
+      WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat,
+                              separator_length);
+      cursor += repeat * separator_length;
+      previous_separator_position = position;
       String::WriteToFlat<Char>(string, &buffer[cursor], 0, string_length);
       cursor += string->length();
     }
   }
-  if (separator_length > 0) {
-    // Array length must be representable as a signed 32-bit number,
-    // otherwise the total string length would have been too large.
-    DCHECK(array_length <= 0x7fffffff);  // Is int32_t.
-    int last_array_index = static_cast<int>(array_length - 1);
-    while (previous_separator_position < last_array_index) {
-      String::WriteToFlat<Char>(separator, &buffer[cursor], 0,
-                                separator_length);
-      cursor += separator_length;
-      previous_separator_position++;
-    }
-  }
+
+  int last_array_index = static_cast<int>(array_length - 1);
+  // Array length must be representable as a signed 32-bit number,
+  // otherwise the total string length would have been too large.
+  DCHECK(array_length <= 0x7fffffff);  // Is int32_t.
+  int repeat = last_array_index - previous_separator_position;
+  WriteRepeatToFlat<Char>(separator, buffer, cursor, repeat, separator_length);
+  cursor += repeat * separator_length;
   DCHECK(cursor <= buffer.length());
 }
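
For illustration (not part of the patch): WriteRepeatToFlat above emits a run of identical separators by writing one copy and then repeatedly doubling the already-written region with CopyChars, so N repetitions cost O(log N) copy calls; JoinSparseArrayWithSeparator now delegates its separator runs to it. A standalone sketch of the same doubling strategy over std::string follows; WriteRepeat is a made-up name for the example.

// Doubling-copy sketch: append `pattern` `repeat` times to `out`,
// mirroring the structure of WriteRepeatToFlat.
#include <algorithm>
#include <cstring>
#include <iostream>
#include <string>

static void WriteRepeat(const std::string& pattern, int repeat, std::string* out) {
  if (repeat <= 0 || pattern.empty()) return;
  const size_t start = out->size();
  const size_t plen = pattern.size();
  out->resize(start + plen * static_cast<size_t>(repeat));  // room for the whole run
  char* base = &(*out)[start];
  std::memcpy(base, pattern.data(), plen);  // write the pattern once
  int done = 1;
  char* next = base + plen;
  while (done < repeat) {
    int block = std::min(done, repeat - done);  // double what is already written
    size_t block_chars = static_cast<size_t>(block) * plen;
    std::memcpy(next, base, block_chars);
    next += block_chars;
    done += block;
  }
}

int main() {
  std::string buffer;
  WriteRepeat(",", 5, &buffer);
  std::cout << buffer << "\n";  // prints ",,,,,"
  WriteRepeat("ab", 3, &buffer);
  std::cout << buffer << "\n";  // prints ",,,,,ababab"
  return 0;
}
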
 
@@ -642,13 +658,6 @@
   RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
   RUNTIME_ASSERT((elements_length & 1) == 0);  // Even length.
   FixedArray* elements = FixedArray::cast(elements_array->elements());
-  for (int i = 0; i < elements_length; i += 2) {
-    RUNTIME_ASSERT(elements->get(i)->IsNumber());
-    CONVERT_NUMBER_CHECKED(uint32_t, position, Uint32, elements->get(i));
-    RUNTIME_ASSERT(position < array_length);
-    RUNTIME_ASSERT(elements->get(i + 1)->IsString());
-  }
-
   {
     DisallowHeapAllocation no_gc;
     for (int i = 0; i < elements_length; i += 2) {
@@ -1145,24 +1154,93 @@
   return *result;
 }
 
-
-RUNTIME_FUNCTION(Runtime_StringEquals) {
+RUNTIME_FUNCTION(Runtime_StringLessThan) {
   HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 2);
-
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
-
-  bool not_equal = !String::Equals(x, y);
-  // This is slightly convoluted because the value that signifies
-  // equality is 0 and inequality is 1 so we have to negate the result
-  // from String::Equals.
-  DCHECK(not_equal == 0 || not_equal == 1);
-  STATIC_ASSERT(EQUAL == 0);
-  STATIC_ASSERT(NOT_EQUAL == 1);
-  return Smi::FromInt(not_equal);
+  switch (String::Compare(x, y)) {
+    case ComparisonResult::kLessThan:
+      return isolate->heap()->true_value();
+    case ComparisonResult::kEqual:
+    case ComparisonResult::kGreaterThan:
+      return isolate->heap()->false_value();
+    case ComparisonResult::kUndefined:
+      break;
+  }
+  UNREACHABLE();
+  return Smi::FromInt(0);
 }
 
+RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
+  HandleScope handle_scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+  switch (String::Compare(x, y)) {
+    case ComparisonResult::kEqual:
+    case ComparisonResult::kLessThan:
+      return isolate->heap()->true_value();
+    case ComparisonResult::kGreaterThan:
+      return isolate->heap()->false_value();
+    case ComparisonResult::kUndefined:
+      break;
+  }
+  UNREACHABLE();
+  return Smi::FromInt(0);
+}
+
+RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
+  HandleScope handle_scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+  switch (String::Compare(x, y)) {
+    case ComparisonResult::kGreaterThan:
+      return isolate->heap()->true_value();
+    case ComparisonResult::kEqual:
+    case ComparisonResult::kLessThan:
+      return isolate->heap()->false_value();
+    case ComparisonResult::kUndefined:
+      break;
+  }
+  UNREACHABLE();
+  return Smi::FromInt(0);
+}
+
+RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
+  HandleScope handle_scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+  switch (String::Compare(x, y)) {
+    case ComparisonResult::kEqual:
+    case ComparisonResult::kGreaterThan:
+      return isolate->heap()->true_value();
+    case ComparisonResult::kLessThan:
+      return isolate->heap()->false_value();
+    case ComparisonResult::kUndefined:
+      break;
+  }
+  UNREACHABLE();
+  return Smi::FromInt(0);
+}
+
+RUNTIME_FUNCTION(Runtime_StringEqual) {
+  HandleScope handle_scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+  return isolate->heap()->ToBoolean(String::Equals(x, y));
+}
+
+RUNTIME_FUNCTION(Runtime_StringNotEqual) {
+  HandleScope handle_scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
+  return isolate->heap()->ToBoolean(!String::Equals(x, y));
+}
 
 RUNTIME_FUNCTION(Runtime_FlattenString) {
   HandleScope scope(isolate);
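
For illustration (not part of the patch): the Runtime_StringLessThan / LessThanOrEqual / GreaterThan / GreaterThanOrEqual / Equal / NotEqual entries added above each reduce to one three-way String::Compare plus a switch. Here is a standalone sketch of the same mapping over std::string; the kUndefined case has no analogue outside V8 and is omitted.

// Map one three-way comparison onto the relational results the runtime exposes.
#include <cassert>
#include <string>

enum class ComparisonResult { kLessThan, kEqual, kGreaterThan };

static ComparisonResult Compare(const std::string& x, const std::string& y) {
  int c = x.compare(y);
  if (c < 0) return ComparisonResult::kLessThan;
  if (c > 0) return ComparisonResult::kGreaterThan;
  return ComparisonResult::kEqual;
}

static bool LessThan(const std::string& x, const std::string& y) {
  return Compare(x, y) == ComparisonResult::kLessThan;
}
static bool LessThanOrEqual(const std::string& x, const std::string& y) {
  return Compare(x, y) != ComparisonResult::kGreaterThan;
}
static bool GreaterThan(const std::string& x, const std::string& y) {
  return Compare(x, y) == ComparisonResult::kGreaterThan;
}
static bool GreaterThanOrEqual(const std::string& x, const std::string& y) {
  return Compare(x, y) != ComparisonResult::kLessThan;
}

int main() {
  assert(LessThan("a", "b"));
  assert(LessThanOrEqual("a", "a"));
  assert(GreaterThan("b", "a"));
  assert(GreaterThanOrEqual("b", "b"));
  return 0;
}
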
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc
index 5f27a60..a0f0566 100644
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -398,7 +398,7 @@
   DCHECK(args.length() == 1);
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-  if (!Compiler::Compile(func, KEEP_EXCEPTION)) {
+  if (!Compiler::Compile(func, Compiler::KEEP_EXCEPTION)) {
     return isolate->heap()->exception();
   }
   OFStream os(stdout);
@@ -503,5 +503,14 @@
 TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
 
 #undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+
+
+RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(0, args.length());
+  return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
+}
+
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-utils.h b/src/runtime/runtime-utils.h
index c673b5a..17c78d5 100644
--- a/src/runtime/runtime-utils.h
+++ b/src/runtime/runtime-utils.h
@@ -5,19 +5,49 @@
 #ifndef V8_RUNTIME_RUNTIME_UTILS_H_
 #define V8_RUNTIME_RUNTIME_UTILS_H_
 
+#include "src/base/logging.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
 namespace internal {
 
-#define RUNTIME_ASSERT(value) \
-  if (!(value)) return isolate->ThrowIllegalOperation();
+#ifdef DEBUG
+
+#define RUNTIME_ASSERT(value)                      \
+  do {                                             \
+    if (!(value)) {                                \
+      V8_RuntimeError(__FILE__, __LINE__, #value); \
+      return isolate->ThrowIllegalOperation();     \
+    }                                              \
+  } while (0)
+
+#define RUNTIME_ASSERT_HANDLIFIED(value, T)        \
+  do {                                             \
+    if (!(value)) {                                \
+      V8_RuntimeError(__FILE__, __LINE__, #value); \
+      isolate->ThrowIllegalOperation();            \
+      return MaybeHandle<T>();                     \
+    }                                              \
+  } while (0)
+
+#else
+
+#define RUNTIME_ASSERT(value)                  \
+  do {                                         \
+    if (!(value)) {                            \
+      return isolate->ThrowIllegalOperation(); \
+    }                                          \
+  } while (0)
 
 #define RUNTIME_ASSERT_HANDLIFIED(value, T) \
-  if (!(value)) {                           \
-    isolate->ThrowIllegalOperation();       \
-    return MaybeHandle<T>();                \
-  }
+  do {                                      \
+    if (!(value)) {                         \
+      isolate->ThrowIllegalOperation();     \
+      return MaybeHandle<T>();              \
+    }                                       \
+  } while (0)
+
+#endif
 
 // Cast the given object to a value of the specified type and store
 // it in a variable with the given name.  If the object is not of the
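
For illustration (not part of the patch): both RUNTIME_ASSERT variants are now wrapped in do { ... } while (0), so every expansion is a single statement that requires a trailing semicolon and nests safely under an unbraced if/else. A standalone sketch of the idiom, with a made-up macro name:

// The do { ... } while (0) idiom: the macro expands to exactly one statement,
// so the trailing semicolon at the call site is consumed and an unbraced
// if/else around the macro still parses the way the reader expects.
#include <cstdio>

#define CHECK_OR_RETURN(cond)   \
  do {                          \
    if (!(cond)) return -1;     \
  } while (0)

static int Clamp(int v) {
  if (v > 0)
    CHECK_OR_RETURN(v < 100);  // one statement; the else below still binds here
  else
    return 0;
  return v;
}

int main() {
  std::printf("%d %d %d\n", Clamp(-1), Clamp(5), Clamp(500));  // prints "0 5 -1"
  return 0;
}
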
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 7019c3b..dc1678b 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -35,7 +35,6 @@
   F(FinishArrayPrototypeSetup, 1, 1) \
   F(SpecialArrayFunctions, 0, 1)     \
   F(TransitionElementsKind, 2, 1)    \
-  F(PushIfAbsent, 2, 1)              \
   F(RemoveArrayHoles, 2, 1)          \
   F(MoveArrayContents, 2, 1)         \
   F(EstimateNumberOfElements, 1, 1)  \
@@ -43,6 +42,7 @@
   F(ArrayConstructor, -1, 1)         \
   F(NewArray, -1 /* >= 3 */, 1)      \
   F(InternalArrayConstructor, -1, 1) \
+  F(ArrayPush, -1, 1)                \
   F(NormalizeElements, 1, 1)         \
   F(GrowArrayElements, 2, 1)         \
   F(HasComplexElements, 1, 1)        \
@@ -53,7 +53,6 @@
   F(FixedArraySet, 3, 1)             \
   F(ArraySpeciesConstructor, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_ATOMICS(F) \
   F(AtomicsCompareExchange, 4, 1)     \
   F(AtomicsLoad, 2, 1)                \
@@ -138,10 +137,10 @@
   F(DateCurrentTime, 0, 1)         \
   F(ThrowNotDateError, 0, 1)
 
-
 #define FOR_EACH_INTRINSIC_DEBUG(F)            \
   F(HandleDebuggerStatement, 0, 1)             \
-  F(DebugBreak, 0, 1)                          \
+  F(DebugBreak, 1, 1)                          \
+  F(DebugBreakOnBytecode, 1, 1)                \
   F(SetDebugEventListener, 2, 1)               \
   F(ScheduleBreak, 0, 1)                       \
   F(DebugGetInternalProperties, 1, 1)          \
@@ -156,7 +155,6 @@
   F(GetFrameCount, 1, 1)                       \
   F(GetFrameDetails, 2, 1)                     \
   F(GetScopeCount, 2, 1)                       \
-  F(GetStepInPositions, 2, 1)                  \
   F(GetScopeDetails, 4, 1)                     \
   F(GetAllScopesDetails, 4, 1)                 \
   F(GetFunctionScopeCount, 1, 1)               \
@@ -192,12 +190,10 @@
   F(DebugPrepareStepInIfStepping, 1, 1)        \
   F(DebugPushPromise, 2, 1)                    \
   F(DebugPopPromise, 0, 1)                     \
-  F(DebugPromiseEvent, 1, 1)                   \
   F(DebugAsyncTaskEvent, 1, 1)                 \
   F(DebugIsActive, 0, 1)                       \
   F(DebugBreakInOptimizedCode, 0, 1)
 
-
 #define FOR_EACH_INTRINSIC_FORIN(F) \
   F(ForInDone, 2, 1)                \
   F(ForInEnumerate, 1, 1)           \
@@ -206,9 +202,6 @@
   F(ForInStep, 1, 1)
 
 #define FOR_EACH_INTRINSIC_INTERPRETER(F) \
-  F(InterpreterToBoolean, 1, 1)           \
-  F(InterpreterLogicalNot, 1, 1)          \
-  F(InterpreterTypeOf, 1, 1)              \
   F(InterpreterNewClosure, 2, 1)          \
   F(InterpreterTraceBytecodeEntry, 3, 1)  \
   F(InterpreterTraceBytecodeExit, 3, 1)   \
@@ -223,18 +216,16 @@
   F(FunctionGetSourceCode, 1, 1)           \
   F(FunctionGetScriptSourcePosition, 1, 1) \
   F(FunctionGetPositionForOffset, 2, 1)    \
+  F(FunctionGetContextData, 1, 1)          \
   F(FunctionSetInstanceClassName, 2, 1)    \
   F(FunctionSetLength, 2, 1)               \
   F(FunctionSetPrototype, 2, 1)            \
   F(FunctionIsAPIFunction, 1, 1)           \
   F(SetCode, 2, 1)                         \
   F(SetNativeFlag, 1, 1)                   \
-  F(ThrowStrongModeTooFewArguments, 0, 1)  \
   F(IsConstructor, 1, 1)                   \
   F(SetForceInlineFlag, 1, 1)              \
   F(Call, -1 /* >= 2 */, 1)                \
-  F(TailCall, -1 /* >= 2 */, 1)            \
-  F(Apply, 5, 1)                           \
   F(ConvertReceiver, 1, 1)                 \
   F(IsFunction, 1, 1)                      \
   F(FunctionToString, 1, 1)
@@ -245,7 +236,6 @@
   F(ResumeJSGeneratorObject, 3, 1)      \
   F(GeneratorClose, 1, 1)               \
   F(GeneratorGetFunction, 1, 1)         \
-  F(GeneratorGetContext, 1, 1)          \
   F(GeneratorGetReceiver, 1, 1)         \
   F(GeneratorGetInput, 1, 1)            \
   F(GeneratorGetContinuation, 1, 1)     \
@@ -284,7 +274,6 @@
 #define FOR_EACH_INTRINSIC_I18N(F)
 #endif
 
-
 #define FOR_EACH_INTRINSIC_INTERNAL(F)              \
   F(CheckIsBootstrapping, 0, 1)                     \
   F(ExportFromRuntime, 1, 1)                        \
@@ -302,7 +291,6 @@
   F(ThrowIllegalInvocation, 0, 1)                   \
   F(ThrowIteratorResultNotAnObject, 1, 1)           \
   F(ThrowStackOverflow, 0, 1)                       \
-  F(ThrowStrongModeImplicitConversion, 0, 1)        \
   F(PromiseRejectEvent, 3, 1)                       \
   F(PromiseRevokeReject, 1, 1)                      \
   F(StackGuard, 0, 1)                               \
@@ -324,15 +312,14 @@
   F(CallSiteIsEvalRT, 1, 1)                         \
   F(CallSiteIsConstructorRT, 1, 1)                  \
   F(IS_VAR, 1, 1)                                   \
-  F(IncrementStatsCounter, 1, 1)                    \
   F(ThrowConstructedNonConstructable, 1, 1)         \
   F(ThrowDerivedConstructorReturnedNonObject, 0, 1) \
   F(ThrowCalledNonCallable, 1, 1)                   \
   F(CreateListFromArrayLike, 1, 1)                  \
   F(IncrementUseCounter, 1, 1)                      \
+  F(GetOrdinaryHasInstance, 0, 1)                   \
   F(GetAndResetRuntimeCallStats, 0, 1)
 
-
 #define FOR_EACH_INTRINSIC_JSON(F) \
   F(QuoteJSONString, 1, 1)         \
   F(BasicJSONStringify, 1, 1)      \
@@ -361,9 +348,6 @@
 
 
 #define FOR_EACH_INTRINSIC_MATHS(F) \
-  F(MathAcos, 1, 1)                 \
-  F(MathAsin, 1, 1)                 \
-  F(MathAtan, 1, 1)                 \
   F(MathLogRT, 1, 1)                \
   F(DoubleHi, 1, 1)                 \
   F(DoubleLo, 1, 1)                 \
@@ -371,13 +355,8 @@
   F(RemPiO2, 2, 1)                  \
   F(MathAtan2, 2, 1)                \
   F(MathExpRT, 1, 1)                \
-  F(MathClz32, 1, 1)                \
-  F(MathFloor, 1, 1)                \
   F(MathPow, 2, 1)                  \
   F(MathPowRT, 2, 1)                \
-  F(RoundNumber, 1, 1)              \
-  F(MathSqrt, 1, 1)                 \
-  F(MathFround, 1, 1)               \
   F(GenerateRandomNumbers, 1, 1)
 
 
@@ -394,7 +373,6 @@
   F(NumberToStringSkipCache, 1, 1)     \
   F(NumberToIntegerMapMinusZero, 1, 1) \
   F(NumberToSmi, 1, 1)                 \
-  F(NumberImul, 2, 1)                  \
   F(SmiLexicographicCompare, 2, 1)     \
   F(MaxSmi, 0, 1)                      \
   F(IsSmi, 1, 1)                       \
@@ -404,6 +382,7 @@
 
 #define FOR_EACH_INTRINSIC_OBJECT(F)                 \
   F(GetPrototype, 1, 1)                              \
+  F(ObjectHasOwnProperty, 2, 1)                      \
   F(InternalSetPrototype, 2, 1)                      \
   F(SetPrototype, 2, 1)                              \
   F(GetOwnProperty_Legacy, 2, 1)                     \
@@ -419,7 +398,6 @@
   F(AppendElement, 2, 1)                             \
   F(DeleteProperty_Sloppy, 2, 1)                     \
   F(DeleteProperty_Strict, 2, 1)                     \
-  F(HasOwnProperty, 2, 1)                            \
   F(HasProperty, 2, 1)                               \
   F(PropertyIsEnumerable, 2, 1)                      \
   F(GetOwnPropertyKeys, 2, 1)                        \
@@ -440,7 +418,6 @@
   F(HasFastPackedElements, 1, 1)                     \
   F(ValueOf, 1, 1)                                   \
   F(IsJSReceiver, 1, 1)                              \
-  F(IsStrong, 1, 1)                                  \
   F(ClassOf, 1, 1)                                   \
   F(DefineGetterPropertyUnchecked, 4, 1)             \
   F(DefineSetterPropertyUnchecked, 4, 1)             \
@@ -457,6 +434,7 @@
   F(SameValueZero, 2, 1)                             \
   F(Compare, 3, 1)                                   \
   F(InstanceOf, 2, 1)                                \
+  F(OrdinaryHasInstance, 2, 1)                       \
   F(HasInPrototypeChain, 2, 1)                       \
   F(CreateIterResultObject, 2, 1)                    \
   F(IsAccessCheckNeeded, 1, 1)                       \
@@ -854,7 +832,6 @@
   F(Bool8x16Equal, 2, 1)               \
   F(Bool8x16NotEqual, 2, 1)
 
-
 #define FOR_EACH_INTRINSIC_STRINGS(F)     \
   F(StringReplaceOneCharWithString, 3, 1) \
   F(StringIndexOf, 3, 1)                  \
@@ -875,7 +852,12 @@
   F(StringTrim, 3, 1)                     \
   F(TruncateString, 2, 1)                 \
   F(NewString, 2, 1)                      \
-  F(StringEquals, 2, 1)                   \
+  F(StringLessThan, 2, 1)                 \
+  F(StringLessThanOrEqual, 2, 1)          \
+  F(StringGreaterThan, 2, 1)              \
+  F(StringGreaterThanOrEqual, 2, 1)       \
+  F(StringEqual, 2, 1)                    \
+  F(StringNotEqual, 2, 1)                 \
   F(FlattenString, 1, 1)                  \
   F(StringCharFromCode, 1, 1)             \
   F(StringCharAt, 2, 1)                   \
@@ -885,7 +867,6 @@
   F(TwoByteSeqStringSetChar, 3, 1)        \
   F(StringCharCodeAt, 2, 1)
 
-
 #define FOR_EACH_INTRINSIC_SYMBOL(F) \
   F(CreateSymbol, 1, 1)              \
   F(CreatePrivateSymbol, 1, 1)       \
@@ -941,7 +922,8 @@
   F(HasFixedInt32Elements, 1, 1)              \
   F(HasFixedFloat32Elements, 1, 1)            \
   F(HasFixedFloat64Elements, 1, 1)            \
-  F(HasFixedUint8ClampedElements, 1, 1)
+  F(HasFixedUint8ClampedElements, 1, 1)       \
+  F(SpeciesProtector, 0, 1)
 
 #define FOR_EACH_INTRINSIC_TYPEDARRAY(F)     \
   F(ArrayBufferGetByteLength, 1, 1)          \
@@ -995,7 +977,6 @@
   F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
   F(CallIC_Miss, 3, 1)                       \
   F(CompareIC_Miss, 3, 1)                    \
-  F(CompareNilIC_Miss, 1, 1)                 \
   F(ElementsTransitionAndStoreIC_Miss, 5, 1) \
   F(KeyedLoadIC_Miss, 4, 1)                  \
   F(KeyedLoadIC_MissFromStubFailure, 4, 1)   \
@@ -1151,7 +1132,7 @@
   // Used in runtime.cc and hydrogen's VisitArrayLiteral.
   MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
       Isolate* isolate, Handle<LiteralsArray> literals,
-      Handle<FixedArray> elements, bool is_strong);
+      Handle<FixedArray> elements);
 
   static MaybeHandle<JSArray> GetInternalProperties(Isolate* isolate,
                                                     Handle<Object>);
diff --git a/src/s390/OWNERS b/src/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/s390/assembler-s390-inl.h b/src/s390/assembler-s390-inl.h
new file mode 100644
index 0000000..400d543
--- /dev/null
+++ b/src/s390/assembler-s390-inl.h
@@ -0,0 +1,593 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_S390_ASSEMBLER_S390_INL_H_
+#define V8_S390_ASSEMBLER_S390_INL_H_
+
+#include "src/s390/assembler-s390.h"
+
+#include "src/assembler.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
+void RelocInfo::apply(intptr_t delta) {
+  // Absolute code pointer inside code object moves with the code object.
+  if (IsInternalReference(rmode_)) {
+    // Jump table entry
+    Address target = Memory::Address_at(pc_);
+    Memory::Address_at(pc_) = target + delta;
+  } else if (IsCodeTarget(rmode_)) {
+    SixByteInstr instr =
+        Instruction::InstructionBits(reinterpret_cast<const byte*>(pc_));
+    int32_t dis = static_cast<int32_t>(instr & 0xFFFFFFFF) * 2  // halfwords
+                  - static_cast<int32_t>(delta);
+    instr >>= 32;  // Clear the 4-byte displacement field.
+    instr <<= 32;
+    instr |= static_cast<uint32_t>(dis / 2);
+    Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc_),
+                                                  instr);
+  } else {
+    // mov sequence
+    DCHECK(IsInternalReferenceEncoded(rmode_));
+    Address target = Assembler::target_address_at(pc_, host_);
+    Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
+                                     SKIP_ICACHE_FLUSH);
+  }
+}
+
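
For illustration (not part of the patch): for code targets, RelocInfo::apply above rewrites the signed 32-bit halfword displacement in the low word of the 6-byte instruction so the branch still reaches the same absolute address after the code object moves by delta. A standalone sketch of that arithmetic follows; the 64-bit value is a simplified stand-in for a 6-byte BRASL/BRCL encoding.

// Sketch: keep a PC-relative, halfword-scaled displacement pointing at the
// same absolute target when the instruction itself moves by `delta` bytes.
#include <cassert>
#include <cstdint>

static uint64_t AdjustDisplacement(uint64_t instr, intptr_t delta) {
  int32_t dis_bytes = static_cast<int32_t>(instr & 0xFFFFFFFF) * 2;  // halfwords -> bytes
  int32_t new_dis = dis_bytes - static_cast<int32_t>(delta);         // pc moved by delta
  instr >>= 32;                                                      // clear the old field
  instr <<= 32;
  instr |= static_cast<uint32_t>(new_dis / 2);                       // back to halfwords
  return instr;
}

int main() {
  // Opcode bits in the high word, displacement of 16 halfwords (32 bytes) in the low word.
  uint64_t instr = (0xC005ULL << 32) | 0x00000010;
  // After moving the code forward by 8 bytes, the displacement must shrink to
  // 24 bytes, i.e. 12 halfwords.
  uint64_t adjusted = AdjustDisplacement(instr, 8);
  assert((adjusted & 0xFFFFFFFF) == 12);
  assert((adjusted >> 32) == 0xC005);  // opcode bits untouched
  return 0;
}
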
+Address RelocInfo::target_internal_reference() {
+  if (IsInternalReference(rmode_)) {
+    // Jump table entry
+    return Memory::Address_at(pc_);
+  } else {
+    // mov sequence
+    DCHECK(IsInternalReferenceEncoded(rmode_));
+    return Assembler::target_address_at(pc_, host_);
+  }
+}
+
+Address RelocInfo::target_internal_reference_address() {
+  DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+  return reinterpret_cast<Address>(pc_);
+}
+
+Address RelocInfo::target_address() {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::target_address_address() {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+         rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+
+  // Read the address of the word containing the target_address in an
+  // instruction stream.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.
+  // For an instruction like LIS/ORI where the target bits are mixed into the
+  // instruction bits, the size of the target will be zero, indicating that the
+  // serializer should not step forward in memory after a target is resolved
+  // and written.
+  return reinterpret_cast<Address>(pc_);
+}
+
+Address RelocInfo::constant_pool_entry_address() {
+  UNREACHABLE();
+  return NULL;
+}
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+void RelocInfo::set_target_address(Address target,
+                                   WriteBarrierMode write_barrier_mode,
+                                   ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  Assembler::set_target_address_at(isolate_, pc_, host_, target,
+                                   icache_flush_mode);
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+      IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
+}
+
+Address Assembler::target_address_from_return_address(Address pc) {
+  // Returns the address of the call target from the return address that will
+  // be returned to after a call.
+  // Sequence is:
+  //    BRASL r14, RI
+  return pc - kCallTargetAddressOffset;
+}
+
+Address Assembler::return_address_from_call_start(Address pc) {
+  // Sequence is:
+  //    BRASL r14, RI
+  return pc + kCallTargetAddressOffset;
+}
+
+Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
+  SixByteInstr instr =
+      Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
+  int index = instr & 0xFFFFFFFF;
+  return code_targets_[index];
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
+                                   icache_flush_mode);
+}
+
+Object* RelocInfo::target_object() {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  if (rmode_ == EMBEDDED_OBJECT) {
+    return Handle<Object>(
+        reinterpret_cast<Object**>(Assembler::target_address_at(pc_, host_)));
+  } else {
+    return origin->code_target_object_handle_at(pc_);
+  }
+}
+
+void RelocInfo::set_target_object(Object* target,
+                                  WriteBarrierMode write_barrier_mode,
+                                  ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(isolate_, pc_, host_,
+                                   reinterpret_cast<Address>(target),
+                                   icache_flush_mode);
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
+  }
+}
+
+Address RelocInfo::target_external_reference() {
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
+  return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  return target_address();
+}
+
+void RelocInfo::set_target_runtime_entry(Address target,
+                                         WriteBarrierMode write_barrier_mode,
+                                         ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  if (target_address() != target)
+    set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+Cell* RelocInfo::target_cell() {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
+                                ICacheFlushMode icache_flush_mode) {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  Address address = cell->address() + Cell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
+  }
+}
+
+#if V8_TARGET_ARCH_S390X
+// NOP(2byte) + PUSH + MOV + BASR =
+// NOP + LAY + STG + IIHF + IILF + BASR
+static const int kCodeAgingSequenceLength = 28;
+static const int kCodeAgingTargetDelta = 14;  // Jump past NOP + PUSH to IIHF
+                                              // LAY + 4 * STG + LA
+static const int kNoCodeAgeSequenceLength = 34;
+#else
+#if (V8_HOST_ARCH_S390)
+// NOP + NILH + LAY + ST + IILF + BASR
+static const int kCodeAgingSequenceLength = 24;
+static const int kCodeAgingTargetDelta = 16;  // Jump past NOP to IILF
+// NILH + LAY + 4 * ST + LA
+static const int kNoCodeAgeSequenceLength = 30;
+#else
+// NOP + LAY + ST + IILF + BASR
+static const int kCodeAgingSequenceLength = 20;
+static const int kCodeAgingTargetDelta = 12;  // Jump past NOP to IILF
+// LAY + 4 * ST + LA
+static const int kNoCodeAgeSequenceLength = 26;
+#endif
+#endif
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  UNREACHABLE();  // This should never be reached on S390.
+  return Handle<Object>();
+}
+
+Code* RelocInfo::code_age_stub() {
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  return Code::GetCodeFromTargetAddress(
+      Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
+}
+
+void RelocInfo::set_code_age_stub(Code* stub,
+                                  ICacheFlushMode icache_flush_mode) {
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
+                                   stub->instruction_start(),
+                                   icache_flush_mode);
+}
+
+Address RelocInfo::debug_call_address() {
+  DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+  return Assembler::target_address_at(pc_, host_);
+}
+
+void RelocInfo::set_debug_call_address(Address target) {
+  DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+  Assembler::set_target_address_at(isolate_, pc_, host_, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
+}
+
+void RelocInfo::WipeOut() {
+  DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+         IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+  if (IsInternalReference(rmode_)) {
+    // Jump table entry
+    Memory::Address_at(pc_) = NULL;
+  } else if (IsInternalReferenceEncoded(rmode_)) {
+    // mov sequence
+    // Currently used only by deserializer, no need to flush.
+    Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
+                                     SKIP_ICACHE_FLUSH);
+  } else {
+    Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
+  }
+}
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitEmbeddedPointer(this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::CELL) {
+    visitor->VisitCell(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(this);
+  } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+    visitor->VisitInternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    visitor->VisitCodeAgeSequence(this);
+  } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+             IsPatchedDebugBreakSlotSequence()) {
+    visitor->VisitDebugTarget(this);
+  } else if (IsRuntimeEntry(mode)) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+template <typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    StaticVisitor::VisitCodeTarget(heap, this);
+  } else if (mode == RelocInfo::CELL) {
+    StaticVisitor::VisitCell(heap, this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    StaticVisitor::VisitExternalReference(this);
+  } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+    StaticVisitor::VisitInternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    StaticVisitor::VisitCodeAgeSequence(heap, this);
+  } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+             IsPatchedDebugBreakSlotSequence()) {
+    StaticVisitor::VisitDebugTarget(heap, this);
+  } else if (IsRuntimeEntry(mode)) {
+    StaticVisitor::VisitRuntimeEntry(this);
+  }
+}
+
+// Operand constructors
+Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
+  rm_ = no_reg;
+  imm_ = immediate;
+  rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f) {
+  rm_ = no_reg;
+  imm_ = reinterpret_cast<intptr_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm_ = reinterpret_cast<intptr_t>(value);
+  rmode_ = kRelocInfo_NONEPTR;
+}
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+  rmode_ = kRelocInfo_NONEPTR;  // S390 -why doesn't ARM do this?
+}
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
+
+int32_t Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode,
+                                    TypeFeedbackId ast_id) {
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
+  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+    SetRecordedAstId(ast_id);
+    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID);
+  } else {
+    RecordRelocInfo(rmode);
+  }
+
+  int current = code_targets_.length();
+  if (current > 0 && code_targets_.last().is_identical_to(target)) {
+    // Optimization if we keep jumping to the same code target.
+    current--;
+  } else {
+    code_targets_.Add(target);
+  }
+  return current;
+}
+
+// Helper to emit the binary encoding of a 2 byte instruction
+void Assembler::emit2bytes(uint16_t x) {
+  CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+  // We need to emit instructions in big endian format as disassembler /
+  // simulator require the first byte of the instruction in order to decode
+  // the instruction length.  Swap the bytes.
+  x = ((x & 0x00FF) << 8) | ((x & 0xFF00) >> 8);
+#endif
+  *reinterpret_cast<uint16_t*>(pc_) = x;
+  pc_ += 2;
+}
+
+// Helper to emit the binary encoding of a 4 byte instruction
+void Assembler::emit4bytes(uint32_t x) {
+  CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+  // We need to emit instructions in big endian format as disassembler /
+  // simulator require the first byte of the instruction in order to decode
+  // the instruction length.  Swap the bytes.
+  x = ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) |
+      ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24);
+#endif
+  *reinterpret_cast<uint32_t*>(pc_) = x;
+  pc_ += 4;
+}
+
+// Helper to emit the binary encoding of a 6 byte instruction
+void Assembler::emit6bytes(uint64_t x) {
+  CheckBuffer();
+#if V8_TARGET_LITTLE_ENDIAN
+  // We need to emit instructions in big endian format as disassembler /
+  // simulator require the first byte of the instruction in order to decode
+  // the instruction length.  Swap the bytes.
+  x = (static_cast<uint64_t>(x & 0xFF) << 40) |
+      (static_cast<uint64_t>((x >> 8) & 0xFF) << 32) |
+      (static_cast<uint64_t>((x >> 16) & 0xFF) << 24) |
+      (static_cast<uint64_t>((x >> 24) & 0xFF) << 16) |
+      (static_cast<uint64_t>((x >> 32) & 0xFF) << 8) |
+      (static_cast<uint64_t>((x >> 40) & 0xFF));
+  x |= (*reinterpret_cast<uint64_t*>(pc_) >> 48) << 48;
+#else
+  // We need to pad two bytes of zeros in order to get the 6-bytes
+  // stored from low address.
+  x = x << 16;
+  x |= *reinterpret_cast<uint64_t*>(pc_) & 0xFFFF;
+#endif
+  // It is safe to store 8-bytes, as CheckBuffer() guarantees we have kGap
+  // space left over.
+  *reinterpret_cast<uint64_t*>(pc_) = x;
+  pc_ += 6;
+}
+
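
For illustration (not part of the patch): emit2bytes/emit4bytes/emit6bytes above byte-swap on little-endian hosts so the emitted instruction stream is always stored big-endian, which lets the disassembler and simulator read the first byte to determine instruction length. A minimal standalone sketch of the 4-byte case; Emit4BigEndian is a made-up helper.

// Emit a 32-bit value into a byte buffer in big-endian order regardless of
// the host's endianness, analogous to Assembler::emit4bytes.
#include <cassert>
#include <cstdint>
#include <vector>

static void Emit4BigEndian(std::vector<uint8_t>* buf, uint32_t x) {
  uint8_t bytes[4] = {
      static_cast<uint8_t>(x >> 24), static_cast<uint8_t>(x >> 16),
      static_cast<uint8_t>(x >> 8), static_cast<uint8_t>(x)};
  buf->insert(buf->end(), bytes, bytes + 4);
}

int main() {
  std::vector<uint8_t> code;
  Emit4BigEndian(&code, 0xA7F40005);  // arbitrary example word, stored as A7 F4 00 05
  assert(code[0] == 0xA7 && code[1] == 0xF4 && code[2] == 0x00 && code[3] == 0x05);
  return 0;
}
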
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+// Fetch the 32bit value from the FIXED_SEQUENCE IIHF / IILF
+Address Assembler::target_address_at(Address pc, Address constant_pool) {
+  // S390 Instruction!
+  // We want to check for instructions generated by Asm::mov()
+  Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
+  SixByteInstr instr_1 =
+      Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
+
+  if (BRASL == op1 || BRCL == op1) {
+    int32_t dis = static_cast<int32_t>(instr_1 & 0xFFFFFFFF) * 2;
+    return reinterpret_cast<Address>(reinterpret_cast<uint64_t>(pc) + dis);
+  }
+
+#if V8_TARGET_ARCH_S390X
+  int instr1_length =
+      Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
+  Opcode op2 = Instruction::S390OpcodeValue(
+      reinterpret_cast<const byte*>(pc + instr1_length));
+  SixByteInstr instr_2 = Instruction::InstructionBits(
+      reinterpret_cast<const byte*>(pc + instr1_length));
+  // IIHF for hi_32, IILF for lo_32
+  if (IIHF == op1 && IILF == op2) {
+    return reinterpret_cast<Address>(((instr_1 & 0xFFFFFFFF) << 32) |
+                                     ((instr_2 & 0xFFFFFFFF)));
+  }
+#else
+  // IILF loads 32-bits
+  if (IILF == op1 || CFI == op1) {
+    return reinterpret_cast<Address>((instr_1 & 0xFFFFFFFF));
+  }
+#endif
+
+  UNIMPLEMENTED();
+  return (Address)0;
+}
+
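
For illustration (not part of the patch): on s390x the absolute 64-bit target lives in the 32-bit immediates of an IIHF/IILF pair, so target_address_at just reassembles the two halves. A standalone sketch of the split and reassembly, with the encodings reduced to their immediate fields:

// Sketch: split a 64-bit address across two 32-bit immediates (IIHF carries
// the high half, IILF the low half) and reassemble it.
#include <cassert>
#include <cstdint>

struct MovSequence {
  uint32_t iihf_imm;  // immediate of the IIHF instruction (high 32 bits)
  uint32_t iilf_imm;  // immediate of the IILF instruction (low 32 bits)
};

static MovSequence Encode(uint64_t target) {
  return {static_cast<uint32_t>(target >> 32),
          static_cast<uint32_t>(target & 0xFFFFFFFF)};
}

static uint64_t Decode(const MovSequence& seq) {
  return (static_cast<uint64_t>(seq.iihf_imm) << 32) | seq.iilf_imm;
}

int main() {
  uint64_t target = 0x00007F1234569ABCULL;
  MovSequence seq = Encode(target);
  assert(seq.iihf_imm == 0x00007F12 && seq.iilf_imm == 0x34569ABC);
  assert(Decode(seq) == target);
  return 0;
}
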
+// This sets the branch destination (which gets loaded at the call address).
+// This is for calls and branches within generated code.  The serializer
+// has already deserialized the mov instructions etc.
+// There is a FIXED_SEQUENCE assumption here
+void Assembler::deserialization_set_special_target_at(
+    Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+  set_target_address_at(isolate, instruction_payload, code, target);
+}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+    Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+  if (RelocInfo::IsInternalReferenceEncoded(mode)) {
+    Code* code = NULL;
+    set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
+  } else {
+    Memory::Address_at(pc) = target;
+  }
+}
+
+// This code assumes the FIXED_SEQUENCE of IIHF/IILF
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+                                      Address constant_pool, Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  // Check for instructions generated by Asm::mov()
+  Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
+  SixByteInstr instr_1 =
+      Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
+  bool patched = false;
+
+  if (BRASL == op1 || BRCL == op1) {
+    instr_1 >>= 32;  // Zero out the lower 32-bits
+    instr_1 <<= 32;
+    int32_t halfwords = (target - pc) / 2;  // number of halfwords
+    instr_1 |= static_cast<uint32_t>(halfwords);
+    Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
+                                                  instr_1);
+    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+      Assembler::FlushICache(isolate, pc, 6);
+    }
+    patched = true;
+  } else {
+#if V8_TARGET_ARCH_S390X
+    int instr1_length =
+        Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
+    Opcode op2 = Instruction::S390OpcodeValue(
+        reinterpret_cast<const byte*>(pc + instr1_length));
+    SixByteInstr instr_2 = Instruction::InstructionBits(
+        reinterpret_cast<const byte*>(pc + instr1_length));
+    // IIHF for hi_32, IILF for lo_32
+    if (IIHF == op1 && IILF == op2) {
+      // IIHF
+      instr_1 >>= 32;  // Zero out the lower 32-bits
+      instr_1 <<= 32;
+      instr_1 |= reinterpret_cast<uint64_t>(target) >> 32;
+
+      Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
+                                                    instr_1);
+
+      // IILF
+      instr_2 >>= 32;
+      instr_2 <<= 32;
+      instr_2 |= reinterpret_cast<uint64_t>(target) & 0xFFFFFFFF;
+
+      Instruction::SetInstructionBits<SixByteInstr>(
+          reinterpret_cast<byte*>(pc + instr1_length), instr_2);
+      if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+        Assembler::FlushICache(isolate, pc, 12);
+      }
+      patched = true;
+    }
+#else
+    // IILF loads 32-bits
+    if (IILF == op1 || CFI == op1) {
+      instr_1 >>= 32;  // Zero out the lower 32-bits
+      instr_1 <<= 32;
+      instr_1 |= reinterpret_cast<uint32_t>(target);
+
+      Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
+                                                    instr_1);
+      if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+        Assembler::FlushICache(isolate, pc, 6);
+      }
+      patched = true;
+    }
+#endif
+  }
+  if (!patched) UNREACHABLE();
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_ASSEMBLER_S390_INL_H_
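
For illustration (not part of the patch): for BRASL/BRCL, set_target_address_at above stores (target - pc) in halfwords in the low 32 bits of the instruction, and target_address_at reverses that. A standalone sketch of the round trip; the opcode constant below is only for illustration.

// Sketch of patching a halfword-scaled relative displacement into the low
// 32 bits of a 6-byte-style encoding and reading the target back.
#include <cassert>
#include <cstdint>

static uint64_t PatchRelativeTarget(uint64_t instr, uint64_t pc, uint64_t target) {
  int64_t halfwords = (static_cast<int64_t>(target) - static_cast<int64_t>(pc)) / 2;
  instr >>= 32;  // zero out the old displacement
  instr <<= 32;
  instr |= static_cast<uint32_t>(halfwords);
  return instr;
}

static uint64_t ReadRelativeTarget(uint64_t instr, uint64_t pc) {
  int32_t dis = static_cast<int32_t>(instr & 0xFFFFFFFF) * 2;  // halfwords -> bytes
  return pc + dis;
}

int main() {
  const uint64_t kOpcodeBits = 0xC0E5ULL << 32;  // BRASL-style prefix, for illustration only
  uint64_t pc = 0x10000;
  uint64_t target = 0x10080;                     // 128 bytes ahead
  uint64_t instr = PatchRelativeTarget(kOpcodeBits, pc, target);
  assert((instr & 0xFFFFFFFF) == 64);            // 64 halfwords
  assert(ReadRelativeTarget(instr, pc) == target);
  // Backward branches work the same way via a negative displacement.
  instr = PatchRelativeTarget(kOpcodeBits, pc, pc - 0x40);
  assert(ReadRelativeTarget(instr, pc) == pc - 0x40);
  return 0;
}
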
diff --git a/src/s390/assembler-s390.cc b/src/s390/assembler-s390.cc
new file mode 100644
index 0000000..35ba431
--- /dev/null
+++ b/src/s390/assembler-s390.cc
@@ -0,0 +1,3061 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#include "src/s390/assembler-s390.h"
+
+#if V8_TARGET_ARCH_S390
+
+#if V8_HOST_ARCH_S390
+#include <elf.h>  // Required for auxv checks for STFLE support
+#endif
+
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
+#include "src/s390/assembler-s390-inl.h"
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// Get the CPU features enabled by the build.
+static unsigned CpuFeaturesImpliedByCompiler() {
+  unsigned answer = 0;
+  return answer;
+}
+
+// Check whether the Store Facility List Extended (STFLE) instruction is
+// available on the platform.  It returns a bit vector of the enabled hardware
+// facilities.
+static bool supportsSTFLE() {
+#if V8_HOST_ARCH_S390
+  static bool read_tried = false;
+  static uint32_t auxv_hwcap = 0;
+
+  if (!read_tried) {
+    // Open the AUXV (auxiliary vector) pseudo-file
+    int fd = open("/proc/self/auxv", O_RDONLY);
+
+    read_tried = true;
+    if (fd != -1) {
+#if V8_TARGET_ARCH_S390X
+      static Elf64_auxv_t buffer[16];
+      Elf64_auxv_t* auxv_element;
+#else
+      static Elf32_auxv_t buffer[16];
+      Elf32_auxv_t* auxv_element;
+#endif
+      int bytes_read = 0;
+      while (bytes_read >= 0) {
+        // Read a chunk of the AUXV
+        bytes_read = read(fd, buffer, sizeof(buffer));
+        // Locate and read the platform field of AUXV if it is in the chunk
+        for (auxv_element = buffer;
+             auxv_element + sizeof(auxv_element) <= buffer + bytes_read &&
+             auxv_element->a_type != AT_NULL;
+             auxv_element++) {
+          // We are looking for HWCAP entry in AUXV to search for STFLE support
+          if (auxv_element->a_type == AT_HWCAP) {
+            /* Note: Both auxv_hwcap and buffer are static */
+            auxv_hwcap = auxv_element->a_un.a_val;
+            goto done_reading;
+          }
+        }
+      }
+    done_reading:
+      close(fd);
+    }
+  }
+
+  // Did not find result
+  if (0 == auxv_hwcap) {
+    return false;
+  }
+
+  // HWCAP_S390_STFLE is defined to be 4 in include/asm/elf.h.  Currently
+  // hardcoded in case that include file does not exist.
+  const uint32_t HWCAP_S390_STFLE = 4;
+  return (auxv_hwcap & HWCAP_S390_STFLE);
+#else
+  // STFLE is not available on non-s390 hosts
+  return false;
+#endif
+}
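+
+// Illustrative note (not part of the upstream change): the /proc/self/auxv
+// scan above computes the same HWCAP word that glibc 2.16+ exposes through
+// getauxval().  A minimal sketch of the equivalent check, assuming
+// <sys/auxv.h> is available:
+//
+//   #include <sys/auxv.h>
+//   const uint32_t kHwcapS390Stfle = 4;  // HWCAP_S390_STFLE
+//   bool has_stfle = (getauxval(AT_HWCAP) & kHwcapS390Stfle) != 0;
+//
+// The manual parse presumably keeps the probe working on C libraries that
+// lack getauxval().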
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+  supported_ |= CpuFeaturesImpliedByCompiler();
+  icache_line_size_ = 256;
+
+  // Only use statically determined features for cross compile (snapshot).
+  if (cross_compile) return;
+
+#ifdef DEBUG
+  initialized_ = true;
+#endif
+
+  static bool performSTFLE = supportsSTFLE();
+
+// The probe below can only run when the host is S390, as it uses inlined
+// S390 assembly to test for facilities.
+#if V8_HOST_ARCH_S390
+  if (performSTFLE) {
+    // STFLE D(B) requires:
+    //    GPR0 to specify # of double words to update minus 1.
+    //      i.e. GPR0 = 0 for 1 doubleword
+    //    D(B) to specify the memory location in which to store the facility bits
+    // The facilities we are checking for are:
+    //   Bit 45 - Distinct Operands for instructions like ARK, SRK, etc.
+    // As such, we require only 1 double word
+    int64_t facilities[1];
+    facilities[0] = 0;
+    // LHI sets up GPR0.
+    // STFLE is emitted via .insn, as the assembler may not recognize the
+    // mnemonic.
+    // We declare that the instructions clobber r0 (LHI) and the CC (STFLE).
+    asm volatile(
+        "lhi   0,0\n"
+        ".insn s,0xb2b00000,%0\n"
+        : "=Q"(facilities)
+        :
+        : "cc", "r0");
+
+    // Test for Distinct Operands Facility - Bit 45
+    if (facilities[0] & (1lu << (63 - 45))) {
+      supported_ |= (1u << DISTINCT_OPS);
+    }
+    // Test for General Instruction Extension Facility - Bit 34
+    if (facilities[0] & (1lu << (63 - 34))) {
+      supported_ |= (1u << GENERAL_INSTR_EXT);
+    }
+    // Test for Floating Point Extension Facility - Bit 37
+    if (facilities[0] & (1lu << (63 - 37))) {
+      supported_ |= (1u << FLOATING_POINT_EXT);
+    }
+  }
+#else
+  // All distinct ops instructions can be simulated
+  supported_ |= (1u << DISTINCT_OPS);
+  // RISBG can be simulated
+  supported_ |= (1u << GENERAL_INSTR_EXT);
+
+  supported_ |= (1u << FLOATING_POINT_EXT);
+  USE(performSTFLE);  // To avoid an unused-variable warning
+#endif
+  supported_ |= (1u << FPU);
+}
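+
+// Worked example of the facility test above (illustrative only): STFLE
+// numbers facility bits from the leftmost bit of the stored doubleword, so
+// facility bit N is tested with 1 << (63 - N).  For the Distinct-Operands
+// facility (bit 45) the mask is 1 << 18 == 0x40000; if that bit is set in
+// facilities[0], DISTINCT_OPS is added to supported_.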
+
+void CpuFeatures::PrintTarget() {
+  const char* s390_arch = NULL;
+
+#if V8_TARGET_ARCH_S390X
+  s390_arch = "s390x";
+#else
+  s390_arch = "s390";
+#endif
+
+  printf("target %s\n", s390_arch);
+}
+
+void CpuFeatures::PrintFeatures() {
+  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
+  printf("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
+  printf("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
+  printf("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
+}
+
+Register ToRegister(int num) {
+  DCHECK(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = {r0, r1, r2,  r3, r4, r5,  r6,  r7,
+                                 r8, r9, r10, fp, ip, r13, r14, sp};
+  return kRegisters[num];
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask =
+    RelocInfo::kCodeTargetMask | 1 << RelocInfo::INTERNAL_REFERENCE;
+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially
+  // coded.  Being specially coded on S390 means that it is an iihf/iilf
+  // instruction sequence, and that is always the case inside code
+  // objects.
+  return true;
+}
+
+bool RelocInfo::IsInConstantPool() { return false; }
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-s390-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+  AllowDeferredHandleDereference using_raw_address;
+  rm_ = no_reg;
+  // Verify all Objects referred by code are NOT in new space.
+  Object* obj = *handle;
+  if (obj->IsHeapObject()) {
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    imm_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // no relocation needed
+    imm_ = reinterpret_cast<intptr_t>(obj);
+    rmode_ = kRelocInfo_NONEPTR;
+  }
+}
+
+MemOperand::MemOperand(Register rn, int32_t offset) {
+  baseRegister = rn;
+  indexRegister = r0;
+  offset_ = offset;
+}
+
+MemOperand::MemOperand(Register rx, Register rb, int32_t offset) {
+  baseRegister = rb;
+  indexRegister = rx;
+  offset_ = offset;
+}
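+
+// Note (illustrative): following the usual S390 addressing convention,
+// register 0 in the index slot means "no index register".  So the
+// single-register constructor above, e.g. MemOperand(fp, 8), addresses
+// 8(fp) with no index, while MemOperand(r3, fp, 8) addresses 8(r3,fp)
+// with r3 as the index and fp as the base.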
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+    : AssemblerBase(isolate, buffer, buffer_size),
+      recorded_ast_id_(TypeFeedbackId::None()),
+      code_targets_(100),
+      positions_recorder_(this) {
+  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+  last_bound_pos_ = 0;
+  ClearRecordedAstId();
+  relocations_.reserve(128);
+}
+
+void Assembler::GetCode(CodeDesc* desc) {
+  EmitRelocations();
+
+  // Set up code descriptor.
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->origin = this;
+}
+
+void Assembler::Align(int m) {
+  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop(0);
+  }
+}
+
+void Assembler::CodeTargetAlign() { Align(8); }
+
+Condition Assembler::GetCondition(Instr instr) {
+  switch (instr & kCondMask) {
+    case BT:
+      return eq;
+    case BF:
+      return ne;
+    default:
+      UNIMPLEMENTED();
+  }
+  return al;
+}
+
+#if V8_TARGET_ARCH_S390X
+// This code assumes a FIXED_SEQUENCE for 64bit loads (iihf/iilf)
+bool Assembler::Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2) {
+  // Check the instructions are the iihf/iilf load into ip
+  return (((instr1 >> 32) == 0xC0C8) && ((instr2 >> 32) == 0xC0C9));
+}
+#else
+// This code assumes a FIXED_SEQUENCE for 32bit loads (iilf)
+bool Assembler::Is32BitLoadIntoIP(SixByteInstr instr) {
+  // Check the instruction is an iilf load into ip/r12.
+  return ((instr >> 32) == 0xC0C9);
+}
+#endif
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
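+
+// Illustrative walk of the link chain (a sketch, not part of the assembler):
+// the first reference to an unbound label is emitted with an offset of 0
+// (a branch to itself), which target_at() reports as kEndOfChain; every
+// later reference stores the offset back to the previous one.  bind_to()
+// starts at L->pos(), follows these offsets via next()/target_at(), patches
+// each instruction to the real target with target_at_put(), and stops once
+// target_at() returns kEndOfChain.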
+
+// Returns the target address of the relative instructions, typically
+// of the form: pos + imm (where immediate is in # of halfwords for
+// BR* and LARL).
+int Assembler::target_at(int pos) {
+  SixByteInstr instr = instr_at(pos);
+  // Check which type of branch this is: 16- or 32-bit offset.
+  Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+
+  if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+    int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
+    imm16 <<= 1;  // BRC immediate is in # of halfwords
+    if (imm16 == 0) return kEndOfChain;
+    return pos + imm16;
+  } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
+             BRASL == opcode) {
+    int32_t imm32 =
+        static_cast<int32_t>(instr & (static_cast<uint64_t>(0xffffffff)));
+    if (LLILF != opcode)
+      imm32 <<= 1;  // BR* + LARL treat immediate in # of halfwords
+    if (imm32 == 0) return kEndOfChain;
+    return pos + imm32;
+  }
+
+  // Unknown condition
+  DCHECK(false);
+  return -1;
+}
+
+// Update the target address of the current relative instruction.
+void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
+  SixByteInstr instr = instr_at(pos);
+  Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+
+  if (is_branch != nullptr) {
+    *is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
+                  opcode == BRCL || opcode == BRASL);
+  }
+
+  if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+    int16_t imm16 = target_pos - pos;
+    instr &= (~0xffff);
+    DCHECK(is_int16(imm16));
+    instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
+    return;
+  } else if (BRCL == opcode || LARL == opcode || BRASL == opcode) {
+    // Immediate is in # of halfwords
+    int32_t imm32 = target_pos - pos;
+    instr &= (~static_cast<uint64_t>(0xffffffff));
+    instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
+    return;
+  } else if (LLILF == opcode) {
+    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted label constant, not part of a branch.
+    // Make label relative to Code* of generated Code object.
+    int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+    instr &= (~static_cast<uint64_t>(0xffffffff));
+    instr_at_put<SixByteInstr>(pos, instr | imm32);
+    return;
+  }
+  DCHECK(false);
+}
+
+// Returns the maximum number of bits given instruction can address.
+int Assembler::max_reach_from(int pos) {
+  Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
+
+  // Check which type of instr.  In theory, we can return
+  // the values below + 1, given offset is # of halfwords
+  if (BRC == opcode || BRCT == opcode || BRCTG == opcode) {
+    return 16;
+  } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
+             BRASL == opcode) {
+    return 31;  // Using 31 as workaround instead of 32 as
+                // is_intn(x,32) doesn't work on 32-bit platforms.
+                // llilf: Emitted label constant, not part of
+                //        a branch (regexp PushBacktrack).
+  }
+  DCHECK(false);
+  return 16;
+}
+
+void Assembler::bind_to(Label* L, int pos) {
+  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  bool is_branch = false;
+  while (L->is_linked()) {
+    int fixup_pos = L->pos();
+#ifdef DEBUG
+    int32_t offset = pos - fixup_pos;
+    int maxReach = max_reach_from(fixup_pos);
+#endif
+    next(L);  // call next before overwriting link with target at fixup_pos
+    DCHECK(is_intn(offset, maxReach));
+    target_at_put(fixup_pos, pos, &is_branch);
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+void Assembler::bind(Label* L) {
+  DCHECK(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+void Assembler::next(Label* L) {
+  DCHECK(L->is_linked());
+  int link = target_at(L->pos());
+  if (link == kEndOfChain) {
+    L->Unuse();
+  } else {
+    DCHECK(link >= 0);
+    L->link_to(link);
+  }
+}
+
+bool Assembler::is_near(Label* L, Condition cond) {
+  DCHECK(L->is_bound());
+  if (!L->is_bound()) return false;
+
+  int maxReach = ((cond == al) ? 26 : 16);
+  int offset = L->pos() - pc_offset();
+
+  return is_intn(offset, maxReach);
+}
+
+int Assembler::link(Label* L) {
+  int position;
+  if (L->is_bound()) {
+    position = L->pos();
+  } else {
+    if (L->is_linked()) {
+      position = L->pos();  // L's link
+    } else {
+      // was: target_pos = kEndOfChain;
+      // However, using self to mark the first reference
+      // should avoid most instances of branch offset overflow.  See
+      // target_at() for where this is converted back to kEndOfChain.
+      position = pc_offset();
+    }
+    L->link_to(pc_offset());
+  }
+
+  return position;
+}
+
+void Assembler::load_label_offset(Register r1, Label* L) {
+  int target_pos;
+  int constant;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+    constant = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      // was: target_pos = kEndOfChain;
+      // However, using branch to self to mark the first reference
+      // should avoid most instances of branch offset overflow.  See
+      // target_at() for where this is converted back to kEndOfChain.
+      target_pos = pc_offset();
+    }
+    L->link_to(pc_offset());
+
+    constant = target_pos - pc_offset();
+  }
+  llilf(r1, Operand(constant));
+}
+
+// Pseudo op - branch on condition
+void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
+  int offset = branch_offset;
+  if (is_bound && is_int16(offset)) {
+    brc(c, Operand(offset & 0xFFFF));  // short jump
+  } else {
+    brcl(c, Operand(offset));  // long jump
+  }
+}
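+
+// Usage sketch (illustrative): when the label is already bound and its byte
+// offset fits in 16 bits, the short 4-byte brc form is used; otherwise the
+// 6-byte brcl form is emitted, whose 32-bit halfword-scaled offset covers
+// any code object.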
+
+// 32-bit Store Multiple - short displacement (12-bits unsigned)
+void Assembler::stm(Register r1, Register r2, const MemOperand& src) {
+  rs_form(STM, r1, r2, src.rb(), src.offset());
+}
+
+// 32-bit Store Multiple - long displacement (20-bits signed)
+void Assembler::stmy(Register r1, Register r2, const MemOperand& src) {
+  rsy_form(STMY, r1, r2, src.rb(), src.offset());
+}
+
+// 64-bit Store Multiple - long displacement (20-bits signed)
+void Assembler::stmg(Register r1, Register r2, const MemOperand& src) {
+  rsy_form(STMG, r1, r2, src.rb(), src.offset());
+}
+
+// Exception-generating instructions and debugging support.
+// Stops with a non-negative code less than kNumOfWatchedStops support
+// enabling/disabling and a counter feature.  See simulator-s390.h.
+void Assembler::stop(const char* msg, Condition cond, int32_t code,
+                     CRegister cr) {
+  if (cond != al) {
+    Label skip;
+    b(NegateCondition(cond), &skip, Label::kNear);
+    bkpt(0);
+    bind(&skip);
+  } else {
+    bkpt(0);
+  }
+}
+
+void Assembler::bkpt(uint32_t imm16) {
+  // GDB software breakpoint instruction
+  emit2bytes(0x0001);
+}
+
+// Pseudo instructions.
+void Assembler::nop(int type) {
+  switch (type) {
+    case 0:
+      lr(r0, r0);
+      break;
+    case DEBUG_BREAK_NOP:
+      // TODO(john.yan): Use a better NOP break
+      oill(r3, Operand::Zero());
+      break;
+    default:
+      UNIMPLEMENTED();
+  }
+}
+
+// RR format: <insn> R1,R2
+//    +--------+----+----+
+//    | OpCode | R1 | R2 |
+//    +--------+----+----+
+//    0        8    12  15
+#define RR_FORM_EMIT(name, op) \
+  void Assembler::name(Register r1, Register r2) { rr_form(op, r1, r2); }
+
+void Assembler::rr_form(Opcode op, Register r1, Register r2) {
+  DCHECK(is_uint8(op));
+  emit2bytes(op * B8 | r1.code() * B4 | r2.code());
+}
+
+void Assembler::rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
+  DCHECK(is_uint8(op));
+  emit2bytes(op * B8 | r1.code() * B4 | r2.code());
+}
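+
+// Worked example of the RR encoding above (illustrative): with LR == 0x18,
+// r3.code() == 3 and r4.code() == 4,
+//   rr_form(LR, r3, r4);
+// emits 0x18 * B8 | 3 * B4 | 4 == 0x1834, i.e. the 2-byte instruction
+// "lr %r3,%r4".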
+
+// RR2 format: <insn> M1,R2
+//    +--------+----+----+
+//    | OpCode | M1 | R2 |
+//    +--------+----+----+
+//    0        8    12  15
+#define RR2_FORM_EMIT(name, op) \
+  void Assembler::name(Condition m1, Register r2) { rr_form(op, m1, r2); }
+
+void Assembler::rr_form(Opcode op, Condition m1, Register r2) {
+  DCHECK(is_uint8(op));
+  DCHECK(is_uint4(m1));
+  emit2bytes(op * B8 | m1 * B4 | r2.code());
+}
+
+// RX format: <insn> R1,D2(X2,B2)
+//    +--------+----+----+----+-------------+
+//    | OpCode | R1 | X2 | B2 |     D2      |
+//    +--------+----+----+----+-------------+
+//    0        8    12   16   20           31
+#define RX_FORM_EMIT(name, op)                                           \
+  void Assembler::name(Register r, const MemOperand& opnd) {             \
+    name(r, opnd.getIndexRegister(), opnd.getBaseRegister(),             \
+         opnd.getDisplacement());                                        \
+  }                                                                      \
+  void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
+    rx_form(op, r1, x2, b2, d2);                                         \
+  }
+void Assembler::rx_form(Opcode op, Register r1, Register x2, Register b2,
+                        Disp d2) {
+  DCHECK(is_uint8(op));
+  DCHECK(is_uint12(d2));
+  emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
+             d2);
+}
+
+void Assembler::rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+                        Disp d2) {
+  DCHECK(is_uint8(op));
+  DCHECK(is_uint12(d2));
+  emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
+             d2);
+}
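+
+// Worked example of the RX encoding above (illustrative): with LA == 0x41
+// and sp == r15, la(r2, MemOperand(sp, 8)) reduces to
+//   rx_form(LA, r2, r0, sp, 8);
+// which emits 0x41 * B24 | 2 * B20 | 0 * B16 | 15 * B12 | 8 == 0x4120F008,
+// i.e. "la %r2,8(%r15)".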
+
+// RI1 format: <insn> R1,I2
+//    +--------+----+----+------------------+
+//    | OpCode | R1 |OpCd|        I2        |
+//    +--------+----+----+------------------+
+//    0        8    12   16                31
+#define RI1_FORM_EMIT(name, op) \
+  void Assembler::name(Register r, const Operand& i2) { ri_form(op, r, i2); }
+
+void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
+  DCHECK(is_uint12(op));
+  DCHECK(is_uint16(i2.imm_) || is_int16(i2.imm_));
+  emit4bytes((op & 0xFF0) * B20 | r1.code() * B20 | (op & 0xF) * B16 |
+             (i2.imm_ & 0xFFFF));
+}
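+
+// Worked example of the RI encoding above (illustrative): LHI carries the
+// 12-bit opcode 0xA78 (0xA7 with sub-opcode 8), so lhi(r2, Operand(100))
+// emits 0xA70 * B20 | 2 * B20 | 8 * B16 | 0x0064 == 0xA7280064,
+// i.e. "lhi %r2,100".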
+
+// RI2 format: <insn> M1,I2
+//    +--------+----+----+------------------+
+//    | OpCode | M1 |OpCd|        I2        |
+//    +--------+----+----+------------------+
+//    0        8    12   16                31
+#define RI2_FORM_EMIT(name, op) \
+  void Assembler::name(Condition m, const Operand& i2) { ri_form(op, m, i2); }
+
+void Assembler::ri_form(Opcode op, Condition m1, const Operand& i2) {
+  DCHECK(is_uint12(op));
+  DCHECK(is_uint4(m1));
+  DCHECK(is_uint16(i2.imm_));
+  emit4bytes((op & 0xFF0) * B20 | m1 * B20 | (op & 0xF) * B16 |
+             (i2.imm_ & 0xFFFF));
+}
+
+// RIE-f format: <insn> R1,R2,I3,I4,I5
+//    +--------+----+----+------------------+--------+--------+
+//    | OpCode | R1 | R2 |   I3   |    I4   |   I5   | OpCode |
+//    +--------+----+----+------------------+--------+--------+
+//    0        8    12   16      24         32       40      47
+void Assembler::rie_f_form(Opcode op, Register r1, Register r2,
+                           const Operand& i3, const Operand& i4,
+                           const Operand& i5) {
+  DCHECK(is_uint16(op));
+  DCHECK(is_uint8(i3.imm_));
+  DCHECK(is_uint8(i4.imm_));
+  DCHECK(is_uint8(i5.imm_));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(r2.code())) * B32 |
+                  (static_cast<uint64_t>(i3.imm_)) * B24 |
+                  (static_cast<uint64_t>(i4.imm_)) * B16 |
+                  (static_cast<uint64_t>(i5.imm_)) * B8 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// RIE format: <insn> R1,R3,I2
+//    +--------+----+----+------------------+--------+--------+
+//    | OpCode | R1 | R3 |        I2        |////////| OpCode |
+//    +--------+----+----+------------------+--------+--------+
+//    0        8    12   16                 32       40      47
+#define RIE_FORM_EMIT(name, op)                                       \
+  void Assembler::name(Register r1, Register r3, const Operand& i2) { \
+    rie_form(op, r1, r3, i2);                                         \
+  }
+
+void Assembler::rie_form(Opcode op, Register r1, Register r3,
+                         const Operand& i2) {
+  DCHECK(is_uint16(op));
+  DCHECK(is_int16(i2.imm_));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(r3.code())) * B32 |
+                  (static_cast<uint64_t>(i2.imm_ & 0xFFFF)) * B16 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// RIL1 format: <insn> R1,I2
+//   +--------+----+----+------------------------------------+
+//   | OpCode | R1 |OpCd|                  I2                |
+//   +--------+----+----+------------------------------------+
+//   0        8    12   16                                  47
+#define RIL1_FORM_EMIT(name, op) \
+  void Assembler::name(Register r, const Operand& i2) { ril_form(op, r, i2); }
+
+void Assembler::ril_form(Opcode op, Register r1, const Operand& i2) {
+  DCHECK(is_uint12(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(op & 0x00F)) * B32 |
+                  (static_cast<uint64_t>(i2.imm_) & 0xFFFFFFFF);
+  emit6bytes(code);
+}
+
+// RIL2 format: <insn> M1,I2
+//   +--------+----+----+------------------------------------+
+//   | OpCode | M1 |OpCd|                  I2                |
+//   +--------+----+----+------------------------------------+
+//   0        8    12   16                                  47
+#define RIL2_FORM_EMIT(name, op)                          \
+  void Assembler::name(Condition m1, const Operand& i2) { \
+    ril_form(op, m1, i2);                                 \
+  }
+
+void Assembler::ril_form(Opcode op, Condition m1, const Operand& i2) {
+  DCHECK(is_uint12(op));
+  DCHECK(is_uint4(m1));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
+                  (static_cast<uint64_t>(m1)) * B36 |
+                  (static_cast<uint64_t>(op & 0x00F)) * B32 |
+                  (static_cast<uint64_t>(i2.imm_ & 0xFFFFFFFF));
+  emit6bytes(code);
+}
+
+// RRE format: <insn> R1,R2
+//    +------------------+--------+----+----+
+//    |      OpCode      |////////| R1 | R2 |
+//    +------------------+--------+----+----+
+//    0                  16       24   28  31
+#define RRE_FORM_EMIT(name, op) \
+  void Assembler::name(Register r1, Register r2) { rre_form(op, r1, r2); }
+
+void Assembler::rre_form(Opcode op, Register r1, Register r2) {
+  DCHECK(is_uint16(op));
+  emit4bytes(op << 16 | r1.code() * B4 | r2.code());
+}
+
+void Assembler::rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
+  DCHECK(is_uint16(op));
+  emit4bytes(op << 16 | r1.code() * B4 | r2.code());
+}
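+
+// Worked example of the RRE encoding above (illustrative): with LGR == 0xB904,
+// lgr(r3, r4) emits 0xB904 << 16 | 3 * B4 | 4 == 0xB9040034, i.e.
+// "lgr %r3,%r4".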
+
+// RRD format: <insn> R1,R3, R2
+//    +------------------+----+----+----+----+
+//    |      OpCode      | R1 |////| R3 | R2 |
+//    +------------------+----+----+----+----+
+//    0                  16  20   24   28   31
+#define RRD_FORM_EMIT(name, op)                                 \
+  void Assembler::name(Register r1, Register r3, Register r2) { \
+    rrd_form(op, r1, r3, r2);                                   \
+  }
+
+void Assembler::rrd_form(Opcode op, Register r1, Register r3, Register r2) {
+  emit4bytes(op << 16 | r1.code() * B12 | r3.code() * B4 | r2.code());
+}
+
+// RS1 format: <insn> R1,R3,D2(B2)
+//    +--------+----+----+----+-------------+
+//    | OpCode | R1 | R3 | B2 |     D2      |
+//    +--------+----+----+----+-------------+
+//    0        8    12   16   20           31
+#define RS1_FORM_EMIT(name, op)                                            \
+  void Assembler::name(Register r1, Register r3, Register b2, Disp d2) {   \
+    rs_form(op, r1, r3, b2, d2);                                           \
+  }                                                                        \
+  void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
+    name(r1, r3, opnd.getBaseRegister(), opnd.getDisplacement());          \
+  }
+
+void Assembler::rs_form(Opcode op, Register r1, Register r3, Register b2,
+                        const Disp d2) {
+  DCHECK(is_uint12(d2));
+  emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 | b2.code() * B12 |
+             d2);
+}
+
+// RS2 format: <insn> R1,M3,D2(B2)
+//    +--------+----+----+----+-------------+
+//    | OpCode | R1 | M3 | B2 |     D2      |
+//    +--------+----+----+----+-------------+
+//    0        8    12   16   20           31
+#define RS2_FORM_EMIT(name, op)                                             \
+  void Assembler::name(Register r1, Condition m3, Register b2, Disp d2) {   \
+    rs_form(op, r1, m3, b2, d2);                                            \
+  }                                                                         \
+  void Assembler::name(Register r1, Condition m3, const MemOperand& opnd) { \
+    name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement());           \
+  }
+
+void Assembler::rs_form(Opcode op, Register r1, Condition m3, Register b2,
+                        const Disp d2) {
+  DCHECK(is_uint12(d2));
+  emit4bytes(op * B24 | r1.code() * B20 | m3 * B16 | b2.code() * B12 | d2);
+}
+
+// RSI format: <insn> R1,R3,I2
+//    +--------+----+----+------------------+
+//    | OpCode | R1 | R3 |        RI2       |
+//    +--------+----+----+------------------+
+//    0        8    12   16                 31
+#define RSI_FORM_EMIT(name, op)                                       \
+  void Assembler::name(Register r1, Register r3, const Operand& i2) { \
+    rsi_form(op, r1, r3, i2);                                         \
+  }
+
+void Assembler::rsi_form(Opcode op, Register r1, Register r3,
+                         const Operand& i2) {
+  DCHECK(is_uint8(op));
+  DCHECK(is_uint16(i2.imm_));
+  emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 | (i2.imm_ & 0xFFFF));
+}
+
+// RSL format: <insn> R1,R3,D2(B2)
+//    +--------+----+----+----+-------------+--------+--------+
+//    | OpCode | L1 |    | B2 |    D2       |        | OpCode |
+//    +--------+----+----+----+-------------+--------+--------+
+//    0        8    12   16   20            32       40      47
+#define RSL_FORM_EMIT(name, op)                           \
+  void Assembler::name(Length l1, Register b2, Disp d2) { \
+    rsl_form(op, l1, b2, d2);                             \
+  }
+
+void Assembler::rsl_form(Opcode op, Length l1, Register b2, Disp d2) {
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(l1)) * B36 |
+                  (static_cast<uint64_t>(b2.code())) * B28 |
+                  (static_cast<uint64_t>(d2)) * B16 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// RSY1 format: <insn> R1,R3,D2(B2)
+//    +--------+----+----+----+-------------+--------+--------+
+//    | OpCode | R1 | R3 | B2 |    DL2      |  DH2   | OpCode |
+//    +--------+----+----+----+-------------+--------+--------+
+//    0        8    12   16   20            32       40      47
+#define RSY1_FORM_EMIT(name, op)                                           \
+  void Assembler::name(Register r1, Register r3, Register b2, Disp d2) {   \
+    rsy_form(op, r1, r3, b2, d2);                                          \
+  }                                                                        \
+  void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
+    name(r1, r3, opnd.getBaseRegister(), opnd.getDisplacement());          \
+  }
+
+void Assembler::rsy_form(Opcode op, Register r1, Register r3, Register b2,
+                         const Disp d2) {
+  DCHECK(is_int20(d2));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(r3.code())) * B32 |
+                  (static_cast<uint64_t>(b2.code())) * B28 |
+                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// RSY2 format: <insn> R1,M3,D2(B2)
+//    +--------+----+----+----+-------------+--------+--------+
+//    | OpCode | R1 | M3 | B2 |    DL2      |  DH2   | OpCode |
+//    +--------+----+----+----+-------------+--------+--------+
+//    0        8    12   16   20            32       40      47
+#define RSY2_FORM_EMIT(name, op)                                            \
+  void Assembler::name(Register r1, Condition m3, Register b2, Disp d2) {   \
+    rsy_form(op, r1, m3, b2, d2);                                           \
+  }                                                                         \
+  void Assembler::name(Register r1, Condition m3, const MemOperand& opnd) { \
+    name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement());           \
+  }
+
+void Assembler::rsy_form(Opcode op, Register r1, Condition m3, Register b2,
+                         const Disp d2) {
+  DCHECK(is_int20(d2));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(m3)) * B32 |
+                  (static_cast<uint64_t>(b2.code())) * B28 |
+                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// RXE format: <insn> R1,D2(X2,B2)
+//    +--------+----+----+----+-------------+--------+--------+
+//    | OpCode | R1 | X2 | B2 |     D2      |////////| OpCode |
+//    +--------+----+----+----+-------------+--------+--------+
+//    0        8    12   16   20            32       40      47
+#define RXE_FORM_EMIT(name, op)                                          \
+  void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
+    rxe_form(op, r1, x2, b2, d2);                                        \
+  }                                                                      \
+  void Assembler::name(Register r1, const MemOperand& opnd) {            \
+    name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(),            \
+         opnd.getDisplacement());                                        \
+  }
+
+void Assembler::rxe_form(Opcode op, Register r1, Register x2, Register b2,
+                         Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(x2.code())) * B32 |
+                  (static_cast<uint64_t>(b2.code())) * B28 |
+                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// RXY format: <insn> R1,D2(X2,B2)
+//    +--------+----+----+----+-------------+--------+--------+
+//    | OpCode | R1 | X2 | B2 |     DL2     |   DH2  | OpCode |
+//    +--------+----+----+----+-------------+--------+--------+
+//    0        8    12   16   20            32   36   40      47
+#define RXY_FORM_EMIT(name, op)                                          \
+  void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
+    rxy_form(op, r1, x2, b2, d2);                                        \
+  }                                                                      \
+  void Assembler::name(Register r1, const MemOperand& opnd) {            \
+    name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(),            \
+         opnd.getDisplacement());                                        \
+  }
+
+void Assembler::rxy_form(Opcode op, Register r1, Register x2, Register b2,
+                         Disp d2) {
+  DCHECK(is_int20(d2));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(x2.code())) * B32 |
+                  (static_cast<uint64_t>(b2.code())) * B28 |
+                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+void Assembler::rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+                         Disp d2) {
+  DCHECK(is_int20(d2));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(x2.code())) * B32 |
+                  (static_cast<uint64_t>(b2.code())) * B28 |
+                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
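+
+// Worked example of the RXY encoding above (illustrative): the 20-bit signed
+// displacement is split into DL2 (low 12 bits) and DH2 (high 8 bits).  With
+// LG == 0xE304, lg(r2, MemOperand(r3, 0x12345)) emits the six bytes
+// E3 20 33 45 12 04, i.e. R1=2, X2=0, B2=3, DL2=0x345, DH2=0x12.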
+
+// RRS format: <insn> R1,R2,M3,D4(B4)
+//    +--------+----+----+----+-------------+----+---+--------+
+//    | OpCode | R1 | R2 | B4 |     D4      | M3 |///| OpCode |
+//    +--------+----+----+----+-------------+----+---+--------+
+//    0        8    12   16   20            32   36   40      47
+#define RRS_FORM_EMIT(name, op)                                        \
+  void Assembler::name(Register r1, Register r2, Register b4, Disp d4, \
+                       Condition m3) {                                 \
+    rrs_form(op, r1, r2, b4, d4, m3);                                  \
+  }                                                                    \
+  void Assembler::name(Register r1, Register r2, Condition m3,         \
+                       const MemOperand& opnd) {                       \
+    name(r1, r2, opnd.getBaseRegister(), opnd.getDisplacement(), m3);  \
+  }
+
+void Assembler::rrs_form(Opcode op, Register r1, Register r2, Register b4,
+                         Disp d4, Condition m3) {
+  DCHECK(is_uint12(d4));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(r2.code())) * B32 |
+                  (static_cast<uint64_t>(b4.code())) * B28 |
+                  (static_cast<uint64_t>(d4)) * B16 |
+                  (static_cast<uint64_t>(m3)) << 12 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// RIS format: <insn> R1,I2,M3,D4(B4)
+//    +--------+----+----+----+-------------+--------+--------+
+//    | OpCode | R1 | M3 | B4 |     D4      |   I2   | OpCode |
+//    +--------+----+----+----+-------------+--------+--------+
+//    0        8    12   16   20            32        40      47
+#define RIS_FORM_EMIT(name, op)                                         \
+  void Assembler::name(Register r1, Condition m3, Register b4, Disp d4, \
+                       const Operand& i2) {                             \
+    ris_form(op, r1, m3, b4, d4, i2);                                   \
+  }                                                                     \
+  void Assembler::name(Register r1, const Operand& i2, Condition m3,    \
+                       const MemOperand& opnd) {                        \
+    name(r1, m3, opnd.getBaseRegister(), opnd.getDisplacement(), i2);   \
+  }
+
+void Assembler::ris_form(Opcode op, Register r1, Condition m3, Register b4,
+                         Disp d4, const Operand& i2) {
+  DCHECK(is_uint12(d4));
+  DCHECK(is_uint16(op));
+  DCHECK(is_uint8(i2.imm_));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(m3)) * B32 |
+                  (static_cast<uint64_t>(b4.code())) * B28 |
+                  (static_cast<uint64_t>(d4)) * B16 |
+                  (static_cast<uint64_t>(i2.imm_)) << 8 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// S format: <insn> D2(B2)
+//    +------------------+----+-------------+
+//    |      OpCode      | B2 |     D2      |
+//    +------------------+----+-------------+
+//    0                  16   20           31
+#define S_FORM_EMIT(name, op)                                        \
+  void Assembler::name(Register b1, Disp d2) { s_form(op, b1, d2); } \
+  void Assembler::name(const MemOperand& opnd) {                     \
+    name(opnd.getBaseRegister(), opnd.getDisplacement());            \
+  }
+
+void Assembler::s_form(Opcode op, Register b1, Disp d2) {
+  DCHECK(is_uint12(d2));
+  emit4bytes(op << 16 | b1.code() * B12 | d2);
+}
+
+// SI format: <insn> D1(B1),I2
+//    +--------+---------+----+-------------+
+//    | OpCode |   I2    | B1 |     D1      |
+//    +--------+---------+----+-------------+
+//    0        8         16   20           31
+#define SI_FORM_EMIT(name, op)                                      \
+  void Assembler::name(const Operand& i2, Register b1, Disp d1) {   \
+    si_form(op, i2, b1, d1);                                        \
+  }                                                                 \
+  void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
+    name(i2, opnd.getBaseRegister(), opnd.getDisplacement());       \
+  }
+
+void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
+  emit4bytes((op & 0x00FF) << 24 | i2.imm_ * B16 | b1.code() * B12 | d1);
+}
+
+// SIY format: <insn> D1(B1),I2
+//    +--------+---------+----+-------------+--------+--------+
+//    | OpCode |   I2    | B1 |     DL1     |  DH1   | OpCode |
+//    +--------+---------+----+-------------+--------+--------+
+//    0        8         16   20            32   36   40      47
+#define SIY_FORM_EMIT(name, op)                                     \
+  void Assembler::name(const Operand& i2, Register b1, Disp d1) {   \
+    siy_form(op, i2, b1, d1);                                       \
+  }                                                                 \
+  void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
+    name(i2, opnd.getBaseRegister(), opnd.getDisplacement());       \
+  }
+
+void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
+  DCHECK(is_uint20(d1));
+  DCHECK(is_uint16(op));
+  DCHECK(is_uint8(i2.imm_));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(i2.imm_)) * B32 |
+                  (static_cast<uint64_t>(b1.code())) * B28 |
+                  (static_cast<uint64_t>(d1 & 0x0FFF)) * B16 |
+                  (static_cast<uint64_t>(d1 & 0x0FF000)) >> 4 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// SIL format: <insn> D1(B1),I2
+//    +------------------+----+-------------+-----------------+
+//    |     OpCode       | B1 |      D1     |        I2       |
+//    +------------------+----+-------------+-----------------+
+//    0                 16   20            32                47
+#define SIL_FORM_EMIT(name, op)                                     \
+  void Assembler::name(Register b1, Disp d1, const Operand& i2) {   \
+    sil_form(op, b1, d1, i2);                                       \
+  }                                                                 \
+  void Assembler::name(const MemOperand& opnd, const Operand& i2) { \
+    name(opnd.getBaseRegister(), opnd.getDisplacement(), i2);       \
+  }
+
+void Assembler::sil_form(Opcode op, Register b1, Disp d1, const Operand& i2) {
+  DCHECK(is_uint12(d1));
+  DCHECK(is_uint16(op));
+  DCHECK(is_uint16(i2.imm_));
+  uint64_t code = (static_cast<uint64_t>(op)) * B32 |
+                  (static_cast<uint64_t>(b1.code())) * B28 |
+                  (static_cast<uint64_t>(d1)) * B16 |
+                  (static_cast<uint64_t>(i2.imm_));
+  emit6bytes(code);
+}
+
+// RXF format: <insn> R1,R3,D2(X2,B2)
+//    +--------+----+----+----+-------------+----+---+--------+
+//    | OpCode | R3 | X2 | B2 |     D2      | R1 |///| OpCode |
+//    +--------+----+----+----+-------------+----+---+--------+
+//    0        8    12   16   20            32   36  40      47
+#define RXF_FORM_EMIT(name, op)                                            \
+  void Assembler::name(Register r1, Register r3, Register b2, Register x2, \
+                       Disp d2) {                                          \
+    rxf_form(op, r1, r3, b2, x2, d2);                                      \
+  }                                                                        \
+  void Assembler::name(Register r1, Register r3, const MemOperand& opnd) { \
+    name(r1, r3, opnd.getBaseRegister(), opnd.getIndexRegister(),          \
+         opnd.getDisplacement());                                          \
+  }
+
+void Assembler::rxf_form(Opcode op, Register r1, Register r3, Register b2,
+                         Register x2, Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r3.code())) * B36 |
+                  (static_cast<uint64_t>(x2.code())) * B32 |
+                  (static_cast<uint64_t>(b2.code())) * B28 |
+                  (static_cast<uint64_t>(d2)) * B16 |
+                  (static_cast<uint64_t>(r1.code())) * B12 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+}
+
+// SS1 format: <insn> D1(L,B1),D2(B2)
+//    +--------+----+----+----+-------------+----+------------+
+//    | OpCode |    L    | B1 |     D1      | B2 |     D2     |
+//    +--------+----+----+----+-------------+----+------------+
+//    0        8    12   16   20            32   36          47
+#define SS1_FORM_EMIT(name, op)                                                \
+  void Assembler::name(Register b1, Disp d1, Register b2, Disp d2, Length l) { \
+    ss_form(op, l, b1, d1, b2, d2);                                            \
+  }                                                                            \
+  void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2,       \
+                       Length length) {                                        \
+    name(opnd1.getBaseRegister(), opnd1.getDisplacement(),                     \
+         opnd2.getBaseRegister(), opnd2.getDisplacement(), length);            \
+  }
+
+void Assembler::ss_form(Opcode op, Length l, Register b1, Disp d1, Register b2,
+                        Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint12(d1));
+  DCHECK(is_uint8(op));
+  DCHECK(is_uint8(l));
+  uint64_t code =
+      (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l)) * B32 |
+      (static_cast<uint64_t>(b1.code())) * B28 |
+      (static_cast<uint64_t>(d1)) * B16 |
+      (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
+  emit6bytes(code);
+}
+
+// SS2 format: <insn> D1(L1,B1), D2(L2,B2)
+//    +--------+----+----+----+-------------+----+------------+
+//    | OpCode | L1 | L2 | B1 |     D1      | B2 |     D2     |
+//    +--------+----+----+----+-------------+----+------------+
+//    0        8    12   16   20            32   36          47
+#define SS2_FORM_EMIT(name, op)                                               \
+  void Assembler::name(Register b1, Disp d1, Register b2, Disp d2, Length l1, \
+                       Length l2) {                                           \
+    ss_form(op, l1, l2, b1, d1, b2, d2);                                      \
+  }                                                                           \
+  void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2,      \
+                       Length length1, Length length2) {                      \
+    name(opnd1.getBaseRegister(), opnd1.getDisplacement(),                    \
+         opnd2.getBaseRegister(), opnd2.getDisplacement(), length1, length2); \
+  }
+
+void Assembler::ss_form(Opcode op, Length l1, Length l2, Register b1, Disp d1,
+                        Register b2, Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint12(d1));
+  DCHECK(is_uint8(op));
+  DCHECK(is_uint4(l2));
+  DCHECK(is_uint4(l1));
+  uint64_t code =
+      (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
+      (static_cast<uint64_t>(l2)) * B32 |
+      (static_cast<uint64_t>(b1.code())) * B28 |
+      (static_cast<uint64_t>(d1)) * B16 |
+      (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
+  emit6bytes(code);
+}
+
+// SS3 format: <insn> D1(L1,B1), D2(I3,B2)
+//    +--------+----+----+----+-------------+----+------------+
+//    | OpCode | L1 | I3 | B1 |     D1      | B2 |     D2     |
+//    +--------+----+----+----+-------------+----+------------+
+//    0        8    12   16   20            32   36          47
+#define SS3_FORM_EMIT(name, op)                                              \
+  void Assembler::name(const Operand& i3, Register b1, Disp d1, Register b2, \
+                       Disp d2, Length l1) {                                 \
+    ss_form(op, l1, i3, b1, d1, b2, d2);                                     \
+  }                                                                          \
+  void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2,     \
+                       Length length) {                                      \
+    DCHECK(false);                                                           \
+  }
+void Assembler::ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
+                        Disp d1, Register b2, Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint12(d1));
+  DCHECK(is_uint8(op));
+  DCHECK(is_uint4(l1));
+  DCHECK(is_uint4(i3.imm_));
+  uint64_t code =
+      (static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
+      (static_cast<uint64_t>(i3.imm_)) * B32 |
+      (static_cast<uint64_t>(b1.code())) * B28 |
+      (static_cast<uint64_t>(d1)) * B16 |
+      (static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
+  emit6bytes(code);
+}
+
+// SS4 format: <insn> D1(R1,B1), D2(R3,B2)
+//    +--------+----+----+----+-------------+----+------------+
+//    | OpCode | R1 | R3 | B1 |     D1      | B2 |     D2     |
+//    +--------+----+----+----+-------------+----+------------+
+//    0        8    12   16   20            32   36          47
+#define SS4_FORM_EMIT(name, op)                                            \
+  void Assembler::name(Register r1, Register r3, Register b1, Disp d1,     \
+                       Register b2, Disp d2) {                             \
+    ss_form(op, r1, r3, b1, d1, b2, d2);                                   \
+  }                                                                        \
+  void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
+    DCHECK(false);                                                         \
+  }
+void Assembler::ss_form(Opcode op, Register r1, Register r3, Register b1,
+                        Disp d1, Register b2, Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint12(d1));
+  DCHECK(is_uint8(op));
+  uint64_t code = (static_cast<uint64_t>(op)) * B40 |
+                  (static_cast<uint64_t>(r1.code())) * B36 |
+                  (static_cast<uint64_t>(r3.code())) * B32 |
+                  (static_cast<uint64_t>(b1.code())) * B28 |
+                  (static_cast<uint64_t>(d1)) * B16 |
+                  (static_cast<uint64_t>(b2.code())) * B12 |
+                  (static_cast<uint64_t>(d2));
+  emit6bytes(code);
+}
+
+// SS5 format: <insn> D2(R1,B2), D4(R3,B4)
+//    +--------+----+----+----+-------------+----+------------+
+//    | OpCode | R1 | R3 | B2 |     D2      | B4 |     D4     |
+//    +--------+----+----+----+-------------+----+------------+
+//    0        8    12   16   20            32   36          47
+#define SS5_FORM_EMIT(name, op)                                            \
+  void Assembler::name(Register r1, Register r3, Register b2, Disp d2,     \
+                       Register b4, Disp d4) {                             \
+    ss_form(op, r1, r3, b2, d2, b4, d4); /*SS5 use the same form as SS4*/  \
+  }                                                                        \
+  void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
+    DCHECK(false);                                                         \
+  }
+
+#define SS6_FORM_EMIT(name, op) SS1_FORM_EMIT(name, op)
+
+// SSE format: <insn> D1(B1),D2(B2)
+//    +------------------+----+-------------+----+------------+
+//    |      OpCode      | B1 |     D1      | B2 |     D2     |
+//    +------------------+----+-------------+----+------------+
+//    0        8    12   16   20            32   36           47
+#define SSE_FORM_EMIT(name, op)                                            \
+  void Assembler::name(Register b1, Disp d1, Register b2, Disp d2) {       \
+    sse_form(op, b1, d1, b2, d2);                                          \
+  }                                                                        \
+  void Assembler::name(const MemOperand& opnd1, const MemOperand& opnd2) { \
+    name(opnd1.getBaseRegister(), opnd1.getDisplacement(),                 \
+         opnd2.getBaseRegister(), opnd2.getDisplacement());                \
+  }
+void Assembler::sse_form(Opcode op, Register b1, Disp d1, Register b2,
+                         Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint12(d1));
+  DCHECK(is_uint16(op));
+  uint64_t code = (static_cast<uint64_t>(op)) * B32 |
+                  (static_cast<uint64_t>(b1.code())) * B28 |
+                  (static_cast<uint64_t>(d1)) * B16 |
+                  (static_cast<uint64_t>(b2.code())) * B12 |
+                  (static_cast<uint64_t>(d2));
+  emit6bytes(code);
+}
+
+// SSF format: <insn> R3,D1(B1),D2(B2)
+//    +--------+----+----+----+-------------+----+------------+
+//    | OpCode | R3 |OpCd| B1 |     D1      | B2 |     D2     |
+//    +--------+----+----+----+-------------+----+------------+
+//    0        8    12   16   20            32   36           47
+#define SSF_FORM_EMIT(name, op)                                        \
+  void Assembler::name(Register r3, Register b1, Disp d1, Register b2, \
+                       Disp d2) {                                      \
+    ssf_form(op, r3, b1, d1, b2, d2);                                  \
+  }                                                                    \
+  void Assembler::name(Register r3, const MemOperand& opnd1,           \
+                       const MemOperand& opnd2) {                      \
+    name(r3, opnd1.getBaseRegister(), opnd1.getDisplacement(),         \
+         opnd2.getBaseRegister(), opnd2.getDisplacement());            \
+  }
+
+void Assembler::ssf_form(Opcode op, Register r3, Register b1, Disp d1,
+                         Register b2, Disp d2) {
+  DCHECK(is_uint12(d2));
+  DCHECK(is_uint12(d1));
+  DCHECK(is_uint12(op));
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
+                  (static_cast<uint64_t>(r3.code())) * B36 |
+                  (static_cast<uint64_t>(op & 0x00F)) * B32 |
+                  (static_cast<uint64_t>(b1.code())) * B28 |
+                  (static_cast<uint64_t>(d1)) * B16 |
+                  (static_cast<uint64_t>(b2.code())) * B12 |
+                  (static_cast<uint64_t>(d2));
+  emit6bytes(code);
+}
+
+//  RRF1 format: <insn> R1,R2,R3
+//    +------------------+----+----+----+----+
+//    |      OpCode      | R3 |    | R1 | R2 |
+//    +------------------+----+----+----+----+
+//    0                  16   20   24   28  31
+#define RRF1_FORM_EMIT(name, op)                                        \
+  void Assembler::name(Register r1, Register r2, Register r3) {         \
+    rrf1_form(op << 16 | r3.code() * B12 | r1.code() * B4 | r2.code()); \
+  }
+
+void Assembler::rrf1_form(Opcode op, Register r1, Register r2, Register r3) {
+  uint32_t code = op << 16 | r3.code() * B12 | r1.code() * B4 | r2.code();
+  emit4bytes(code);
+}
+
+void Assembler::rrf1_form(uint32_t code) { emit4bytes(code); }
+
+//  RRF2 format: <insn> R1,R2,M3
+//    +------------------+----+----+----+----+
+//    |      OpCode      | M3 |    | R1 | R2 |
+//    +------------------+----+----+----+----+
+//    0                  16   20   24   28  31
+#define RRF2_FORM_EMIT(name, op)                                 \
+  void Assembler::name(Condition m3, Register r1, Register r2) { \
+    rrf2_form(op << 16 | m3 * B12 | r1.code() * B4 | r2.code()); \
+  }
+
+void Assembler::rrf2_form(uint32_t code) { emit4bytes(code); }
+
+//  RRF3 format: <insn> R1,R2,R3,M4
+//    +------------------+----+----+----+----+
+//    |      OpCode      | R3 | M4 | R1 | R2 |
+//    +------------------+----+----+----+----+
+//    0                  16   20   24   28  31
+#define RRF3_FORM_EMIT(name, op)                                             \
+  void Assembler::name(Register r3, Condition m4, Register r1, Register r2) { \
+    rrf3_form(op << 16 | r3.code() * B12 | m4 * B8 | r1.code() * B4 |        \
+              r2.code());                                                    \
+  }
+
+void Assembler::rrf3_form(uint32_t code) { emit4bytes(code); }
+
+//  RRF-e format: <insn> R1,M3,R2,M4
+//    +------------------+----+----+----+----+
+//    |      OpCode      | M3 | M4 | R1 | R2 |
+//    +------------------+----+----+----+----+
+//    0                  16   20   24   28  31
+void Assembler::rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
+                          Register r2) {
+  uint32_t code = op << 16 | m3 * B12 | m4 * B8 | r1.code() * B4 | r2.code();
+  emit4bytes(code);
+}
+
+// end of S390 Instruction generation
+
+// start of S390 instruction definitions
+RX_FORM_EMIT(bc, BC)
+RR_FORM_EMIT(bctr, BCTR)
+RXE_FORM_EMIT(ceb, CEB)
+RRE_FORM_EMIT(cefbr, CEFBR)
+SS1_FORM_EMIT(ed, ED)
+RX_FORM_EMIT(ex, EX)
+RRE_FORM_EMIT(flogr, FLOGR)
+RRE_FORM_EMIT(lcgr, LCGR)
+RR_FORM_EMIT(lcr, LCR)
+RX_FORM_EMIT(le_z, LE)
+RXY_FORM_EMIT(ley, LEY)
+RIL1_FORM_EMIT(llihf, LLIHF)
+RIL1_FORM_EMIT(llilf, LLILF)
+RRE_FORM_EMIT(lngr, LNGR)
+RR_FORM_EMIT(lnr, LNR)
+RSY1_FORM_EMIT(loc, LOC)
+RXY_FORM_EMIT(lrv, LRV)
+RXY_FORM_EMIT(lrvh, LRVH)
+SS1_FORM_EMIT(mvn, MVN)
+SS1_FORM_EMIT(nc, NC)
+SI_FORM_EMIT(ni, NI)
+RIL1_FORM_EMIT(nihf, NIHF)
+RIL1_FORM_EMIT(nilf, NILF)
+RI1_FORM_EMIT(nilh, NILH)
+RI1_FORM_EMIT(nill, NILL)
+RIL1_FORM_EMIT(oihf, OIHF)
+RIL1_FORM_EMIT(oilf, OILF)
+RI1_FORM_EMIT(oill, OILL)
+RRE_FORM_EMIT(popcnt, POPCNT_Z)
+RIL1_FORM_EMIT(slfi, SLFI)
+RXY_FORM_EMIT(slgf, SLGF)
+RIL1_FORM_EMIT(slgfi, SLGFI)
+RXY_FORM_EMIT(strv, STRV)
+RI1_FORM_EMIT(tmll, TMLL)
+SS1_FORM_EMIT(tr, TR)
+S_FORM_EMIT(ts, TS)
+RIL1_FORM_EMIT(xihf, XIHF)
+RIL1_FORM_EMIT(xilf, XILF)
+
+// -------------------------
+// Load Address Instructions
+// -------------------------
+// Load Address Register-Storage
+void Assembler::la(Register r1, const MemOperand& opnd) {
+  rx_form(LA, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Address Register-Storage
+void Assembler::lay(Register r1, const MemOperand& opnd) {
+  rxy_form(LAY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Address Relative Long
+void Assembler::larl(Register r1, const Operand& opnd) {
+  ril_form(LARL, r1, opnd);
+}
+
+// Load Address Relative Long
+void Assembler::larl(Register r1, Label* l) {
+  larl(r1, Operand(branch_offset(l)));
+}
+
+// -----------------
+// Load Instructions
+// -----------------
+// Load Byte Register-Storage (32<-8)
+void Assembler::lb(Register r, const MemOperand& src) {
+  rxy_form(LB, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Byte Register-Register (32<-8)
+void Assembler::lbr(Register r1, Register r2) { rre_form(LBR, r1, r2); }
+
+// Load Byte Register-Storage (64<-8)
+void Assembler::lgb(Register r, const MemOperand& src) {
+  rxy_form(LGB, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Byte Register-Register (64<-8)
+void Assembler::lgbr(Register r1, Register r2) { rre_form(LGBR, r1, r2); }
+
+// Load Halfword Register-Storage (32<-16)
+void Assembler::lh(Register r, const MemOperand& src) {
+  rx_form(LH, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Halfword Register-Storage (32<-16)
+void Assembler::lhy(Register r, const MemOperand& src) {
+  rxy_form(LHY, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Halfword Register-Register (32<-16)
+void Assembler::lhr(Register r1, Register r2) { rre_form(LHR, r1, r2); }
+
+// Load Halfword Register-Storage (64<-16)
+void Assembler::lgh(Register r, const MemOperand& src) {
+  rxy_form(LGH, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Halfword Register-Register (64<-16)
+void Assembler::lghr(Register r1, Register r2) { rre_form(LGHR, r1, r2); }
+
+// Load Register-Storage (32)
+void Assembler::l(Register r, const MemOperand& src) {
+  rx_form(L, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Register-Storage (32)
+void Assembler::ly(Register r, const MemOperand& src) {
+  rxy_form(LY, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Register-Register (32)
+void Assembler::lr(Register r1, Register r2) { rr_form(LR, r1, r2); }
+
+// Load Register-Storage (64)
+void Assembler::lg(Register r, const MemOperand& src) {
+  rxy_form(LG, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Register-Register (64)
+void Assembler::lgr(Register r1, Register r2) { rre_form(LGR, r1, r2); }
+
+// Load Register-Storage (64<-32)
+void Assembler::lgf(Register r, const MemOperand& src) {
+  rxy_form(LGF, r, src.rx(), src.rb(), src.offset());
+}
+
+// Load Sign Extended Register-Register (64<-32)
+void Assembler::lgfr(Register r1, Register r2) { rre_form(LGFR, r1, r2); }
+
+// Load Halfword Immediate (32)
+void Assembler::lhi(Register r, const Operand& imm) { ri_form(LHI, r, imm); }
+
+// Load Halfword Immediate (64)
+void Assembler::lghi(Register r, const Operand& imm) { ri_form(LGHI, r, imm); }
+
+// --------------------------
+// Load And Test Instructions
+// --------------------------
+// Load and Test Register-Storage (32)
+void Assembler::lt_z(Register r1, const MemOperand& opnd) {
+  rxy_form(LT, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load and Test Register-Storage (64)
+void Assembler::ltg(Register r1, const MemOperand& opnd) {
+  rxy_form(LTG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load and Test Register-Register (32)
+void Assembler::ltr(Register r1, Register r2) { rr_form(LTR, r1, r2); }
+
+// Load and Test Register-Register (64)
+void Assembler::ltgr(Register r1, Register r2) { rre_form(LTGR, r1, r2); }
+
+// Load and Test Register-Register (64<-32)
+void Assembler::ltgfr(Register r1, Register r2) { rre_form(LTGFR, r1, r2); }
+
+// -------------------------
+// Load Logical Instructions
+// -------------------------
+// Load Logical Character (32) - loads a byte and zero ext.
+void Assembler::llc(Register r1, const MemOperand& opnd) {
+  rxy_form(LLC, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical Character (64) - loads a byte and zero ext.
+void Assembler::llgc(Register r1, const MemOperand& opnd) {
+  rxy_form(LLGC, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical Register-Storage (64<-32)
+void Assembler::llgf(Register r1, const MemOperand& opnd) {
+  rxy_form(LLGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical Register-Register (64<-32)
+void Assembler::llgfr(Register r1, Register r2) { rre_form(LLGFR, r1, r2); }
+
+// Load Logical halfword Register-Storage (32)
+void Assembler::llh(Register r1, const MemOperand& opnd) {
+  rxy_form(LLH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical halfword Register-Storage (64)
+void Assembler::llgh(Register r1, const MemOperand& opnd) {
+  rxy_form(LLGH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Logical halfword Register-Register (32)
+void Assembler::llhr(Register r1, Register r2) { rre_form(LLHR, r1, r2); }
+
+// Load Logical halfword Register-Register (64)
+void Assembler::llghr(Register r1, Register r2) { rre_form(LLGHR, r1, r2); }
+
+// -------------------
+// Branch Instructions
+// -------------------
+// Branch and Save
+void Assembler::basr(Register r1, Register r2) { rr_form(BASR, r1, r2); }
+
+// Indirect Conditional Branch via register
+void Assembler::bcr(Condition m, Register target) { rr_form(BCR, m, target); }
+
+// Branch on Count (32)
+void Assembler::bct(Register r, const MemOperand& opnd) {
+  rx_form(BCT, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Branch on Count (64)
+void Assembler::bctg(Register r, const MemOperand& opnd) {
+  rxy_form(BCTG, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Branch Relative and Save (32)
+void Assembler::bras(Register r, const Operand& opnd) {
+  ri_form(BRAS, r, opnd);
+}
+
+// Branch Relative and Save (64)
+void Assembler::brasl(Register r, const Operand& opnd) {
+  ril_form(BRASL, r, opnd);
+}
+
+// Branch relative on Condition (32)
+void Assembler::brc(Condition c, const Operand& opnd) {
+  // BRC actually encodes # of halfwords, so divide by 2.
+  int16_t numHalfwords = static_cast<int16_t>(opnd.immediate()) / 2;
+  Operand halfwordOp = Operand(numHalfwords);
+  halfwordOp.setBits(16);
+  ri_form(BRC, c, halfwordOp);
+}
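+// For example, an Operand(8) byte offset is emitted as 4 in the 16-bit
+// immediate field.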
+
+// Branch Relative on Condition (64)
+void Assembler::brcl(Condition c, const Operand& opnd, bool isCodeTarget) {
+  Operand halfwordOp = opnd;
+  // Operand for code targets will be index to code_targets_
+  if (!isCodeTarget) {
+    // BRCL actually encodes # of halfwords, so divide by 2.
+    int32_t numHalfwords = static_cast<int32_t>(opnd.immediate()) / 2;
+    halfwordOp = Operand(numHalfwords);
+  }
+  ril_form(BRCL, c, halfwordOp);
+}
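+// Note: when isCodeTarget is true the operand is already an index into
+// code_targets_ (see emit_code_target() in call() and jump() below), so no
+// byte-to-halfword conversion is applied.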
+
+// Branch On Count (32)
+void Assembler::brct(Register r1, const Operand& imm) {
+  // BRCT encodes # of halfwords, so divide by 2.
+  int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2;
+  Operand halfwordOp = Operand(numHalfwords);
+  halfwordOp.setBits(16);
+  ri_form(BRCT, r1, halfwordOp);
+}
+
+// Branch On Count (64)
+void Assembler::brctg(Register r1, const Operand& imm) {
+  // BRCTG encodes # of halfwords, so divide by 2.
+  int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2;
+  Operand halfwordOp = Operand(numHalfwords);
+  halfwordOp.setBits(16);
+  ri_form(BRCTG, r1, halfwordOp);
+}
+
+// --------------------
+// Compare Instructions
+// --------------------
+// Compare Register-Storage (32)
+void Assembler::c(Register r, const MemOperand& opnd) {
+  rx_form(C, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Register-Storage (32)
+void Assembler::cy(Register r, const MemOperand& opnd) {
+  rxy_form(CY, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Register-Register (32)
+void Assembler::cr_z(Register r1, Register r2) { rr_form(CR, r1, r2); }
+
+// Compare Register-Storage (64)
+void Assembler::cg(Register r, const MemOperand& opnd) {
+  rxy_form(CG, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Register-Register (64)
+void Assembler::cgr(Register r1, Register r2) { rre_form(CGR, r1, r2); }
+
+// Compare Halfword Register-Storage (32)
+void Assembler::ch(Register r, const MemOperand& opnd) {
+  rx_form(CH, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Halfword Register-Storage (32)
+void Assembler::chy(Register r, const MemOperand& opnd) {
+  rxy_form(CHY, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Halfword Immediate (32)
+void Assembler::chi(Register r, const Operand& opnd) { ri_form(CHI, r, opnd); }
+
+// Compare Halfword Immediate (64)
+void Assembler::cghi(Register r, const Operand& opnd) {
+  ri_form(CGHI, r, opnd);
+}
+
+// Compare Immediate (32)
+void Assembler::cfi(Register r, const Operand& opnd) { ril_form(CFI, r, opnd); }
+
+// Compare Immediate (64)
+void Assembler::cgfi(Register r, const Operand& opnd) {
+  ril_form(CGFI, r, opnd);
+}
+
+// ----------------------------
+// Compare Logical Instructions
+// ----------------------------
+// Compare Logical Register-Storage (32)
+void Assembler::cl(Register r, const MemOperand& opnd) {
+  rx_form(CL, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Logical Register-Storage (32)
+void Assembler::cly(Register r, const MemOperand& opnd) {
+  rxy_form(CLY, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Logical Register-Register (32)
+void Assembler::clr(Register r1, Register r2) { rr_form(CLR, r1, r2); }
+
+// Compare Logical Register-Storage (64)
+void Assembler::clg(Register r, const MemOperand& opnd) {
+  rxy_form(CLG, r, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Compare Logical Register-Register (64)
+void Assembler::clgr(Register r1, Register r2) { rre_form(CLGR, r1, r2); }
+
+// Compare Logical Immediate (32)
+void Assembler::clfi(Register r1, const Operand& i2) { ril_form(CLFI, r1, i2); }
+
+// Compare Logical Immediate (64<32)
+void Assembler::clgfi(Register r1, const Operand& i2) {
+  ril_form(CLGFI, r1, i2);
+}
+
+// Compare Immediate (Mem - Imm) (8)
+void Assembler::cli(const MemOperand& opnd, const Operand& imm) {
+  si_form(CLI, imm, opnd.rb(), opnd.offset());
+}
+
+// Compare Immediate (Mem - Imm) (8)
+void Assembler::cliy(const MemOperand& opnd, const Operand& imm) {
+  siy_form(CLIY, imm, opnd.rb(), opnd.offset());
+}
+
+// Compare logical - mem to mem operation
+void Assembler::clc(const MemOperand& opnd1, const MemOperand& opnd2,
+                    Length length) {
+  ss_form(CLC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
+          opnd2.getBaseRegister(), opnd2.getDisplacement());
+}
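+// Note: the SS-format length field holds (length - 1), so the Length passed
+// in is the actual number of bytes to compare.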
+
+// ----------------------------
+// Test Under Mask Instructions
+// ----------------------------
+// Test Under Mask (Mem - Imm) (8)
+void Assembler::tm(const MemOperand& opnd, const Operand& imm) {
+  si_form(TM, imm, opnd.rb(), opnd.offset());
+}
+
+// Test Under Mask (Mem - Imm) (8)
+void Assembler::tmy(const MemOperand& opnd, const Operand& imm) {
+  siy_form(TMY, imm, opnd.rb(), opnd.offset());
+}
+
+// -------------------------------
+// Rotate and Insert Selected Bits
+// -------------------------------
+// Rotate-And-Insert-Selected-Bits
+void Assembler::risbg(Register dst, Register src, const Operand& startBit,
+                      const Operand& endBit, const Operand& shiftAmt,
+                      bool zeroBits) {
+  // High tag the top bit of I4/EndBit to zero out any unselected bits
+  if (zeroBits)
+    rie_f_form(RISBG, dst, src, startBit, Operand(endBit.imm_ | 0x80),
+               shiftAmt);
+  else
+    rie_f_form(RISBG, dst, src, startBit, endBit, shiftAmt);
+}
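+// For example, risbg(dst, src, Operand(48), Operand(63), Operand(0), true)
+// selects bits 48-63 of src (the low 16 bits) and, with the zero flag set,
+// clears the remaining bits of dst, i.e. dst = src & 0xFFFF.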
+
+// Rotate-And-Insert-Selected-Bits
+void Assembler::risbgn(Register dst, Register src, const Operand& startBit,
+                       const Operand& endBit, const Operand& shiftAmt,
+                       bool zeroBits) {
+  // High tag the top bit of I4/EndBit to zero out any unselected bits
+  if (zeroBits)
+    rie_f_form(RISBGN, dst, src, startBit, Operand(endBit.imm_ | 0x80),
+               shiftAmt);
+  else
+    rie_f_form(RISBGN, dst, src, startBit, endBit, shiftAmt);
+}
+
+// ---------------------------
+// Move Character Instructions
+// ---------------------------
+// Move character - mem to mem operation
+void Assembler::mvc(const MemOperand& opnd1, const MemOperand& opnd2,
+                    uint32_t length) {
+  ss_form(MVC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
+          opnd2.getBaseRegister(), opnd2.getDisplacement());
+}
+
+// -----------------------
+// 32-bit Add Instructions
+// -----------------------
+// Add Register-Storage (32)
+void Assembler::a(Register r1, const MemOperand& opnd) {
+  rx_form(A, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Register-Storage (32)
+void Assembler::ay(Register r1, const MemOperand& opnd) {
+  rxy_form(AY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Immediate (32)
+void Assembler::afi(Register r1, const Operand& opnd) {
+  ril_form(AFI, r1, opnd);
+}
+
+// Add Halfword Register-Storage (32)
+void Assembler::ah(Register r1, const MemOperand& opnd) {
+  rx_form(AH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Halfword Register-Storage (32)
+void Assembler::ahy(Register r1, const MemOperand& opnd) {
+  rxy_form(AHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Halfword Immediate (32)
+void Assembler::ahi(Register r1, const Operand& i2) { ri_form(AHI, r1, i2); }
+
+// Add Halfword Immediate (32)
+void Assembler::ahik(Register r1, Register r3, const Operand& i2) {
+  rie_form(AHIK, r1, r3, i2);
+}
+
+// Add Register (32)
+void Assembler::ar(Register r1, Register r2) { rr_form(AR, r1, r2); }
+
+// Add Register-Register-Register (32)
+void Assembler::ark(Register r1, Register r2, Register r3) {
+  rrf1_form(ARK, r1, r2, r3);
+}
+
+// Add Storage-Imm (32)
+void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
+  DCHECK(is_int8(imm.imm_));
+  DCHECK(is_int20(opnd.offset()));
+  siy_form(ASI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
+}
+
+// -----------------------
+// 64-bit Add Instructions
+// -----------------------
+// Add Register-Storage (64)
+void Assembler::ag(Register r1, const MemOperand& opnd) {
+  rxy_form(AG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Register-Storage (64<-32)
+void Assembler::agf(Register r1, const MemOperand& opnd) {
+  rxy_form(AGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Immediate (64)
+void Assembler::agfi(Register r1, const Operand& opnd) {
+  ril_form(AGFI, r1, opnd);
+}
+
+// Add Register-Register (64<-32)
+void Assembler::agfr(Register r1, Register r2) { rre_form(AGFR, r1, r2); }
+
+// Add Halfword Immediate (64)
+void Assembler::aghi(Register r1, const Operand& i2) { ri_form(AGHI, r1, i2); }
+
+// Add Halfword Immediate (64)
+void Assembler::aghik(Register r1, Register r3, const Operand& i2) {
+  rie_form(AGHIK, r1, r3, i2);
+}
+
+// Add Register (64)
+void Assembler::agr(Register r1, Register r2) { rre_form(AGR, r1, r2); }
+
+// Add Register-Register-Register (64)
+void Assembler::agrk(Register r1, Register r2, Register r3) {
+  rrf1_form(AGRK, r1, r2, r3);
+}
+
+// Add Storage-Imm (64)
+void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
+  DCHECK(is_int8(imm.imm_));
+  DCHECK(is_int20(opnd.offset()));
+  siy_form(AGSI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
+}
+
+// -------------------------------
+// 32-bit Add Logical Instructions
+// -------------------------------
+// Add Logical Register-Storage (32)
+void Assembler::al_z(Register r1, const MemOperand& opnd) {
+  rx_form(AL, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Logical Register-Storage (32)
+void Assembler::aly(Register r1, const MemOperand& opnd) {
+  rxy_form(ALY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Logical Immediate (32)
+void Assembler::alfi(Register r1, const Operand& opnd) {
+  ril_form(ALFI, r1, opnd);
+}
+
+// Add Logical Register-Register (32)
+void Assembler::alr(Register r1, Register r2) { rr_form(ALR, r1, r2); }
+
+// Add Logical With Carry Register-Register (32)
+void Assembler::alcr(Register r1, Register r2) { rre_form(ALCR, r1, r2); }
+
+// Add Logical Register-Register-Register (32)
+void Assembler::alrk(Register r1, Register r2, Register r3) {
+  rrf1_form(ALRK, r1, r2, r3);
+}
+
+// -------------------------------
+// 64-bit Add Logical Instructions
+// -------------------------------
+// Add Logical Register-Storage (64)
+void Assembler::alg(Register r1, const MemOperand& opnd) {
+  rxy_form(ALG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Add Logical Immediate (64)
+void Assembler::algfi(Register r1, const Operand& opnd) {
+  ril_form(ALGFI, r1, opnd);
+}
+
+// Add Logical Register-Register (64)
+void Assembler::algr(Register r1, Register r2) { rre_form(ALGR, r1, r2); }
+
+// Add Logical Register-Register-Register (64)
+void Assembler::algrk(Register r1, Register r2, Register r3) {
+  rrf1_form(ALGRK, r1, r2, r3);
+}
+
+// ----------------------------
+// 32-bit Subtract Instructions
+// ----------------------------
+// Subtract Register-Storage (32)
+void Assembler::s(Register r1, const MemOperand& opnd) {
+  rx_form(S, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register-Storage (32)
+void Assembler::sy(Register r1, const MemOperand& opnd) {
+  rxy_form(SY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Halfword Register-Storage (32)
+void Assembler::sh(Register r1, const MemOperand& opnd) {
+  rx_form(SH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Halfword Register-Storage (32)
+void Assembler::shy(Register r1, const MemOperand& opnd) {
+  rxy_form(SHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register (32)
+void Assembler::sr(Register r1, Register r2) { rr_form(SR, r1, r2); }
+
+// Subtract Register-Register-Register (32)
+void Assembler::srk(Register r1, Register r2, Register r3) {
+  rrf1_form(SRK, r1, r2, r3);
+}
+
+// ----------------------------
+// 64-bit Subtract Instructions
+// ----------------------------
+// Subtract Register-Storage (64)
+void Assembler::sg(Register r1, const MemOperand& opnd) {
+  rxy_form(SG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register-Storage (64<-32)
+void Assembler::sgf(Register r1, const MemOperand& opnd) {
+  rxy_form(SGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Register (64)
+void Assembler::sgr(Register r1, Register r2) { rre_form(SGR, r1, r2); }
+
+// Subtract Register (64<-32)
+void Assembler::sgfr(Register r1, Register r2) { rre_form(SGFR, r1, r2); }
+
+// Subtract Register-Register-Register (64)
+void Assembler::sgrk(Register r1, Register r2, Register r3) {
+  rrf1_form(SGRK, r1, r2, r3);
+}
+
+// ------------------------------------
+// 32-bit Subtract Logical Instructions
+// ------------------------------------
+// Subtract Logical Register-Storage (32)
+void Assembler::sl(Register r1, const MemOperand& opnd) {
+  rx_form(SL, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Logical Register-Storage (32)
+void Assembler::sly(Register r1, const MemOperand& opnd) {
+  rxy_form(SLY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Logical Register-Register (32)
+void Assembler::slr(Register r1, Register r2) { rr_form(SLR, r1, r2); }
+
+// Subtract Logical With Borrow Register-Register (32)
+void Assembler::slbr(Register r1, Register r2) { rre_form(SLBR, r1, r2); }
+
+// Subtract Logical Register-Register-Register (32)
+void Assembler::slrk(Register r1, Register r2, Register r3) {
+  rrf1_form(SLRK, r1, r2, r3);
+}
+
+// ------------------------------------
+// 64-bit Subtract Logical Instructions
+// ------------------------------------
+// Subtract Logical Register-Storage (64)
+void Assembler::slg(Register r1, const MemOperand& opnd) {
+  rxy_form(SLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Subtract Logical Register-Register (64)
+void Assembler::slgr(Register r1, Register r2) { rre_form(SLGR, r1, r2); }
+
+// Subtract Logical Register-Register-Register (64)
+void Assembler::slgrk(Register r1, Register r2, Register r3) {
+  rrf1_form(SLGRK, r1, r2, r3);
+}
+
+// ----------------------------
+// 32-bit Multiply Instructions
+// ----------------------------
+// Multiply Register-Storage (64<32)
+void Assembler::m(Register r1, const MemOperand& opnd) {
+  rx_form(M, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Register (64<32)
+void Assembler::mr_z(Register r1, Register r2) {
+  DCHECK(r1.code() % 2 == 0);
+  rr_form(MR, r1, r2);
+}
+
+// Multiply Logical Register-Storage (64<32)
+void Assembler::ml(Register r1, const MemOperand& opnd) {
+  rxy_form(ML, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Logical Register (64<32)
+void Assembler::mlr(Register r1, Register r2) {
+  DCHECK(r1.code() % 2 == 0);
+  rre_form(MLR, r1, r2);
+}
+
+// Multiply Single Register-Storage (32)
+void Assembler::ms(Register r1, const MemOperand& opnd) {
+  rx_form(MS, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Single Register-Storage (32)
+void Assembler::msy(Register r1, const MemOperand& opnd) {
+  rxy_form(MSY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Single Immediate (32)
+void Assembler::msfi(Register r1, const Operand& opnd) {
+  ril_form(MSFI, r1, opnd);
+}
+
+// Multiply Single Register (32)
+void Assembler::msr(Register r1, Register r2) { rre_form(MSR, r1, r2); }
+
+// Multiply Halfword Register-Storage (32)
+void Assembler::mh(Register r1, const MemOperand& opnd) {
+  rx_form(MH, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Halfword Register-Storage (32)
+void Assembler::mhy(Register r1, const MemOperand& opnd) {
+  rxy_form(MHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Halfword Immediate (32)
+void Assembler::mhi(Register r1, const Operand& opnd) {
+  ri_form(MHI, r1, opnd);
+}
+
+// ----------------------------
+// 64-bit Multiply Instructions
+// ----------------------------
+// Multiply Logical Register-Storage (128<64)
+void Assembler::mlg(Register r1, const MemOperand& opnd) {
+  rxy_form(MLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Register (128<64)
+void Assembler::mlgr(Register r1, Register r2) { rre_form(MLGR, r1, r2); }
+
+// Multiply Halfword Immediate (64)
+void Assembler::mghi(Register r1, const Operand& opnd) {
+  ri_form(MGHI, r1, opnd);
+}
+
+// Multiply Single Immediate (64)
+void Assembler::msgfi(Register r1, const Operand& opnd) {
+  ril_form(MSGFI, r1, opnd);
+}
+
+// Multiply Single Register-Storage (64)
+void Assembler::msg(Register r1, const MemOperand& opnd) {
+  rxy_form(MSG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Multiply Single Register-Register (64)
+void Assembler::msgr(Register r1, Register r2) { rre_form(MSGR, r1, r2); }
+
+// --------------------------
+// 32-bit Divide Instructions
+// --------------------------
+// Divide Register-Storage (32<-64)
+void Assembler::d(Register r1, const MemOperand& opnd) {
+  rx_form(D, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Divide Register (32<-64)
+void Assembler::dr(Register r1, Register r2) {
+  DCHECK(r1.code() % 2 == 0);
+  rr_form(DR, r1, r2);
+}
+
+// Divide Logical Register-Storage (32<-64)
+void Assembler::dl(Register r1, const MemOperand& opnd) {
+  rx_form(DL, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Divide Logical Register (32<-64)
+void Assembler::dlr(Register r1, Register r2) { rre_form(DLR, r1, r2); }
+
+// --------------------------
+// 64-bit Divide Instructions
+// --------------------------
+// Divide Logical Register (64<-128)
+void Assembler::dlgr(Register r1, Register r2) { rre_form(DLGR, r1, r2); }
+
+// Divide Single Register (64)
+void Assembler::dsgr(Register r1, Register r2) { rre_form(DSGR, r1, r2); }
+
+// --------------------
+// Bitwise Instructions
+// --------------------
+// AND Register-Storage (32)
+void Assembler::n(Register r1, const MemOperand& opnd) {
+  rx_form(N, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// AND Register-Storage (32)
+void Assembler::ny(Register r1, const MemOperand& opnd) {
+  rxy_form(NY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// AND Register (32)
+void Assembler::nr(Register r1, Register r2) { rr_form(NR, r1, r2); }
+
+// AND Register-Register-Register (32)
+void Assembler::nrk(Register r1, Register r2, Register r3) {
+  rrf1_form(NRK, r1, r2, r3);
+}
+
+// AND Register-Storage (64)
+void Assembler::ng(Register r1, const MemOperand& opnd) {
+  rxy_form(NG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// AND Register (64)
+void Assembler::ngr(Register r1, Register r2) { rre_form(NGR, r1, r2); }
+
+// AND Register-Register-Register (64)
+void Assembler::ngrk(Register r1, Register r2, Register r3) {
+  rrf1_form(NGRK, r1, r2, r3);
+}
+
+// OR Register-Storage (32)
+void Assembler::o(Register r1, const MemOperand& opnd) {
+  rx_form(O, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// OR Register-Storage (32)
+void Assembler::oy(Register r1, const MemOperand& opnd) {
+  rxy_form(OY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// OR Register (32)
+void Assembler::or_z(Register r1, Register r2) { rr_form(OR, r1, r2); }
+
+// OR Register-Register-Register (32)
+void Assembler::ork(Register r1, Register r2, Register r3) {
+  rrf1_form(ORK, r1, r2, r3);
+}
+
+// OR Register-Storage (64)
+void Assembler::og(Register r1, const MemOperand& opnd) {
+  rxy_form(OG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// OR Register (64)
+void Assembler::ogr(Register r1, Register r2) { rre_form(OGR, r1, r2); }
+
+// OR Register-Register-Register (64)
+void Assembler::ogrk(Register r1, Register r2, Register r3) {
+  rrf1_form(OGRK, r1, r2, r3);
+}
+
+// XOR Register-Storage (32)
+void Assembler::x(Register r1, const MemOperand& opnd) {
+  rx_form(X, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// XOR Register-Storage (32)
+void Assembler::xy(Register r1, const MemOperand& opnd) {
+  rxy_form(XY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// XOR Register (32)
+void Assembler::xr(Register r1, Register r2) { rr_form(XR, r1, r2); }
+
+// XOR Register-Register-Register (32)
+void Assembler::xrk(Register r1, Register r2, Register r3) {
+  rrf1_form(XRK, r1, r2, r3);
+}
+
+// XOR Register-Storage (64)
+void Assembler::xg(Register r1, const MemOperand& opnd) {
+  rxy_form(XG, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// XOR Register (64)
+void Assembler::xgr(Register r1, Register r2) { rre_form(XGR, r1, r2); }
+
+// XOR Register-Register-Register (64)
+void Assembler::xgrk(Register r1, Register r2, Register r3) {
+  rrf1_form(XGRK, r1, r2, r3);
+}
+
+// XOR Storage-Storage
+void Assembler::xc(const MemOperand& opnd1, const MemOperand& opnd2,
+                   Length length) {
+  ss_form(XC, length - 1, opnd1.getBaseRegister(), opnd1.getDisplacement(),
+          opnd2.getBaseRegister(), opnd2.getDisplacement());
+}
+
+// -------------------------------------------
+// Bitwise GPR <-> FPR Conversion Instructions
+// -------------------------------------------
+// Load GR from FPR (64 <- L)
+void Assembler::lgdr(Register r1, DoubleRegister f2) {
+  rre_form(LGDR, r1, Register::from_code(f2.code()));
+}
+
+// Load FPR from GR (L <- 64)
+void Assembler::ldgr(DoubleRegister f1, Register r2) {
+  rre_form(LDGR, Register::from_code(f1.code()), r2);
+}
+
+void Assembler::EnsureSpaceFor(int space_needed) {
+  if (buffer_space() <= (kGap + space_needed)) {
+    GrowBuffer(space_needed);
+  }
+}
+
+// Rotate Left Single Logical (32)
+void Assembler::rll(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(RLL, r1, r3, opnd, 0);
+}
+
+// Rotate Left Single Logical (32)
+void Assembler::rll(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(RLL, r1, r3, r0, opnd.immediate());
+}
+
+// Rotate Left Single Logical (32)
+void Assembler::rll(Register r1, Register r3, Register r2,
+                    const Operand& opnd) {
+  rsy_form(RLL, r1, r3, r2, opnd.immediate());
+}
+
+// Rotate Left Single Logical (64)
+void Assembler::rllg(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(RLLG, r1, r3, opnd, 0);
+}
+
+// Rotate Left Single Logical (64)
+void Assembler::rllg(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(RLLG, r1, r3, r0, opnd.immediate());
+}
+
+// Rotate Left Single Logical (64)
+void Assembler::rllg(Register r1, Register r3, Register r2,
+                     const Operand& opnd) {
+  rsy_form(RLLG, r1, r3, r2, opnd.immediate());
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sll(Register r1, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rs_form(SLL, r1, r0, opnd, 0);
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sll(Register r1, const Operand& opnd) {
+  rs_form(SLL, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sllk(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SLLK, r1, r3, opnd, 0);
+}
+
+// Shift Left Single Logical (32)
+void Assembler::sllk(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SLLK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Single Logical (64)
+void Assembler::sllg(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SLLG, r1, r3, opnd, 0);
+}
+
+// Shift Left Single Logical (64)
+void Assembler::sllg(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SLLG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Double Logical (64)
+void Assembler::sldl(Register r1, Register b2, const Operand& opnd) {
+  DCHECK(r1.code() % 2 == 0);
+  rs_form(SLDL, r1, r0, b2, opnd.immediate());
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srl(Register r1, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rs_form(SRL, r1, r0, opnd, 0);
+}
+
+// Shift Right Double Arith (64)
+void Assembler::srda(Register r1, Register b2, const Operand& opnd) {
+  DCHECK(r1.code() % 2 == 0);
+  rs_form(SRDA, r1, r0, b2, opnd.immediate());
+}
+
+// Shift Right Double Logical (64)
+void Assembler::srdl(Register r1, Register b2, const Operand& opnd) {
+  DCHECK(r1.code() % 2 == 0);
+  rs_form(SRDL, r1, r0, b2, opnd.immediate());
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srl(Register r1, const Operand& opnd) {
+  rs_form(SRL, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srlk(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SRLK, r1, r3, opnd, 0);
+}
+
+// Shift Right Single Logical (32)
+void Assembler::srlk(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SRLK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Single Logical (64)
+void Assembler::srlg(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SRLG, r1, r3, opnd, 0);
+}
+
+// Shift Right Single Logical (64)
+void Assembler::srlg(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SRLG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Single (32)
+void Assembler::sla(Register r1, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rs_form(SLA, r1, r0, opnd, 0);
+}
+
+// Shift Left Single (32)
+void Assembler::sla(Register r1, const Operand& opnd) {
+  rs_form(SLA, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Left Single (32)
+void Assembler::slak(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SLAK, r1, r3, opnd, 0);
+}
+
+// Shift Left Single (32)
+void Assembler::slak(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SLAK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Left Single (64)
+void Assembler::slag(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SLAG, r1, r3, opnd, 0);
+}
+
+// Shift Left Single (64)
+void Assembler::slag(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SLAG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Single (32)
+void Assembler::sra(Register r1, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rs_form(SRA, r1, r0, opnd, 0);
+}
+
+// Shift Right Single (32)
+void Assembler::sra(Register r1, const Operand& opnd) {
+  rs_form(SRA, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Right Single (32)
+void Assembler::srak(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SRAK, r1, r3, opnd, 0);
+}
+
+// Shift Right Single (32)
+void Assembler::srak(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SRAK, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Single (64)
+void Assembler::srag(Register r1, Register r3, Register opnd) {
+  DCHECK(!opnd.is(r0));
+  rsy_form(SRAG, r1, r3, opnd, 0);
+}
+
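+// Shift Right Single (64)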
+void Assembler::srag(Register r1, Register r3, const Operand& opnd) {
+  rsy_form(SRAG, r1, r3, r0, opnd.immediate());
+}
+
+// Shift Right Double
+void Assembler::srda(Register r1, const Operand& opnd) {
+  DCHECK(r1.code() % 2 == 0);
+  rs_form(SRDA, r1, r0, r0, opnd.immediate());
+}
+
+// Shift Right Double Logical
+void Assembler::srdl(Register r1, const Operand& opnd) {
+  DCHECK(r1.code() % 2 == 0);
+  rs_form(SRDL, r1, r0, r0, opnd.immediate());
+}
+
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode,
+                     TypeFeedbackId ast_id) {
+  positions_recorder()->WriteRecordedPositions();
+  EnsureSpace ensure_space(this);
+
+  int32_t target_index = emit_code_target(target, rmode, ast_id);
+  brasl(r14, Operand(target_index));
+}
+
+void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
+                     Condition cond) {
+  EnsureSpace ensure_space(this);
+
+  int32_t target_index = emit_code_target(target, rmode);
+  brcl(cond, Operand(target_index), true);
+}
+
+// Store (32)
+void Assembler::st(Register src, const MemOperand& dst) {
+  rx_form(ST, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store (32)
+void Assembler::sty(Register src, const MemOperand& dst) {
+  rxy_form(STY, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Halfword
+void Assembler::sth(Register src, const MemOperand& dst) {
+  rx_form(STH, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Halfword
+void Assembler::sthy(Register src, const MemOperand& dst) {
+  rxy_form(STHY, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Character
+void Assembler::stc(Register src, const MemOperand& dst) {
+  rx_form(STC, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Store Character
+void Assembler::stcy(Register src, const MemOperand& dst) {
+  rxy_form(STCY, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// 32-bit Load Multiple - short displacement (12-bits unsigned)
+void Assembler::lm(Register r1, Register r2, const MemOperand& src) {
+  rs_form(LM, r1, r2, src.rb(), src.offset());
+}
+
+// 32-bit Load Multiple - long displacement (20-bits signed)
+void Assembler::lmy(Register r1, Register r2, const MemOperand& src) {
+  rsy_form(LMY, r1, r2, src.rb(), src.offset());
+}
+
+// 64-bit Load Multiple - long displacement (20-bits signed)
+void Assembler::lmg(Register r1, Register r2, const MemOperand& src) {
+  rsy_form(LMG, r1, r2, src.rb(), src.offset());
+}
+
+// Move integer (32)
+void Assembler::mvhi(const MemOperand& opnd1, const Operand& i2) {
+  sil_form(MVHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
+}
+
+// Move integer (64)
+void Assembler::mvghi(const MemOperand& opnd1, const Operand& i2) {
+  sil_form(MVGHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
+}
+
+// Store Register (64)
+void Assembler::stg(Register src, const MemOperand& dst) {
+  DCHECK(!(dst.rb().code() == 15 && dst.offset() < 0));
+  rxy_form(STG, src, dst.rx(), dst.rb(), dst.offset());
+}
+
+// Insert Character
+void Assembler::ic_z(Register r1, const MemOperand& opnd) {
+  rx_form(IC_z, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Insert Character
+void Assembler::icy(Register r1, const MemOperand& opnd) {
+  rxy_form(ICY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Insert Immediate (High)
+void Assembler::iihf(Register r1, const Operand& opnd) {
+  ril_form(IIHF, r1, opnd);
+}
+
+// Insert Immediate (low)
+void Assembler::iilf(Register r1, const Operand& opnd) {
+  ril_form(IILF, r1, opnd);
+}
+
+// Insert Immediate (high high)
+void Assembler::iihh(Register r1, const Operand& opnd) {
+  ri_form(IIHH, r1, opnd);
+}
+
+// Insert Immediate (high low)
+void Assembler::iihl(Register r1, const Operand& opnd) {
+  ri_form(IIHL, r1, opnd);
+}
+
+// Insert Immediate (low high)
+void Assembler::iilh(Register r1, const Operand& opnd) {
+  ri_form(IILH, r1, opnd);
+}
+
+// Insert Immediate (low low)
+void Assembler::iill(Register r1, const Operand& opnd) {
+  ri_form(IILL, r1, opnd);
+}
+
+// GPR <-> FPR Instructions
+
+// Floating point instructions
+//
+// Load zero Register (64)
+void Assembler::lzdr(DoubleRegister r1) {
+  rre_form(LZDR, Register::from_code(r1.code()), Register::from_code(0));
+}
+
+// Add Register-Register (SB)
+void Assembler::aebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(AEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Add Register-Storage (LB)
+void Assembler::adb(DoubleRegister r1, const MemOperand& opnd) {
+  rxe_form(ADB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
+}
+
+// Add Register-Register (LB)
+void Assembler::adbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(ADBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Compare Register-Register (SB)
+void Assembler::cebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(CEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Compare Register-Storage (LB)
+void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
+  rx_form(CD, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+          opnd.offset());
+}
+
+// Compare Register-Register (LB)
+void Assembler::cdbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(CDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Divide Register-Register (SB)
+void Assembler::debr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(DEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Divide Register-Storage (LB)
+void Assembler::ddb(DoubleRegister r1, const MemOperand& opnd) {
+  rxe_form(DDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
+}
+
+// Divide Register-Register (LB)
+void Assembler::ddbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(DDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Multiply Register-Register (SB)
+void Assembler::meebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(MEEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Multiply Register-Storage (LB)
+void Assembler::mdb(DoubleRegister r1, const MemOperand& opnd) {
+  rxe_form(MDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
+}
+
+// Multiply Register-Register (LB)
+void Assembler::mdbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(MDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Subtract Register-Register (SB)
+void Assembler::sebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(SEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Subtract Register-Storage (LB)
+void Assembler::sdb(DoubleRegister r1, const MemOperand& opnd) {
+  rxe_form(SDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
+}
+
+// Subtract Register-Register (LB)
+void Assembler::sdbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(SDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Square Root (LB)
+void Assembler::sqdb(DoubleRegister r1, const MemOperand& opnd) {
+  rxe_form(SQDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
+}
+
+// Square Root Register-Register (SB)
+void Assembler::sqebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(SQEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Square Root Register-Register (LB)
+void Assembler::sqdbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(SQDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Load Rounded (double -> float)
+void Assembler::ledbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(LEDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Load Lengthen (float -> double)
+void Assembler::ldebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(LDEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Load Complement Register-Register (LB)
+void Assembler::lcdbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(LCDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Load Positive Register-Register (SB)
+void Assembler::lpebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(LPEBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Load Positive Register-Register (LB)
+void Assembler::lpdbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(LPDBR, Register::from_code(r1.code()),
+           Register::from_code(r2.code()));
+}
+
+// Store Double (64)
+void Assembler::std(DoubleRegister r1, const MemOperand& opnd) {
+  rx_form(STD, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Store Double (64)
+void Assembler::stdy(DoubleRegister r1, const MemOperand& opnd) {
+  DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
+  rxy_form(STDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Store Float (32)
+void Assembler::ste(DoubleRegister r1, const MemOperand& opnd) {
+  rx_form(STE, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Store Float (32)
+void Assembler::stey(DoubleRegister r1, const MemOperand& opnd) {
+  DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
+  rxy_form(STEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Double (64)
+void Assembler::ld(DoubleRegister r1, const MemOperand& opnd) {
+  DCHECK(is_uint12(opnd.offset()));
+  rx_form(LD, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
+}
+
+// Load Double (64)
+void Assembler::ldy(DoubleRegister r1, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  rxy_form(LDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Float (32)
+void Assembler::le_z(DoubleRegister r1, const MemOperand& opnd) {
+  DCHECK(is_uint12(opnd.offset()));
+  rx_form(LE, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
+}
+
+// Load Float (32)
+void Assembler::ley(DoubleRegister r1, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  rxy_form(LEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
+// Load Double Register-Register (64)
+void Assembler::ldr(DoubleRegister r1, DoubleRegister r2) {
+  rr_form(LDR, r1, r2);
+}
+
+// Load And Test Register-Register (S)
+void Assembler::ltebr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(LTEBR, r1, r2);
+}
+
+// Load And Test Register-Register (L)
+void Assembler::ltdbr(DoubleRegister r1, DoubleRegister r2) {
+  rre_form(LTDBR, r1, r2);
+}
+
+// Convert to Fixed point (64<-S)
+void Assembler::cgebr(Condition m, Register r1, DoubleRegister r2) {
+  rrfe_form(CGEBR, m, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed point (64<-L)
+void Assembler::cgdbr(Condition m, Register r1, DoubleRegister r2) {
+  rrfe_form(CGDBR, m, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed point (32<-L)
+void Assembler::cfdbr(Condition m, Register r1, DoubleRegister r2) {
+  rrfe_form(CFDBR, m, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert from Fixed point (S<-64)
+void Assembler::cegbr(DoubleRegister r1, Register r2) {
+  rre_form(CEGBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert from Fixed point (L<-64)
+void Assembler::cdgbr(DoubleRegister r1, Register r2) {
+  rre_form(CDGBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert from Fixed point (L<-32)
+void Assembler::cdfbr(DoubleRegister r1, Register r2) {
+  rre_form(CDFBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert to Fixed Logical (64<-L)
+void Assembler::clgdbr(Condition m3, Condition m4, Register r1,
+                       DoubleRegister r2) {
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CLGDBR, m3, m4, r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed Logical (64<-F32)
+void Assembler::clgebr(Condition m3, Condition m4, Register r1,
+                       DoubleRegister r2) {
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CLGEBR, m3, m4, r1, Register::from_code(r2.code()));
+}
+
+// Convert to Fixed Logical (32<-F64)
+void Assembler::clfdbr(Condition m3, Condition m4, Register r1,
+                       DoubleRegister r2) {
+  DCHECK_EQ(m3, Condition(0));
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CLFDBR, Condition(0), Condition(0), r1,
+            Register::from_code(r2.code()));
+}
+
+// Convert to Fixed Logical (32<-F32)
+void Assembler::clfebr(Condition m3, Condition m4, Register r1,
+                       DoubleRegister r2) {
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CLFEBR, m3, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Convert from Fixed Logical (F32<-64)
+void Assembler::celgbr(Condition m3, Condition m4, DoubleRegister r1,
+                       Register r2) {
+  DCHECK_EQ(m3, Condition(0));
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CELGBR, Condition(0), Condition(0), Register::from_code(r1.code()),
+            r2);
+}
+
+// Convert from Fixed Logical (F32<-32)
+void Assembler::celfbr(Condition m3, Condition m4, DoubleRegister r1,
+                       Register r2) {
+  DCHECK_EQ(m3, Condition(0));
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CELFBR, Condition(0), Condition(0), Register::from_code(r1.code()),
+            r2);
+}
+
+// Convert from Fixed Logical (L<-64)
+void Assembler::cdlgbr(Condition m3, Condition m4, DoubleRegister r1,
+                       Register r2) {
+  DCHECK_EQ(m3, Condition(0));
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CDLGBR, Condition(0), Condition(0), Register::from_code(r1.code()),
+            r2);
+}
+
+// Convert from Fixed Logical (L<-32)
+void Assembler::cdlfbr(Condition m3, Condition m4, DoubleRegister r1,
+                       Register r2) {
+  DCHECK_EQ(m4, Condition(0));
+  rrfe_form(CDLFBR, m3, Condition(0), Register::from_code(r1.code()), r2);
+}
+
+// Convert from Fixed point (S<-32)
+void Assembler::cefbr(DoubleRegister r1, Register r2) {
+  rre_form(CEFBR, Register::from_code(r1.code()), r2);
+}
+
+// Convert to Fixed point (32<-S)
+void Assembler::cfebr(Condition m3, Register r1, DoubleRegister r2) {
+  rrfe_form(CFEBR, m3, Condition(0), r1, Register::from_code(r2.code()));
+}
+
+// Load (L <- S)
+void Assembler::ldeb(DoubleRegister d1, const MemOperand& opnd) {
+  rxe_form(LDEB, Register::from_code(d1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
+}
+
+// Load FP Integer
+void Assembler::fiebra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
+  rrf2_form(FIEBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
+}
+
+// Load FP Integer
+void Assembler::fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
+  rrf2_form(FIDBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
+}
+
+// Multiply and Add - MADBR R1, R3, R2
+// R1 = R3 * R2 + R1
+void Assembler::madbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
+  rrd_form(MADBR, Register::from_code(d1.code()),
+           Register::from_code(d3.code()), Register::from_code(d2.code()));
+}
+
+// Multiply and Subtract - MSDBR R1, R3, R2
+// R1 = R3 * R2 - R1
+void Assembler::msdbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
+  rrd_form(MSDBR, Register::from_code(d1.code()),
+           Register::from_code(d3.code()), Register::from_code(d2.code()));
+}
+
+// end of S390 instructions
+
+bool Assembler::IsNop(SixByteInstr instr, int type) {
+  DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
+  if (DEBUG_BREAK_NOP == type) {
+    return ((instr & 0xffffffff) == 0xa53b0000);  // oill r3, 0
+  }
+  return ((instr & 0xffff) == 0x1800);  // lr r0,r0
+}
+
+void Assembler::GrowBuffer(int needed) {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // Compute new buffer size.
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4 * KB) {
+    desc.buffer_size = 4 * KB;
+  } else if (buffer_size_ < 1 * MB) {
+    desc.buffer_size = 2 * buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1 * MB;
+  }
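+  // For example, a 512 KB buffer grows to 1 MB, and a 3 MB buffer to 4 MB.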
+  int space = buffer_space() + (desc.buffer_size - buffer_size_);
+  if (space < needed) {
+    desc.buffer_size += needed - space;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // no overflow
+
+  // Set up new buffer.
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.origin = this;
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // Copy the data.
+  intptr_t pc_delta = desc.buffer - buffer_;
+  intptr_t rc_delta =
+      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+          desc.reloc_size);
+
+  // Switch buffers.
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // None of our relocation types are pc relative pointing outside the code
+  // buffer nor pc absolute pointing inside the code buffer, so there is no need
+  // to relocate any emitted relocation entries.
+}
+
+void Assembler::db(uint8_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint8_t*>(pc_) = data;
+  pc_ += sizeof(uint8_t);
+}
+
+void Assembler::dd(uint32_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint32_t*>(pc_) = data;
+  pc_ += sizeof(uint32_t);
+}
+
+void Assembler::dq(uint64_t value) {
+  CheckBuffer();
+  *reinterpret_cast<uint64_t*>(pc_) = value;
+  pc_ += sizeof(uint64_t);
+}
+
+void Assembler::dp(uintptr_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uintptr_t*>(pc_) = data;
+  pc_ += sizeof(uintptr_t);
+}
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  if (RelocInfo::IsNone(rmode) ||
+      // Don't record external references unless the heap will be serialized.
+      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
+       !emit_debug_code())) {
+    return;
+  }
+  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+    data = RecordedAstId().ToInt();
+    ClearRecordedAstId();
+  }
+  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
+  relocations_.push_back(rinfo);
+}
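+// Deferred entries collected here are written out in a single pass by
+// EmitRelocations() below.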
+
+void Assembler::emit_label_addr(Label* label) {
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+  int position = link(label);
+  DCHECK(label->is_bound());
+  // Keep internal references relative until EmitRelocations.
+  dp(position);
+}
+
+void Assembler::EmitRelocations() {
+  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
+
+  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
+       it != relocations_.end(); it++) {
+    RelocInfo::Mode rmode = it->rmode();
+    Address pc = buffer_ + it->position();
+    Code* code = NULL;
+    RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);
+
+    // Fix up internal references now that they are guaranteed to be bound.
+    if (RelocInfo::IsInternalReference(rmode)) {
+      // Jump table entry
+      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
+      Memory::Address_at(pc) = buffer_ + pos;
+    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
+      // mov sequence
+      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
+      set_target_address_at(isolate(), pc, code, buffer_ + pos,
+                            SKIP_ICACHE_FLUSH);
+    }
+
+    reloc_info_writer.Write(&rinfo);
+  }
+
+  reloc_info_writer.Finish();
+}
+
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/assembler-s390.h b/src/s390/assembler-s390.h
new file mode 100644
index 0000000..0b9fa38
--- /dev/null
+++ b/src/s390/assembler-s390.h
@@ -0,0 +1,1466 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+// A light-weight S390 Assembler
+// Generates user mode instructions for z/Architecture
+
+#ifndef V8_S390_ASSEMBLER_S390_H_
+#define V8_S390_ASSEMBLER_S390_H_
+#include <stdio.h>
+#if V8_HOST_ARCH_S390
+// elf.h include is required for auxv check for STFLE facility used
+// for hardware detection, which is sensible only on s390 hosts.
+#include <elf.h>
+#endif
+
+#include <fcntl.h>
+#include <unistd.h>
+#include "src/assembler.h"
+#include "src/s390/constants-s390.h"
+
+#define ABI_USES_FUNCTION_DESCRIPTORS 0
+
+#define ABI_PASSES_HANDLES_IN_REGS 1
+
+// ObjectPair is defined in runtime/runtime-utils.h.
+// On 31-bit, ObjectPair == uint64_t.  ABI dictates long long
+//            be returned with the lower addressed half in r2
+//            and the higher addressed half in r3. (Returns in Regs)
+// On 64-bit, ObjectPair is a struct.  ABI dictates structs be
+//            returned in a storage buffer allocated by the caller,
+//            with the address of this buffer passed as a hidden
+//            argument in r2. (Does NOT return in Regs)
+// For x86 linux, ObjectPair is returned in registers.
+#if V8_TARGET_ARCH_S390X
+#define ABI_RETURNS_OBJECTPAIR_IN_REGS 0
+#else
+#define ABI_RETURNS_OBJECTPAIR_IN_REGS 1
+#endif
+
+#define ABI_CALL_VIA_IP 1
+
+#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V)                              \
+  V(r0)  V(r1)  V(r2)  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)  \
+  V(r8)  V(r9)  V(r10) V(fp) V(ip) V(r13) V(r14) V(sp)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V)                  \
+  V(r2)  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)                \
+  V(r8)  V(r9)  V(r13)
+
+#define DOUBLE_REGISTERS(V)                               \
+  V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
+  V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
+  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)         \
+  V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d15) V(d0)
+// clang-format on
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+struct Register {
+  enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+    GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+        kAfterLast,
+    kCode_no_reg = -1
+  };
+  static const int kNumRegisters = Code::kAfterLast;
+
+#define REGISTER_COUNT(R) 1 +
+  static const int kNumAllocatable =
+      ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
+#undef REGISTER_COUNT
+
+#define REGISTER_BIT(R) 1 << kCode_##R |
+  static const RegList kAllocatable =
+      ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT) 0;
+#undef REGISTER_BIT
+
+  static Register from_code(int code) {
+    DCHECK(code >= 0);
+    DCHECK(code < kNumRegisters);
+    Register r = {code};
+    return r;
+  }
+
+  const char* ToString();
+  bool IsAllocatable() const;
+  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+  bool is(Register reg) const { return reg_code == reg.reg_code; }
+  int code() const {
+    DCHECK(is_valid());
+    return reg_code;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << reg_code;
+  }
+
+  void set_code(int code) {
+    reg_code = code;
+    DCHECK(is_valid());
+  }
+
+#if V8_TARGET_LITTLE_ENDIAN
+  static const int kMantissaOffset = 0;
+  static const int kExponentOffset = 4;
+#else
+  static const int kMantissaOffset = 4;
+  static const int kExponentOffset = 0;
+#endif
+
+  // Unfortunately we can't make this private in a struct.
+  int reg_code;
+};
+
+typedef struct Register Register;
+
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
+
+// Register aliases
+const Register kLithiumScratch = r1;  // lithium scratch.
+const Register kRootRegister = r10;   // Roots array pointer.
+const Register cp = r13;              // JavaScript context pointer.
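+
+// Illustrative use of the Register interface above (example only; the
+// register choices are arbitrary):
+//   Register reg = Register::from_code(Register::kCode_r2);
+//   DCHECK(reg.is(r2));                    // same register as the r2 alias
+//   RegList live = reg.bit() | cp.bit();   // build a register mask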
+
+// Double word FP register.
+struct DoubleRegister {
+  enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+    DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+        kAfterLast,
+    kCode_no_reg = -1
+  };
+
+  static const int kNumRegisters = Code::kAfterLast;
+  static const int kMaxNumRegisters = kNumRegisters;
+
+  const char* ToString();
+  bool IsAllocatable() const;
+  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+
+  int code() const {
+    DCHECK(is_valid());
+    return reg_code;
+  }
+
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << reg_code;
+  }
+
+  static DoubleRegister from_code(int code) {
+    DoubleRegister r = {code};
+    return r;
+  }
+
+  int reg_code;
+};
+
+typedef DoubleRegister DoubleRegister;
+
+#define DECLARE_REGISTER(R) \
+  const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_dreg = {DoubleRegister::kCode_no_reg};
+
+// Aliases for double registers.  Defined using #define instead of
+// "static const DoubleRegister&" because Clang complains otherwise when a
+// compilation unit that includes this header doesn't use the variables.
+#define kDoubleRegZero d14
+#define kScratchDoubleReg d13
+
+Register ToRegister(int num);
+
+// Coprocessor register
+struct CRegister {
+  bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
+  bool is(CRegister creg) const { return reg_code == creg.reg_code; }
+  int code() const {
+    DCHECK(is_valid());
+    return reg_code;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << reg_code;
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int reg_code;
+};
+
+const CRegister no_creg = {-1};
+
+const CRegister cr0 = {0};
+const CRegister cr1 = {1};
+const CRegister cr2 = {2};
+const CRegister cr3 = {3};
+const CRegister cr4 = {4};
+const CRegister cr5 = {5};
+const CRegister cr6 = {6};
+const CRegister cr7 = {7};
+
+// TODO(john.yan) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+#if V8_TARGET_ARCH_S390X
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
+#else
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
+#endif
+
+// Class Operand represents a shifter operand in data processing instructions
+// defining immediate numbers and masks
+typedef uint8_t Length;
+
+struct Mask {
+  uint8_t mask;
+  uint8_t value() { return mask; }
+  static Mask from_value(uint8_t input) {
+    DCHECK(input <= 0x0F);
+    Mask m = {input};
+    return m;
+  }
+};
+
+class Operand BASE_EMBEDDED {
+ public:
+  // immediate
+  INLINE(explicit Operand(intptr_t immediate,
+                          RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
+  INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
+  INLINE(explicit Operand(const ExternalReference& f));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // rm
+  INLINE(explicit Operand(Register rm));
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  bool must_output_reloc_info(const Assembler* assembler) const;
+
+  inline intptr_t immediate() const {
+    DCHECK(!rm_.is_valid());
+    return imm_;
+  }
+
+  inline void setBits(int n) {
+    imm_ = (static_cast<uint32_t>(imm_) << (32 - n)) >> (32 - n);
+  }
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  intptr_t imm_;  // valid if rm_ == no_reg
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+  friend class MacroAssembler;
+};
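+
+// Illustrative Operand construction (example only):
+//   Operand imm(static_cast<intptr_t>(0x1234));  // immediate operand
+//   Operand reg_op(r3);                          // register operand
+//   DCHECK(!imm.is_reg() && reg_op.is_reg());
+//   Operand zero = Operand::Zero();              // unambiguous zero immediate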
+
+typedef int32_t Disp;
+
+// Class MemOperand represents a memory operand in load and store instructions
+// On S390, we have various flavours of memory operands:
+//   1) a base register + 12 bit unsigned displacement
+//   2) a base register + index register + 12 bit unsigned displacement
+//   3) a base register + index register + 20 bit signed displacement
+class MemOperand BASE_EMBEDDED {
+ public:
+  explicit MemOperand(Register rx, Disp offset = 0);
+  explicit MemOperand(Register rx, Register rb, Disp offset = 0);
+
+  int32_t offset() const { return offset_; }
+  uint32_t getDisplacement() const { return offset(); }
+
+  // Base register
+  Register rb() const {
+    DCHECK(!baseRegister.is(no_reg));
+    return baseRegister;
+  }
+
+  Register getBaseRegister() const { return rb(); }
+
+  // Index Register
+  Register rx() const {
+    DCHECK(!indexRegister.is(no_reg));
+    return indexRegister;
+  }
+  Register getIndexRegister() const { return rx(); }
+
+ private:
+  Register baseRegister;   // base
+  Register indexRegister;  // index
+  int32_t offset_;         // offset
+
+  friend class Assembler;
+};
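+
+// Illustrative MemOperand construction for the addressing forms listed above
+// (example only; register choices are arbitrary):
+//   MemOperand base_disp(r3, 8);    // base register + displacement
+//   MemOperand indexed(r4, r3, 8);  // index register + base register + disp
+//   DCHECK(base_disp.offset() == 8);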
+
+class DeferredRelocInfo {
+ public:
+  DeferredRelocInfo() {}
+  DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
+      : position_(position), rmode_(rmode), data_(data) {}
+
+  int position() const { return position_; }
+  RelocInfo::Mode rmode() const { return rmode_; }
+  intptr_t data() const { return data_; }
+
+ private:
+  int position_;
+  RelocInfo::Mode rmode_;
+  intptr_t data_;
+};
+
+class Assembler : public AssemblerBase {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
+  virtual ~Assembler() {}
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Links a label at the current pc_offset().  If already bound, returns the
+  // bound position.  If already linked, returns the position of the prior link.
+  // Otherwise, returns the current pc_offset().
+  int link(Label* L);
+
+  // Determines if Label is bound and near enough so that a single
+  // branch instruction can be used to reach it.
+  bool is_near(Label* L, Condition cond);
+
+  // Returns the branch offset to the given label from the current code position
+  // Links the label to the current position if it is still unbound
+  int branch_offset(Label* L) { return link(L) - pc_offset(); }
+
+  // Puts a label's target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+  void load_label_offset(Register r1, Label* L);
+
+  // Read/Modify the code target address in the branch/call instruction at pc.
+  INLINE(static Address target_address_at(Address pc, Address constant_pool));
+  INLINE(static void set_target_address_at(
+      Isolate* isolate, Address pc, Address constant_pool, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+  INLINE(static Address target_address_at(Address pc, Code* code)) {
+    Address constant_pool = NULL;
+    return target_address_at(pc, constant_pool);
+  }
+  INLINE(static void set_target_address_at(
+      Isolate* isolate, Address pc, Code* code, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+    Address constant_pool = NULL;
+    set_target_address_at(isolate, pc, constant_pool, target,
+                          icache_flush_mode);
+  }
+
+  // Return the code target address at a call site from the return address
+  // of that call in the instruction stream.
+  inline static Address target_address_from_return_address(Address pc);
+
+  // Given the address of the beginning of a call, return the address
+  // in the instruction stream that the call will return to.
+  INLINE(static Address return_address_from_call_start(Address pc));
+
+  inline Handle<Object> code_target_object_handle_at(Address pc);
+  // This sets the branch destination.
+  // This is for calls and branches within generated code.
+  inline static void deserialization_set_special_target_at(
+      Isolate* isolate, Address instruction_payload, Code* code,
+      Address target);
+
+  // This sets the internal reference at the pc.
+  inline static void deserialization_set_target_internal_reference_at(
+      Isolate* isolate, Address pc, Address target,
+      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
+  // Here we are patching the address in the IIHF/IILF instruction pair.
+  // These values are used in the serialization process and must be zero for
+  // S390 platform, as Code, Embedded Object or External-reference pointers
+  // are split across two consecutive instructions and don't exist separately
+  // in the code, so the serializer should not step forwards in memory after
+  // a target is resolved and written.
+  static const int kSpecialTargetSize = 0;
+
+// Number of bytes for instructions used to store pointer sized constant.
+#if V8_TARGET_ARCH_S390X
+  static const int kBytesForPtrConstant = 12;  // IIHF + IILF
+#else
+  static const int kBytesForPtrConstant = 6;  // IILF
+#endif
+
+  // Distance between the instruction referring to the address of the call
+  // target and the return address.
+
+  // Offset between call target address and return address
+  // for BRASL calls
+  // Patch will be applied to other FIXED_SEQUENCE calls
+  static const int kCallTargetAddressOffset = 6;
+
+// The length of FIXED_SEQUENCE call
+// iihf    r8, <address_hi>  // <64-bit only>
+// iilf    r8, <address_lo>
+// basr    r14, r8
+#if V8_TARGET_ARCH_S390X
+  static const int kCallSequenceLength = 14;
+#else
+  static const int kCallSequenceLength = 8;
+#endif
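+  // (These lengths follow from the instruction encodings: IIHF and IILF are
+  // 6-byte RIL-format instructions and BASR is a 2-byte RR-format
+  // instruction, so the 64-bit sequence is 6 + 6 + 2 = 14 bytes and the
+  // 31-bit sequence is 6 + 2 = 8 bytes.)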
+
+  // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
+  // code patch FIXED_SEQUENCE in bytes!
+  // JS Return Sequence = Call Sequence + BKPT
+  // static const int kJSReturnSequenceLength = kCallSequenceLength + 2;
+
+  // This is the length of the code sequence from SetDebugBreakAtSlot()
+  // FIXED_SEQUENCE in bytes!
+  static const int kDebugBreakSlotLength = kCallSequenceLength;
+  static const int kPatchDebugBreakSlotReturnOffset = kCallTargetAddressOffset;
+
+  // Length to patch between the start of the JS return sequence
+  // from SetDebugBreakAtReturn and the address from
+  // break_address_from_return_address.
+  //
+  // frame->pc() in Debug::SetAfterBreakTarget will point to BKPT in
+  // JS return sequence, so the length to patch will not include BKPT
+  // instruction length.
+  // static const int kPatchReturnSequenceAddressOffset =
+  //     kCallSequenceLength - kPatchDebugBreakSlotReturnOffset;
+
+  // Length to patch between the start of the FIXED call sequence from
+  // SetDebugBreakAtSlot() and the address from
+  // break_address_from_return_address.
+  static const int kPatchDebugBreakSlotAddressOffset =
+      kDebugBreakSlotLength - kPatchDebugBreakSlotReturnOffset;
+
+  static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
+    return ((cr.code() * CRWIDTH) + crbit);
+  }
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+
+  // Helper for unconditional branch to Label with update to save register
+  void b(Register r, Label* l) {
+    positions_recorder()->WriteRecordedPositions();
+    int32_t halfwords = branch_offset(l) / 2;
+    brasl(r, Operand(halfwords));
+  }
+
+  // Conditional Branch Instruction - Generates either BRC / BRCL
+  void branchOnCond(Condition c, int branch_offset, bool is_bound = false);
+
+  // Helpers for conditional branch to Label
+  void b(Condition cond, Label* l, Label::Distance dist = Label::kFar) {
+    branchOnCond(cond, branch_offset(l),
+                 l->is_bound() || (dist == Label::kNear));
+  }
+
+  void bc_short(Condition cond, Label* l, Label::Distance dist = Label::kFar) {
+    b(cond, l, Label::kNear);
+  }
+  // Helpers for conditional branch to Label
+  void beq(Label* l, Label::Distance dist = Label::kFar) { b(eq, l, dist); }
+  void bne(Label* l, Label::Distance dist = Label::kFar) { b(ne, l, dist); }
+  void blt(Label* l, Label::Distance dist = Label::kFar) { b(lt, l, dist); }
+  void ble(Label* l, Label::Distance dist = Label::kFar) { b(le, l, dist); }
+  void bgt(Label* l, Label::Distance dist = Label::kFar) { b(gt, l, dist); }
+  void bge(Label* l, Label::Distance dist = Label::kFar) { b(ge, l, dist); }
+  void b(Label* l, Label::Distance dist = Label::kFar) { b(al, l, dist); }
+  void jmp(Label* l, Label::Distance dist = Label::kFar) { b(al, l, dist); }
+  void bunordered(Label* l, Label::Distance dist = Label::kFar) {
+    b(unordered, l, dist);
+  }
+  void bordered(Label* l, Label::Distance dist = Label::kFar) {
+    b(ordered, l, dist);
+  }
+
+  // Helpers for conditional indirect branch off register
+  void b(Condition cond, Register r) { bcr(cond, r); }
+  void beq(Register r) { b(eq, r); }
+  void bne(Register r) { b(ne, r); }
+  void blt(Register r) { b(lt, r); }
+  void ble(Register r) { b(le, r); }
+  void bgt(Register r) { b(gt, r); }
+  void bge(Register r) { b(ge, r); }
+  void b(Register r) { b(al, r); }
+  void jmp(Register r) { b(al, r); }
+  void bunordered(Register r) { b(unordered, r); }
+  void bordered(Register r) { b(ordered, r); }
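+
+  // Example of the helpers above (illustrative only):
+  //
+  // Label done;
+  // beq(&done);   // conditional forward branch to the unbound label
+  // ...
+  // bind(&done);  // bind the label to the current position
+  // b(r14);       // unconditional indirect branch through r14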
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2 (>= 4).
+  void Align(int m);
+  // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+  void DataAlign(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();
+
+  void breakpoint(bool do_print) {
+    if (do_print) {
+      printf("DebugBreak is inserted to %p\n", pc_);
+    }
+#if V8_HOST_ARCH_64_BIT
+    int64_t value = reinterpret_cast<uint64_t>(&v8::base::OS::DebugBreak);
+    int32_t hi_32 = static_cast<int64_t>(value) >> 32;
+    int32_t lo_32 = static_cast<int32_t>(value);
+
+    iihf(r1, Operand(hi_32));
+    iilf(r1, Operand(lo_32));
+#else
+    iilf(r1, Operand(reinterpret_cast<uint32_t>(&v8::base::OS::DebugBreak)));
+#endif
+    basr(r14, r1);
+  }
+
+  void call(Handle<Code> target, RelocInfo::Mode rmode,
+            TypeFeedbackId ast_id = TypeFeedbackId::None());
+  void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
+
+// S390 instruction generation
+#define I_FORM(name) void name(const Operand& i)
+
+#define RR_FORM(name) void name(Register r1, Register r2)
+
+#define RR2_FORM(name) void name(Condition m1, Register r2)
+
+#define RX_FORM(name)                                        \
+  void name(Register r1, Register x2, Register b2, Disp d2); \
+  void name(Register r1, const MemOperand& opnd)
+
+#define RI1_FORM(name) void name(Register r, const Operand& i)
+
+#define RI2_FORM(name) void name(Condition m, const Operand& i)
+
+#define RIE_FORM(name) void name(Register r1, Register R3, const Operand& i)
+
+#define RIE_F_FORM(name)                                                    \
+  void name(Register r1, Register r2, const Operand& i3, const Operand& i4, \
+            const Operand& i5)
+
+#define RIL1_FORM(name) void name(Register r1, const Operand& i2)
+
+#define RIL2_FORM(name) void name(Condition m1, const Operand& i2)
+
+#define RXE_FORM(name)                            \
+  void name(Register r1, const MemOperand& opnd); \
+  void name(Register r1, Register b2, Register x2, Disp d2)
+
+#define RXF_FORM(name)                                         \
+  void name(Register r1, Register r3, const MemOperand& opnd); \
+  void name(Register r1, Register r3, Register b2, Register x2, Disp d2)
+
+#define RXY_FORM(name)                                       \
+  void name(Register r1, Register x2, Register b2, Disp d2); \
+  void name(Register r1, const MemOperand& opnd)
+
+#define RSI_FORM(name) void name(Register r1, Register r3, const Operand& i)
+
+#define RIS_FORM(name)                                       \
+  void name(Register r1, Condition m3, Register b4, Disp d4, \
+            const Operand& i2);                              \
+  void name(Register r1, const Operand& i2, Condition m3,    \
+            const MemOperand& opnd)
+
+#define SI_FORM(name)                                  \
+  void name(const MemOperand& opnd, const Operand& i); \
+  void name(const Operand& i2, Register b1, Disp d1)
+
+#define SIL_FORM(name)                                \
+  void name(Register b1, Disp d1, const Operand& i2); \
+  void name(const MemOperand& opnd, const Operand& i2)
+
+#define RRE_FORM(name) void name(Register r1, Register r2)
+
+#define RRF1_FORM(name) void name(Register r1, Register r2, Register r3)
+
+#define RRF2_FORM(name) void name(Condition m1, Register r1, Register r2)
+
+#define RRF3_FORM(name) \
+  void name(Register r3, Condition m4, Register r1, Register r2)
+
+#define RS1_FORM(name)                                         \
+  void name(Register r1, Register r3, const MemOperand& opnd); \
+  void name(Register r1, Register r3, Register b2, Disp d2)
+
+#define RS2_FORM(name)                                          \
+  void name(Register r1, Condition m3, const MemOperand& opnd); \
+  void name(Register r1, Condition m3, Register b2, Disp d2)
+
+#define RSE_FORM(name)                                         \
+  void name(Register r1, Register r3, const MemOperand& opnd); \
+  void name(Register r1, Register r3, Register b2, Disp d2)
+
+#define RSL_FORM(name)                       \
+  void name(Length l, Register b2, Disp d2); \
+  void name(const MemOperand& opnd)
+
+#define RSY1_FORM(name)                                      \
+  void name(Register r1, Register r3, Register b2, Disp d2); \
+  void name(Register r1, Register r3, const MemOperand& opnd)
+
+#define RSY2_FORM(name)                                       \
+  void name(Register r1, Condition m3, Register b2, Disp d2); \
+  void name(Register r1, Condition m3, const MemOperand& opnd)
+
+#define RRD_FORM(name) void name(Register r1, Register r3, Register r2)
+
+#define RRS_FORM(name)                                                     \
+  void name(Register r1, Register r2, Register b4, Disp d4, Condition m3); \
+  void name(Register r1, Register r2, Condition m3, const MemOperand& opnd)
+
+#define S_FORM(name)               \
+  void name(Register b2, Disp d2); \
+  void name(const MemOperand& opnd)
+
+#define SIY_FORM(name)                                \
+  void name(const Operand& i2, Register b1, Disp d1); \
+  void name(const MemOperand& opnd, const Operand& i)
+
+#define SS1_FORM(name)                                                  \
+  void name(Register b1, Disp d1, Register b3, Disp d2, Length length); \
+  void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length)
+
+#define SS2_FORM(name)                                                        \
+  void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length1, \
+            Length length2);                                                  \
+  void name(Register b1, Disp d1, Register b2, Disp d2, Length l1, Length l2)
+
+#define SS3_FORM(name)                                                        \
+  void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length); \
+  void name(const Operand& i3, Register b1, Disp d1, Register b2, Disp d2,    \
+            Length l1)
+
+#define SS4_FORM(name)                                                   \
+  void name(const MemOperand& opnd1, const MemOperand& opnd2);           \
+  void name(Register r1, Register r3, Register b1, Disp d1, Register b2, \
+            Disp d2)
+
+#define SS5_FORM(name)                                                   \
+  void name(const MemOperand& opnd1, const MemOperand& opnd2);           \
+  void name(Register r1, Register r3, Register b3, Disp d2, Register b4, \
+            Disp d4)
+
+#define SSE_FORM(name)                                   \
+  void name(Register b1, Disp d1, Register b2, Disp d2); \
+  void name(const MemOperand& opnd1, const MemOperand& opnd2)
+
+#define SSF_FORM(name)                                                \
+  void name(Register r3, Register b1, Disp d1, Register b2, Disp d2); \
+  void name(Register r3, const MemOperand& opnd1, const MemOperand& opnd2)
+
+  // S390 instruction sets
+  RX_FORM(bc);
+  RR_FORM(bctr);
+  RX_FORM(cd);
+  RRE_FORM(cdr);
+  RXE_FORM(cdb);
+  RXE_FORM(ceb);
+  RRE_FORM(cefbr);
+  RXE_FORM(ddb);
+  RRE_FORM(ddbr);
+  SS1_FORM(ed);
+  RRE_FORM(epair);
+  RX_FORM(ex);
+  RRF2_FORM(fidbr);
+  RRE_FORM(flogr);
+  RX_FORM(ic_z);
+  RXY_FORM(icy);
+  RIL1_FORM(iihf);
+  RI1_FORM(iihh);
+  RI1_FORM(iihl);
+  RIL1_FORM(iilf);
+  RI1_FORM(iilh);
+  RI1_FORM(iill);
+  RRE_FORM(lcgr);
+  RR_FORM(lcr);
+  RX_FORM(le_z);
+  RXY_FORM(ley);
+  RIL1_FORM(llihf);
+  RIL1_FORM(llilf);
+  RRE_FORM(lngr);
+  RR_FORM(lnr);
+  RSY1_FORM(loc);
+  RXY_FORM(lrv);
+  RXY_FORM(lrvh);
+  RXE_FORM(mdb);
+  RRE_FORM(mdbr);
+  SS4_FORM(mvck);
+  SSF_FORM(mvcos);
+  SS4_FORM(mvcs);
+  SS1_FORM(mvn);
+  SS1_FORM(nc);
+  SI_FORM(ni);
+  RIL1_FORM(nihf);
+  RIL1_FORM(nilf);
+  RI1_FORM(nilh);
+  RI1_FORM(nill);
+  RIL1_FORM(oihf);
+  RIL1_FORM(oilf);
+  RI1_FORM(oill);
+  RRE_FORM(popcnt);
+  RXE_FORM(sdb);
+  RRE_FORM(sdbr);
+  RIL1_FORM(slfi);
+  RXY_FORM(slgf);
+  RIL1_FORM(slgfi);
+  RS1_FORM(srdl);
+  RX_FORM(ste);
+  RXY_FORM(stey);
+  RXY_FORM(strv);
+  RI1_FORM(tmll);
+  SS1_FORM(tr);
+  S_FORM(ts);
+  RIL1_FORM(xihf);
+  RIL1_FORM(xilf);
+
+  // Load Address Instructions
+  void la(Register r, const MemOperand& opnd);
+  void lay(Register r, const MemOperand& opnd);
+  void larl(Register r1, const Operand& opnd);
+  void larl(Register r, Label* l);
+
+  // Load Instructions
+  void lb(Register r, const MemOperand& src);
+  void lbr(Register r1, Register r2);
+  void lgb(Register r, const MemOperand& src);
+  void lgbr(Register r1, Register r2);
+  void lh(Register r, const MemOperand& src);
+  void lhy(Register r, const MemOperand& src);
+  void lhr(Register r1, Register r2);
+  void lgh(Register r, const MemOperand& src);
+  void lghr(Register r1, Register r2);
+  void l(Register r, const MemOperand& src);
+  void ly(Register r, const MemOperand& src);
+  void lr(Register r1, Register r2);
+  void lg(Register r, const MemOperand& src);
+  void lgr(Register r1, Register r2);
+  void lgf(Register r, const MemOperand& src);
+  void lgfr(Register r1, Register r2);
+  void lhi(Register r, const Operand& imm);
+  void lghi(Register r, const Operand& imm);
+
+  // Load And Test Instructions
+  void lt_z(Register r, const MemOperand& src);
+  void ltg(Register r, const MemOperand& src);
+  void ltr(Register r1, Register r2);
+  void ltgr(Register r1, Register r2);
+  void ltgfr(Register r1, Register r2);
+
+  // Load Logical Instructions
+  void llc(Register r, const MemOperand& src);
+  void llgc(Register r, const MemOperand& src);
+  void llgf(Register r, const MemOperand& src);
+  void llgfr(Register r1, Register r2);
+  void llh(Register r, const MemOperand& src);
+  void llgh(Register r, const MemOperand& src);
+  void llhr(Register r1, Register r2);
+  void llghr(Register r1, Register r2);
+
+  // Load Multiple Instructions
+  void lm(Register r1, Register r2, const MemOperand& src);
+  void lmy(Register r1, Register r2, const MemOperand& src);
+  void lmg(Register r1, Register r2, const MemOperand& src);
+
+  // Store Instructions
+  void st(Register r, const MemOperand& src);
+  void stc(Register r, const MemOperand& src);
+  void stcy(Register r, const MemOperand& src);
+  void stg(Register r, const MemOperand& src);
+  void sth(Register r, const MemOperand& src);
+  void sthy(Register r, const MemOperand& src);
+  void sty(Register r, const MemOperand& src);
+
+  // Store Multiple Instructions
+  void stm(Register r1, Register r2, const MemOperand& src);
+  void stmy(Register r1, Register r2, const MemOperand& src);
+  void stmg(Register r1, Register r2, const MemOperand& src);
+
+  // Compare Instructions
+  void c(Register r, const MemOperand& opnd);
+  void cy(Register r, const MemOperand& opnd);
+  void cr_z(Register r1, Register r2);
+  void cg(Register r, const MemOperand& opnd);
+  void cgr(Register r1, Register r2);
+  void ch(Register r, const MemOperand& opnd);
+  void chy(Register r, const MemOperand& opnd);
+  void chi(Register r, const Operand& opnd);
+  void cghi(Register r, const Operand& opnd);
+  void cfi(Register r, const Operand& opnd);
+  void cgfi(Register r, const Operand& opnd);
+
+  // Compare Logical Instructions
+  void cl(Register r, const MemOperand& opnd);
+  void cly(Register r, const MemOperand& opnd);
+  void clr(Register r1, Register r2);
+  void clg(Register r, const MemOperand& opnd);
+  void clgr(Register r1, Register r2);
+  void clfi(Register r, const Operand& opnd);
+  void clgfi(Register r, const Operand& opnd);
+  void cli(const MemOperand& mem, const Operand& imm);
+  void cliy(const MemOperand& mem, const Operand& imm);
+  void clc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
+
+  // Test Under Mask Instructions
+  void tm(const MemOperand& mem, const Operand& imm);
+  void tmy(const MemOperand& mem, const Operand& imm);
+
+  // Rotate Instructions
+  void rll(Register r1, Register r3, Register opnd);
+  void rll(Register r1, Register r3, const Operand& opnd);
+  void rll(Register r1, Register r3, Register r2, const Operand& opnd);
+  void rllg(Register r1, Register r3, const Operand& opnd);
+  void rllg(Register r1, Register r3, const Register opnd);
+  void rllg(Register r1, Register r3, Register r2, const Operand& opnd);
+
+  // Shift Instructions (32)
+  void sll(Register r1, Register opnd);
+  void sll(Register r1, const Operand& opnd);
+  void sllk(Register r1, Register r3, Register opnd);
+  void sllk(Register r1, Register r3, const Operand& opnd);
+  void srl(Register r1, Register opnd);
+  void srl(Register r1, const Operand& opnd);
+  void srlk(Register r1, Register r3, Register opnd);
+  void srlk(Register r1, Register r3, const Operand& opnd);
+  void sra(Register r1, Register opnd);
+  void sra(Register r1, const Operand& opnd);
+  void srak(Register r1, Register r3, Register opnd);
+  void srak(Register r1, Register r3, const Operand& opnd);
+  void sla(Register r1, Register opnd);
+  void sla(Register r1, const Operand& opnd);
+  void slak(Register r1, Register r3, Register opnd);
+  void slak(Register r1, Register r3, const Operand& opnd);
+
+  // Shift Instructions (64)
+  void sllg(Register r1, Register r3, const Operand& opnd);
+  void sllg(Register r1, Register r3, const Register opnd);
+  void srlg(Register r1, Register r3, const Operand& opnd);
+  void srlg(Register r1, Register r3, const Register opnd);
+  void srag(Register r1, Register r3, const Operand& opnd);
+  void srag(Register r1, Register r3, const Register opnd);
+  void srda(Register r1, const Operand& opnd);
+  void srdl(Register r1, const Operand& opnd);
+  void slag(Register r1, Register r3, const Operand& opnd);
+  void slag(Register r1, Register r3, const Register opnd);
+  void sldl(Register r1, Register b2, const Operand& opnd);
+  void srdl(Register r1, Register b2, const Operand& opnd);
+  void srda(Register r1, Register b2, const Operand& opnd);
+
+  // Rotate and Insert Selected Bits
+  void risbg(Register dst, Register src, const Operand& startBit,
+             const Operand& endBit, const Operand& shiftAmt,
+             bool zeroBits = true);
+  void risbgn(Register dst, Register src, const Operand& startBit,
+              const Operand& endBit, const Operand& shiftAmt,
+              bool zeroBits = true);
+
+  // Move Character (Mem to Mem)
+  void mvc(const MemOperand& opnd1, const MemOperand& opnd2, uint32_t length);
+
+  // Branch Instructions
+  void basr(Register r1, Register r2);
+  void bcr(Condition m, Register target);
+  void bct(Register r, const MemOperand& opnd);
+  void bctg(Register r, const MemOperand& opnd);
+  void bras(Register r, const Operand& opnd);
+  void brasl(Register r, const Operand& opnd);
+  void brc(Condition c, const Operand& opnd);
+  void brcl(Condition m, const Operand& opnd, bool isCodeTarget = false);
+  void brct(Register r1, const Operand& opnd);
+  void brctg(Register r1, const Operand& opnd);
+
+  // 32-bit Add Instructions
+  void a(Register r1, const MemOperand& opnd);
+  void ay(Register r1, const MemOperand& opnd);
+  void afi(Register r1, const Operand& opnd);
+  void ah(Register r1, const MemOperand& opnd);
+  void ahy(Register r1, const MemOperand& opnd);
+  void ahi(Register r1, const Operand& opnd);
+  void ahik(Register r1, Register r3, const Operand& opnd);
+  void ar(Register r1, Register r2);
+  void ark(Register r1, Register r2, Register r3);
+  void asi(const MemOperand&, const Operand&);
+
+  // 64-bit Add Instructions
+  void ag(Register r1, const MemOperand& opnd);
+  void agf(Register r1, const MemOperand& opnd);
+  void agfi(Register r1, const Operand& opnd);
+  void agfr(Register r1, Register r2);
+  void aghi(Register r1, const Operand& opnd);
+  void aghik(Register r1, Register r3, const Operand& opnd);
+  void agr(Register r1, Register r2);
+  void agrk(Register r1, Register r2, Register r3);
+  void agsi(const MemOperand&, const Operand&);
+
+  // 32-bit Add Logical Instructions
+  void al_z(Register r1, const MemOperand& opnd);
+  void aly(Register r1, const MemOperand& opnd);
+  void alfi(Register r1, const Operand& opnd);
+  void alr(Register r1, Register r2);
+  void alcr(Register r1, Register r2);
+  void alrk(Register r1, Register r2, Register r3);
+
+  // 64-bit Add Logical Instructions
+  void alg(Register r1, const MemOperand& opnd);
+  void algfi(Register r1, const Operand& opnd);
+  void algr(Register r1, Register r2);
+  void algrk(Register r1, Register r2, Register r3);
+
+  // 32-bit Subtract Instructions
+  void s(Register r1, const MemOperand& opnd);
+  void sy(Register r1, const MemOperand& opnd);
+  void sh(Register r1, const MemOperand& opnd);
+  void shy(Register r1, const MemOperand& opnd);
+  void sr(Register r1, Register r2);
+  void srk(Register r1, Register r2, Register r3);
+
+  // 64-bit Subtract Instructions
+  void sg(Register r1, const MemOperand& opnd);
+  void sgf(Register r1, const MemOperand& opnd);
+  void sgr(Register r1, Register r2);
+  void sgfr(Register r1, Register r2);
+  void sgrk(Register r1, Register r2, Register r3);
+
+  // 32-bit Subtract Logical Instructions
+  void sl(Register r1, const MemOperand& opnd);
+  void sly(Register r1, const MemOperand& opnd);
+  void slr(Register r1, Register r2);
+  void slrk(Register r1, Register r2, Register r3);
+  void slbr(Register r1, Register r2);
+
+  // 64-bit Subtract Logical Instructions
+  void slg(Register r1, const MemOperand& opnd);
+  void slgr(Register r1, Register r2);
+  void slgrk(Register r1, Register r2, Register r3);
+
+  // 32-bit Multiply Instructions
+  void m(Register r1, const MemOperand& opnd);
+  void mr_z(Register r1, Register r2);
+  void ml(Register r1, const MemOperand& opnd);
+  void mlr(Register r1, Register r2);
+  void ms(Register r1, const MemOperand& opnd);
+  void msy(Register r1, const MemOperand& opnd);
+  void msfi(Register r1, const Operand& opnd);
+  void msr(Register r1, Register r2);
+  void mh(Register r1, const MemOperand& opnd);
+  void mhy(Register r1, const MemOperand& opnd);
+  void mhi(Register r1, const Operand& opnd);
+
+  // 64-bit Multiply Instructions
+  void mlg(Register r1, const MemOperand& opnd);
+  void mlgr(Register r1, Register r2);
+  void mghi(Register r1, const Operand& opnd);
+  void msgfi(Register r1, const Operand& opnd);
+  void msg(Register r1, const MemOperand& opnd);
+  void msgr(Register r1, Register r2);
+
+  // 32-bit Divide Instructions
+  void d(Register r1, const MemOperand& opnd);
+  void dr(Register r1, Register r2);
+  void dl(Register r1, const MemOperand& opnd);
+  void dlr(Register r1, Register r2);
+
+  // 64-bit Divide Instructions
+  void dlgr(Register r1, Register r2);
+  void dsgr(Register r1, Register r2);
+
+  // Bitwise Instructions (AND / OR / XOR)
+  void n(Register r1, const MemOperand& opnd);
+  void ny(Register r1, const MemOperand& opnd);
+  void nr(Register r1, Register r2);
+  void nrk(Register r1, Register r2, Register r3);
+  void ng(Register r1, const MemOperand& opnd);
+  void ngr(Register r1, Register r2);
+  void ngrk(Register r1, Register r2, Register r3);
+  void o(Register r1, const MemOperand& opnd);
+  void oy(Register r1, const MemOperand& opnd);
+  void or_z(Register r1, Register r2);
+  void ork(Register r1, Register r2, Register r3);
+  void og(Register r1, const MemOperand& opnd);
+  void ogr(Register r1, Register r2);
+  void ogrk(Register r1, Register r2, Register r3);
+  void x(Register r1, const MemOperand& opnd);
+  void xy(Register r1, const MemOperand& opnd);
+  void xr(Register r1, Register r2);
+  void xrk(Register r1, Register r2, Register r3);
+  void xg(Register r1, const MemOperand& opnd);
+  void xgr(Register r1, Register r2);
+  void xgrk(Register r1, Register r2, Register r3);
+  void xc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
+
+  // Bitwise GPR <-> FPR Conversion Instructions
+  void lgdr(Register r1, DoubleRegister f2);
+  void ldgr(DoubleRegister f1, Register r2);
+
+  // Floating Point Load / Store Instructions
+  void ld(DoubleRegister r1, const MemOperand& opnd);
+  void ldy(DoubleRegister r1, const MemOperand& opnd);
+  void le_z(DoubleRegister r1, const MemOperand& opnd);
+  void ley(DoubleRegister r1, const MemOperand& opnd);
+  void ldr(DoubleRegister r1, DoubleRegister r2);
+  void ltdbr(DoubleRegister r1, DoubleRegister r2);
+  void ltebr(DoubleRegister r1, DoubleRegister r2);
+  void std(DoubleRegister r1, const MemOperand& opnd);
+  void stdy(DoubleRegister r1, const MemOperand& opnd);
+  void ste(DoubleRegister r1, const MemOperand& opnd);
+  void stey(DoubleRegister r1, const MemOperand& opnd);
+
+  // Floating Point Load Rounded/Positive Instructions
+  void ledbr(DoubleRegister r1, DoubleRegister r2);
+  void ldebr(DoubleRegister r1, DoubleRegister r2);
+  void lpebr(DoubleRegister r1, DoubleRegister r2);
+  void lpdbr(DoubleRegister r1, DoubleRegister r2);
+
+  // Floating <-> Fixed Point Conversion Instructions
+  void cdlfbr(Condition m3, Condition m4, DoubleRegister fltReg,
+              Register fixReg);
+  void cdlgbr(Condition m3, Condition m4, DoubleRegister fltReg,
+              Register fixReg);
+  void celgbr(Condition m3, Condition m4, DoubleRegister fltReg,
+              Register fixReg);
+  void celfbr(Condition m3, Condition m4, DoubleRegister fltReg,
+              Register fixReg);
+  void clfdbr(Condition m3, Condition m4, Register fixReg,
+              DoubleRegister fltReg);
+  void clfebr(Condition m3, Condition m4, Register fixReg,
+              DoubleRegister fltReg);
+  void clgdbr(Condition m3, Condition m4, Register fixReg,
+              DoubleRegister fltReg);
+  void clgebr(Condition m3, Condition m4, Register fixReg,
+              DoubleRegister fltReg);
+  void cfdbr(Condition m, Register fixReg, DoubleRegister fltReg);
+  void cdfbr(DoubleRegister fltReg, Register fixReg);
+  void cgebr(Condition m, Register fixReg, DoubleRegister fltReg);
+  void cgdbr(Condition m, Register fixReg, DoubleRegister fltReg);
+  void cegbr(DoubleRegister fltReg, Register fixReg);
+  void cdgbr(DoubleRegister fltReg, Register fixReg);
+  void cfebr(Condition m3, Register fixReg, DoubleRegister fltReg);
+  void cefbr(DoubleRegister fltReg, Register fixReg);
+
+  // Floating Point Compare Instructions
+  void cebr(DoubleRegister r1, DoubleRegister r2);
+  void cdb(DoubleRegister r1, const MemOperand& opnd);
+  void cdbr(DoubleRegister r1, DoubleRegister r2);
+
+  // Floating Point Arithmetic Instructions
+  void aebr(DoubleRegister r1, DoubleRegister r2);
+  void adb(DoubleRegister r1, const MemOperand& opnd);
+  void adbr(DoubleRegister r1, DoubleRegister r2);
+  void lzdr(DoubleRegister r1);
+  void sebr(DoubleRegister r1, DoubleRegister r2);
+  void sdb(DoubleRegister r1, const MemOperand& opnd);
+  void sdbr(DoubleRegister r1, DoubleRegister r2);
+  void meebr(DoubleRegister r1, DoubleRegister r2);
+  void mdb(DoubleRegister r1, const MemOperand& opnd);
+  void mdbr(DoubleRegister r1, DoubleRegister r2);
+  void debr(DoubleRegister r1, DoubleRegister r2);
+  void ddb(DoubleRegister r1, const MemOperand& opnd);
+  void ddbr(DoubleRegister r1, DoubleRegister r2);
+  void madbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
+  void msdbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
+  void sqebr(DoubleRegister r1, DoubleRegister r2);
+  void sqdb(DoubleRegister r1, const MemOperand& opnd);
+  void sqdbr(DoubleRegister r1, DoubleRegister r2);
+  void lcdbr(DoubleRegister r1, DoubleRegister r2);
+  void ldeb(DoubleRegister r1, const MemOperand& opnd);
+
+  enum FIDBRA_MASK3 {
+    FIDBRA_CURRENT_ROUNDING_MODE = 0,
+    FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
+    // ...
+    FIDBRA_ROUND_TOWARD_0 = 5,
+    FIDBRA_ROUND_TOWARD_POS_INF = 6,
+    FIDBRA_ROUND_TOWARD_NEG_INF = 7
+  };
+  void fiebra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
+  void fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
+
+  // Move integer
+  void mvhi(const MemOperand& opnd1, const Operand& i2);
+  void mvghi(const MemOperand& opnd1, const Operand& i2);
+
+  // Exception-generating instructions and debugging support
+  void stop(const char* msg, Condition cond = al,
+            int32_t code = kDefaultStopCode, CRegister cr = cr7);
+
+  void bkpt(uint32_t imm16);  // v5 and above
+
+  // Different nop operations are used by the code generator to detect certain
+  // states of the generated code.
+  enum NopMarkerTypes {
+    NON_MARKING_NOP = 0,
+    GROUP_ENDING_NOP,
+    DEBUG_BREAK_NOP,
+    // IC markers.
+    PROPERTY_ACCESS_INLINED,
+    PROPERTY_ACCESS_INLINED_CONTEXT,
+    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+    // Helper values.
+    LAST_CODE_MARKER,
+    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+  };
+
+  void nop(int type = 0);  // 0 is the default non-marking type.
+
+  // Check the code size generated from label to here.
+  int SizeOfCodeGeneratedSince(Label* label) {
+    return pc_offset() - label->pos();
+  }
+
+  // Debugging
+
+  // Mark generator continuation.
+  void RecordGeneratorContinuation();
+
+  // Mark address of a debug break slot.
+  void RecordDebugBreakSlot(RelocInfo::Mode mode);
+
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void SetRecordedAstId(TypeFeedbackId ast_id) { recorded_ast_id_ = ast_id; }
+
+  TypeFeedbackId RecordedAstId() {
+    // TODO(roohack): re-enable this check once the underlying issue is
+    // resolved: DCHECK(!recorded_ast_id_.IsNone());
+    return recorded_ast_id_;
+  }
+
+  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --code-comments to enable.
+  void RecordComment(const char* msg);
+
+  // Record a deoptimization reason that can be used by a log or cpu profiler.
+  // Use --trace-deopt to enable.
+  void RecordDeoptReason(const int reason, int raw_position);
+
+  // Writes a single byte or word of data in the code stream.  Used
+  // for inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
+  void dq(uint64_t data);
+  void dp(uintptr_t data);
+
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
+
+  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+                                          ConstantPoolEntry::Access access,
+                                          ConstantPoolEntry::Type type) {
+    // No embedded constant pool support.
+    UNREACHABLE();
+  }
+
+  // Read/patch instructions
+  SixByteInstr instr_at(int pos) {
+    return Instruction::InstructionBits(buffer_ + pos);
+  }
+  template <typename T>
+  void instr_at_put(int pos, T instr) {
+    Instruction::SetInstructionBits<T>(buffer_ + pos, instr);
+  }
+
+  // Decodes instruction at pos, and returns its length
+  int32_t instr_length_at(int pos) {
+    return Instruction::InstructionLength(buffer_ + pos);
+  }
+
+  static SixByteInstr instr_at(byte* pc) {
+    return Instruction::InstructionBits(pc);
+  }
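+
+  // Illustrative only: instr_at() and instr_length_at() can be combined to
+  // walk the variable-length instructions emitted so far, e.g.
+  //
+  //   for (int pos = 0; pos < pc_offset(); pos += instr_length_at(pos)) {
+  //     SixByteInstr instr = instr_at(pos);
+  //     // ... inspect or patch instr ...
+  //   }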
+
+  static Condition GetCondition(Instr instr);
+
+  static bool IsBranch(Instr instr);
+#if V8_TARGET_ARCH_S390X
+  static bool Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2);
+#else
+  static bool Is32BitLoadIntoIP(SixByteInstr instr);
+#endif
+
+  static bool IsCmpRegister(Instr instr);
+  static bool IsCmpImmediate(Instr instr);
+  static bool IsNop(SixByteInstr instr, int type = NON_MARKING_NOP);
+
+  // The code currently calls CheckBuffer() too often. This has the side
+  // effect of randomly growing the buffer in the middle of multi-instruction
+  // sequences.
+  //
+  // This function allows outside callers to check and grow the buffer
+  void EnsureSpaceFor(int space_needed);
+
+  void EmitRelocations();
+  void emit_label_addr(Label* label);
+
+ public:
+  byte* buffer_pos() const { return buffer_; }
+
+ protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  TypeFeedbackId recorded_ast_id_;
+
+  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Decode instruction(s) at pos and return backchain to previous
+  // label reference or kEndOfChain.
+  int target_at(int pos);
+
+  // Patch instruction(s) at pos to target target_pos (e.g. branch)
+  void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);
+
+  // Record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ private:
+  // Code generation
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
+
+  // Relocation info generation
+  // Each relocation is encoded as a variable size value
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+  std::vector<DeferredRelocInfo> relocations_;
+
+  // The bound position, before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+  // Code emission
+  inline void CheckBuffer();
+  void GrowBuffer(int needed = 0);
+  inline void TrackBranch();
+  inline void UntrackBranch();
+
+  inline int32_t emit_code_target(
+      Handle<Code> target, RelocInfo::Mode rmode,
+      TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+  // Helpers to emit binary encoding of 2/4/6 byte instructions.
+  inline void emit2bytes(uint16_t x);
+  inline void emit4bytes(uint32_t x);
+  inline void emit6bytes(uint64_t x);
+
+  // Helpers to emit binary encoding for various instruction formats.
+
+  inline void rr_form(Opcode op, Register r1, Register r2);
+  inline void rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
+  inline void rr_form(Opcode op, Condition m1, Register r2);
+  inline void rr2_form(uint8_t op, Condition m1, Register r2);
+
+  inline void rx_form(Opcode op, Register r1, Register x2, Register b2,
+                      Disp d2);
+  inline void rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+                      Disp d2);
+
+  inline void ri_form(Opcode op, Register r1, const Operand& i2);
+  inline void ri_form(Opcode op, Condition m1, const Operand& i2);
+
+  inline void rie_form(Opcode op, Register r1, Register r3, const Operand& i2);
+  inline void rie_f_form(Opcode op, Register r1, Register r2, const Operand& i3,
+                         const Operand& i4, const Operand& i5);
+
+  inline void ril_form(Opcode op, Register r1, const Operand& i2);
+  inline void ril_form(Opcode op, Condition m1, const Operand& i2);
+
+  inline void ris_form(Opcode op, Register r1, Condition m3, Register b4,
+                       Disp d4, const Operand& i2);
+
+  inline void rrd_form(Opcode op, Register r1, Register r3, Register r2);
+
+  inline void rre_form(Opcode op, Register r1, Register r2);
+  inline void rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
+
+  inline void rrf1_form(Opcode op, Register r1, Register r2, Register r3);
+  inline void rrf1_form(uint32_t x);
+  inline void rrf2_form(uint32_t x);
+  inline void rrf3_form(uint32_t x);
+  inline void rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
+                        Register r2);
+
+  inline void rrs_form(Opcode op, Register r1, Register r2, Register b4,
+                       Disp d4, Condition m3);
+
+  inline void rs_form(Opcode op, Register r1, Condition m3, Register b2,
+                      const Disp d2);
+  inline void rs_form(Opcode op, Register r1, Register r3, Register b2,
+                      const Disp d2);
+
+  inline void rsi_form(Opcode op, Register r1, Register r3, const Operand& i2);
+  inline void rsl_form(Opcode op, Length l1, Register b2, Disp d2);
+
+  inline void rsy_form(Opcode op, Register r1, Register r3, Register b2,
+                       const Disp d2);
+  inline void rsy_form(Opcode op, Register r1, Condition m3, Register b2,
+                       const Disp d2);
+
+  inline void rxe_form(Opcode op, Register r1, Register x2, Register b2,
+                       Disp d2);
+
+  inline void rxf_form(Opcode op, Register r1, Register r3, Register b2,
+                       Register x2, Disp d2);
+
+  inline void rxy_form(Opcode op, Register r1, Register x2, Register b2,
+                       Disp d2);
+  inline void rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
+                       Disp d2);
+
+  inline void s_form(Opcode op, Register b1, Disp d2);
+
+  inline void si_form(Opcode op, const Operand& i2, Register b1, Disp d1);
+  inline void siy_form(Opcode op, const Operand& i2, Register b1, Disp d1);
+
+  inline void sil_form(Opcode op, Register b1, Disp d1, const Operand& i2);
+
+  inline void ss_form(Opcode op, Length l, Register b1, Disp d1, Register b2,
+                      Disp d2);
+  inline void ss_form(Opcode op, Length l1, Length l2, Register b1, Disp d1,
+                      Register b2, Disp d2);
+  inline void ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
+                      Disp d1, Register b2, Disp d2);
+  inline void ss_form(Opcode op, Register r1, Register r2, Register b1, Disp d1,
+                      Register b2, Disp d2);
+  inline void sse_form(Opcode op, Register b1, Disp d1, Register b2, Disp d2);
+  inline void ssf_form(Opcode op, Register r3, Register b1, Disp d1,
+                       Register b2, Disp d2);
+
+  // Labels
+  void print(Label* L);
+  int max_reach_from(int pos);
+  void bind_to(Label* L, int pos);
+  void next(Label* L);
+
+  friend class RegExpMacroAssemblerS390;
+  friend class RelocInfo;
+  friend class CodePatcher;
+
+  List<Handle<Code> > code_targets_;
+
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
+  friend class EnsureSpace;
+};
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_ASSEMBLER_S390_H_
diff --git a/src/s390/builtins-s390.cc b/src/s390/builtins-s390.cc
new file mode 100644
index 0000000..12b52c1
--- /dev/null
+++ b/src/s390/builtins-s390.cc
@@ -0,0 +1,2555 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
+                                BuiltinExtraArguments extra_args) {
+  // ----------- S t a t e -------------
+  //  -- r2                 : number of arguments excluding receiver
+  //  -- r3                 : target
+  //  -- r5                 : new.target
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  // Insert extra arguments.
+  int num_extra_args = 0;
+  switch (extra_args) {
+    case BuiltinExtraArguments::kTarget:
+      __ Push(r3);
+      ++num_extra_args;
+      break;
+    case BuiltinExtraArguments::kNewTarget:
+      __ Push(r5);
+      ++num_extra_args;
+      break;
+    case BuiltinExtraArguments::kTargetAndNewTarget:
+      __ Push(r3, r5);
+      num_extra_args += 2;
+      break;
+    case BuiltinExtraArguments::kNone:
+      break;
+  }
+
+  // JumpToExternalReference expects r2 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  __ AddP(r2, r2, Operand(num_extra_args + 1));
+
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the InternalArray function from the current native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the Array function from the current native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, r3);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r4);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+    __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // tail call a stub
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r3);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r4);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  __ LoadRR(r5, r3);
+  // Run the native code for the Array function called as a normal function.
+  // tail call a stub
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- r2                 : number of arguments
+  //  -- lr                 : return address
+  //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- sp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in r3 and the double value in d1.
+  __ LoadRoot(r3, root_index);
+  __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+
+  // Set up state for the loop:
+  // r4: address of arg[0] + kPointerSize
+  // r5: number of slots to drop at exit (arguments + receiver)
+  __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+  __ AddP(r4, sp, r4);
+  __ AddP(r5, r2, Operand(1));
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ CmpLogicalP(r4, sp);
+    __ ble(&done_loop);
+
+    // Load the next parameter tagged value into r2.
+    __ lay(r4, MemOperand(r4, -kPointerSize));
+    __ LoadP(r2, MemOperand(r4));
+
+    // Load the double value of the parameter into d2, converting the
+    // parameter to a number first using the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(r2, &convert_smi);
+    __ LoadP(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(r5);
+      __ Push(r3, r4, r5);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ Pop(r3, r4, r5);
+      __ SmiUntag(r5);
+      {
+        // Restore the double accumulator value (d1).
+        Label done_restore;
+        __ SmiToDouble(d1, r3);
+        __ JumpIfSmi(r3, &done_restore);
+        __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+        __ bind(&done_restore);
+      }
+    }
+    __ b(&convert);
+    __ bind(&convert_number);
+    __ LoadDouble(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
+    __ b(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDouble(d2, r2);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (d1) and the next parameter value on the right hand side (d2).
+    Label compare_nan, compare_swap;
+    __ cdbr(d1, d2);
+    __ bunordered(&compare_nan);
+    __ b(cond_done, &loop);
+    __ b(CommuteCondition(cond_done), &compare_swap);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ TestDoubleIsMinusZero(reg, r6, r7);
+    __ bne(&loop);
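+    // reg (the parameter d2 for Min, the accumulator d1 for Max) is -0, so
+    // fall through and take the parameter: Min prefers -0, Max prefers +0.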
+
+    // Update accumulator. Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ ldr(d1, d2);
+    __ LoadRR(r3, r2);
+    __ b(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    // We still need to visit the rest of the arguments.
+    __ bind(&compare_nan);
+    __ LoadRoot(r3, Heap::kNanValueRootIndex);
+    __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+    __ b(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ LoadRR(r2, r3);
+  __ Drop(r5);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into r2 and get rid of the rest (including the
+  // receiver).
+  Label no_arguments;
+  {
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
+    __ la(sp, MemOperand(sp, r2));
+    __ LoadP(r2, MemOperand(sp));
+    __ Drop(2);
+  }
+
+  // 2a. Convert the first argument to a number.
+  ToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  // 2b. No arguments, return +0.
+  __ bind(&no_arguments);
+  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  __ Ret(1);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- r5                     : new target
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r4 and get rid of the rest (including the
+  // receiver).
+  {
+    Label no_arguments, done;
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+    __ la(sp, MemOperand(sp, r4));
+    __ LoadP(r4, MemOperand(sp));
+    __ Drop(2);
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ LoadSmiLiteral(r4, Smi::FromInt(0));
+    __ Drop(1);
+    __ bind(&done);
+  }
+
+  // 3. Make sure r4 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(r4, &done_convert);
+    __ CompareObjectType(r4, r6, r6, HEAP_NUMBER_TYPE);
+    __ beq(&done_convert);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r3, r5);
+      __ LoadRR(r2, r4);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ LoadRR(r4, r2);
+      __ Pop(r3, r5);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label new_object;
+  __ CmpP(r3, r5);
+  __ bne(&new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
+  __ Ret();
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(r4);
+  }
+  __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  // 1. Load the first argument into r2 and get rid of the rest (including the
+  // receiver).
+  Label no_arguments;
+  {
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
+    __ lay(sp, MemOperand(sp, r2));
+    __ LoadP(r2, MemOperand(sp));
+    __ Drop(2);
+  }
+
+  // 2a. At least one argument, return r2 if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(r2, &to_string);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+    __ bgt(&to_string);
+    __ beq(&symbol_descriptive_string);
+    __ Ret();
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(r2, Heap::kempty_stringRootIndex);
+    __ Ret(1);
+  }
+
+  // 3a. Convert r2 to a string.
+  __ bind(&to_string);
+  {
+    ToStringStub stub(masm->isolate());
+    __ TailCallStub(&stub);
+  }
+  // 3b. Convert symbol in r2 to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ Push(r2);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- r5                     : new target
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r4 and get rid of the rest (including the
+  // receiver).
+  {
+    Label no_arguments, done;
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+    __ lay(sp, MemOperand(sp, r4));
+    __ LoadP(r4, MemOperand(sp));
+    __ Drop(2);
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ LoadRoot(r4, Heap::kempty_stringRootIndex);
+    __ Drop(1);
+    __ bind(&done);
+  }
+
+  // 3. Make sure r4 is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(r4, &convert);
+    __ CompareObjectType(r4, r6, r6, FIRST_NONSTRING_TYPE);
+    __ blt(&done_convert);
+    __ bind(&convert);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      ToStringStub stub(masm->isolate());
+      __ Push(r3, r5);
+      __ LoadRR(r2, r4);
+      __ CallStub(&stub);
+      __ LoadRR(r4, r2);
+      __ Pop(r3, r5);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label new_object;
+  __ CmpP(r3, r5);
+  __ bne(&new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
+  __ Ret();
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(r4);
+  }
+  __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
+  __ Ret();
+}
+
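+// Tail-calls the code attached to the SharedFunctionInfo of the function in
+// r3.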
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee)
+  //  -- r3 : target function (preserved for callee)
+  //  -- r5 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(r2);
+    __ Push(r2, r3, r5, r3);
+
+    __ CallRuntime(function_id, 1);
+    __ LoadRR(r4, r2);
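+    // The runtime call returned the code object to tail call; keep it in r4
+    // while the original r2 (argument count) is restored below.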
+
+    // Restore target function and new target.
+    __ Pop(r2, r3, r5);
+    __ SmiUntag(r2);
+  }
+  __ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // the stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+  __ bge(&ok, Label::kNear);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- r2     : number of arguments
+  //  -- r3     : constructor function
+  //  -- r4     : allocation site or undefined
+  //  -- r5     : new target
+  //  -- cp     : context
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(r4, r6);
+
+    if (!create_implicit_receiver) {
+      __ SmiTag(r6, r2);
+      __ LoadAndTestP(r6, r6);
+      __ Push(cp, r4, r6);
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    } else {
+      __ SmiTag(r2);
+      __ Push(cp, r4, r2);
+
+      // Allocate the new receiver object.
+      __ Push(r3, r5);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ LoadRR(r6, r2);
+      __ Pop(r3, r5);
+
+      // ----------- S t a t e -------------
+      //  -- r3: constructor function
+      //  -- r5: new target
+      //  -- r6: newly allocated object
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ LoadP(r2, MemOperand(sp));
+      __ SmiUntag(r2);
+      __ LoadAndTestP(r2, r2);
+
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(r6, r6);
+    }
+
+    // Set up pointer to last argument.
+    __ la(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    // r2: number of arguments
+    // r3: constructor function
+    // r4: address of last argument (caller sp)
+    // r5: new target
+    // cr0: condition indicating whether r2 is zero
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: number of arguments (smi-tagged)
+    Label loop, no_args;
+    __ beq(&no_args);
+    __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
+    __ SubP(sp, sp, ip);
+    __ LoadRR(r1, r2);
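+    // ip counts down from argc * kPointerSize to zero and indexes both the
+    // source (off r4, the caller's sp) and the destination (off the new sp).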
+    __ bind(&loop);
+    __ lay(ip, MemOperand(ip, -kPointerSize));
+    __ LoadP(r0, MemOperand(ip, r4));
+    __ StoreP(r0, MemOperand(ip, sp));
+    __ BranchOnCount(r1, &loop);
+    __ bind(&no_args);
+
+    // Call the function.
+    // r2: number of arguments
+    // r3: constructor function
+    // r5: new target
+    if (is_api_function) {
+      __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+      Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      ParameterCount actual(r2);
+      __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
+                        CheckDebugStepCallWrapper());
+    }
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    // r2: result
+    // sp[0]: receiver
+    // sp[1]: number of arguments (smi-tagged)
+    __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // r2: result
+      // sp[0]: receiver
+      // sp[1]: new.target
+      // sp[2]: number of arguments (smi-tagged)
+      __ JumpIfSmi(r2, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ CompareObjectType(r2, r3, r5, FIRST_JS_RECEIVER_TYPE);
+      __ bge(&exit);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ LoadP(r2, MemOperand(sp));
+
+      // Remove receiver from the stack, remove caller arguments, and
+      // return.
+      __ bind(&exit);
+      // r2: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ LoadP(r3, MemOperand(sp));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(r2, &dont_throw);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
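+  // r3 holds the smi-tagged argument count saved in the construct frame; drop
+  // the arguments plus the receiver before returning.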
+  __ SmiToPtrArrayOffset(r3, r3);
+  __ AddP(sp, sp, r3);
+  __ AddP(sp, sp, Operand(kPointerSize));
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r3, r4);
+  }
+  __ Ret();
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+  __ push(r3);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers r4; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+                                        IsTagged argc_is_tagged) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(r4, Heap::kRealStackLimitRootIndex);
+  // Make r4 the space we have left. The stack might already be overflowed
+  // here which will cause r4 to become negative.
+  __ SubP(r4, sp, r4);
+  // Check if the arguments will overflow the stack.
+  if (argc_is_tagged == kArgcIsSmiTagged) {
+    __ SmiToPtrArrayOffset(r0, argc);
+  } else {
+    DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+    __ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
+  }
+  __ CmpP(r4, r0);
+  __ bgt(&okay);  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from Generate_JS_Entry
+  // r2: new.target
+  // r3: function
+  // r4: receiver
+  // r5: argc
+  // r6: argv
+  // r0,r7-r9, cp may be clobbered
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Enter an internal frame.
+  {
+    // FrameScope ends up calling MacroAssembler::EnterFrame here
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Setup the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ mov(cp, Operand(context_address));
+    __ LoadP(cp, MemOperand(cp));
+
+    __ InitializeRootRegister();
+
+    // Push the function and the receiver onto the stack.
+    __ Push(r3, r4);
+
+    // Check if we have enough stack space to push all arguments.
+    // Clobbers r4.
+    Generate_CheckStackOverflow(masm, r5, kArgcIsUntaggedInt);
+
+    // Copy arguments to the stack in a loop from argv to sp.
+    // The arguments are actually placed in reverse order on sp
+    // compared to argv (i.e. arg1 ends up at the highest stack address).
+    // r3: function
+    // r5: argc
+    // r6: argv, i.e. points to first arg
+    // r7: scratch reg to hold scaled argc
+    // r8: scratch reg to hold arg handle
+    // r9: scratch reg to hold index into argv
+    Label argLoop, argExit;
+    intptr_t zero = 0;
+    __ ShiftLeftP(r7, r5, Operand(kPointerSizeLog2));
+    __ SubRR(sp, r7);                // Buy the stack frame to fit args
+    __ LoadImmP(r9, Operand(zero));  // Initialize argv index
+    __ bind(&argLoop);
+    __ CmpPH(r7, Operand(zero));
+    __ beq(&argExit, Label::kNear);
+    __ lay(r7, MemOperand(r7, -kPointerSize));
+    __ LoadP(r8, MemOperand(r9, r6));         // read next parameter
+    __ la(r9, MemOperand(r9, kPointerSize));  // r9++;
+    __ LoadP(r0, MemOperand(r8));             // dereference handle
+    __ StoreP(r0, MemOperand(r7, sp));        // push parameter
+    __ b(&argLoop);
+    __ bind(&argExit);
+
+    // Setup new.target and argc.
+    __ LoadRR(r6, r2);
+    __ LoadRR(r2, r5);
+    __ LoadRR(r5, r6);
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(r7, r6);
+    __ LoadRR(r8, r6);
+    __ LoadRR(r9, r6);
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the JS frame and remove the parameters (except function), and
+    // return.
+  }
+  __ b(r14);
+
+  // r2: result
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o r3: the JS function object being called.
+//   o r5: the new target
+//   o cp: our context
+//   o pp: the caller's constant pool pointer (if enabled)
+//   o fp: the caller's frame pointer
+//   o sp: stack pointer
+//   o lr: return address
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(r3);
+
+  // Get the bytecode array from the function object and load it into
+  // kInterpreterBytecodeArrayRegister.
+  __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  Label array_done;
+  Register debug_info = r4;
+  DCHECK(!debug_info.is(r2));
+  __ LoadP(debug_info,
+           FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+  // Load original bytecode array or the debug copy.
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+  __ beq(&array_done);
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ bind(&array_done);
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ TestIfSmi(kInterpreterBytecodeArrayRegister);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Push new.target, bytecode array and zero for bytecode array offset.
+  __ LoadImmP(r2, Operand::Zero());
+  __ Push(r5, kInterpreterBytecodeArrayRegister, r2);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size (word) from the BytecodeArray object.
+    __ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                  BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ SubP(r5, sp, r4);
+    __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
+    __ CmpLogicalP(r5, r0);
+    __ bge(&ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    Label loop, no_args;
+    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
+    __ LoadAndTestP(r4, r4);
+    __ beq(&no_args);
+    __ LoadRR(r1, r4);
+    __ bind(&loop);
+    __ push(r5);
+    __ SubP(r1, Operand(1));
+    __ bne(&loop);
+    __ bind(&no_args);
+  }
+
+  // TODO(rmcilroy): List of things not currently dealt with here but done in
+  // fullcodegen's prologue:
+  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+  //  - Code aging of the BytecodeArray object.
+
+  // Load accumulator, register file, bytecode offset, dispatch table into
+  // registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ AddP(kInterpreterRegisterFileRegister, fp,
+          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+                           kInterpreterBytecodeOffsetRegister));
+  __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
+  __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+  // and header removal.
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Call(ip);
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+}
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+  // TODO(rmcilroy): List of things not currently dealt with here but done in
+  // fullcodegen's EmitReturnSequence.
+  //  - Supporting FLAG_trace for Runtime::TraceExit.
+  //  - Support profiler (specifically decrementing profiling_counter
+  //    appropriately and calling out to HandleInterrupts if necessary).
+
+  // The return value is in accumulator, which is already in r2.
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments and return.
+  __ LoadlW(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                BytecodeArray::kParameterSizeOffset));
+  __ AddP(sp, sp, r0);
+  __ Ret();
+}
+
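+// Pushes |count| values onto the stack, reading one pointer at a time starting
+// at |index| and walking downwards in memory. Clobbers r0 and |scratch|, and
+// modifies |index|.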
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+                                         Register count, Register scratch) {
+  Label loop;
+  __ AddP(index, index, Operand(kPointerSize));  // Bias up for LoadPU
+  __ LoadRR(r0, count);
+  __ bind(&loop);
+  __ LoadP(scratch, MemOperand(index, -kPointerSize));
+  __ lay(index, MemOperand(index, -kPointerSize));
+  __ push(scratch);
+  __ SubP(r0, Operand(1));
+  __ bne(&loop);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r4 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- r3 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Calculate the number of arguments (add one for the receiver).
+  __ AddP(r5, r2, Operand(1));
+
+  // Push the arguments.
+  Generate_InterpreterPushArgs(masm, r4, r5, r6);
+
+  // Call the target.
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- r2 : argument count (not including receiver)
+  // -- r5 : new target
+  // -- r3 : constructor to call
+  // -- r4 : address of the first argument
+  // -----------------------------------
+
+  // Push a slot for the receiver to be constructed.
+  __ LoadImmP(r0, Operand::Zero());
+  __ push(r0);
+
+  // Push the arguments (skip if none).
+  Label skip;
+  __ CmpP(r2, Operand::Zero());
+  __ beq(&skip);
+  Generate_InterpreterPushArgs(masm, r4, r2, r6);
+  __ bind(&skip);
+
+  // Call the constructor with r2, r3, and r5 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
+  // Initialize register file register and dispatch table register.
+  __ AddP(kInterpreterRegisterFileRegister, fp,
+          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Get the context from the frame.
+  __ LoadP(kContextRegister,
+           MemOperand(kInterpreterRegisterFileRegister,
+                      InterpreterFrameConstants::kContextFromRegisterPointer));
+
+  // Get the bytecode array pointer from the frame.
+  __ LoadP(
+      kInterpreterBytecodeArrayRegister,
+      MemOperand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ TestIfSmi(kInterpreterBytecodeArrayRegister);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ LoadP(kInterpreterBytecodeOffsetRegister,
+           MemOperand(
+               kInterpreterRegisterFileRegister,
+               InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+                           kInterpreterBytecodeOffsetRegister));
+  __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
+  __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(ip);
+}
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
+    __ Push(r3);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use these for interpreter deopts) and pop the
+  // accumulator value into the accumulator register.
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in interpreter entry
+  // trampoline. The return will never actually be taken, but our stack walker
+  // uses this address to determine whether a frame is interpreted.
+  __ mov(r14,
+         Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r2 at the start of the PlatformCodeAge sequence.
+  __ CleanseP(r14);
+  __ SubP(r14, Operand(kCodeAgingSequenceLength));
+  __ LoadRR(r2, r14);
+
+  __ pop(r14);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r2 - contains return address (beginning of patch sequence)
+  //   r3 - isolate
+  //   r5 - new target
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r4);
+  __ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ LoadRR(ip, r2);
+  __ Jump(ip);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r2 at the start of the PlatformCodeAge sequence.
+  __ CleanseP(r14);
+  __ SubP(r14, Operand(kCodeAgingSequenceLength));
+  __ LoadRR(r2, r14);
+
+  __ pop(r14);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r2 - contains return address (beginning of patch sequence)
+  //   r3 - isolate
+  //   r5 - new target
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r4);
+  __ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ LoadRR(ip, r2);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PushStandardFrame(r3);
+
+  // Jump to point after the code-age stub.
+  __ AddP(r2, ip, Operand(kNoCodeAgeSequenceLength));
+  __ Jump(r2);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification; this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ la(sp, MemOperand(sp, kPointerSize));  // Ignore state
+  __ Ret();                                 // Jump to miss handler
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ LoadSmiLiteral(r2, Smi::FromInt(static_cast<int>(type)));
+    __ push(r2);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  // Get the full codegen state from the stack and untag it -> r8.
+  __ LoadP(r8, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(r8);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ CmpP(r8, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ bne(&with_tos_register);
+  __ la(sp, MemOperand(sp, 1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
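+  // If the state is TOS_REG, the top-of-stack value was also pushed below the
+  // state word; reload it into r2 and drop both words.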
+  __ LoadP(r2, MemOperand(sp, 1 * kPointerSize));
+  __ CmpP(r8, Operand(FullCodeGenerator::TOS_REG));
+  __ bne(&unknown_state);
+  __ la(sp, MemOperand(sp, 2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// Clobbers registers {r6, r7, r8, r9}.
+void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                             Register function_template_info,
+                             Label* receiver_check_failed) {
+  Register signature = r6;
+  Register map = r7;
+  Register constructor = r8;
+  Register scratch = r9;
+
+  // If there is no signature, return the holder.
+  __ LoadP(signature, FieldMemOperand(function_template_info,
+                                      FunctionTemplateInfo::kSignatureOffset));
+  Label receiver_check_passed;
+  __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+                &receiver_check_passed);
+
+  // Walk the prototype chain.
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, scratch, scratch);
+  __ CmpP(scratch, Operand(JS_FUNCTION_TYPE));
+  Label next_prototype;
+  __ bne(&next_prototype);
+  Register type = constructor;
+  __ LoadP(type,
+           FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(type,
+           FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ CmpP(signature, type);
+  __ beq(&receiver_check_passed);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype);
+  __ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
+  __ bne(&next_prototype);
+
+  // Otherwise load the parent function template and iterate.
+  __ LoadP(type,
+           FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+  __ b(&function_template_loop);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ LoadlW(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+  __ DecodeField<Map::HasHiddenPrototype>(scratch);
+  __ beq(receiver_check_failed);
+
+  __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ b(&prototype_loop_start);
+
+  __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                 : number of arguments excluding receiver
+  //  -- r3                 : callee
+  //  -- lr                 : return address
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+  __ LoadP(r4, MemOperand(sp, r1));
+  CompatibleReceiverCheck(masm, r4, r5, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ LoadP(r6, FieldMemOperand(r5, FunctionTemplateInfo::kCallCodeOffset));
+  __ LoadP(r6, FieldMemOperand(r6, CallHandlerInfo::kFastHandlerOffset));
+  __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+
+  // Compatible receiver check failed: throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  // Drop the arguments (including the receiver).
+  __ AddP(r1, r1, Operand(kPointerSize));
+  __ AddP(sp, sp, r1);
+  __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // Lookup the function in the JavaScript frame.
+  __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(r2);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  // If the code object is null, just return to the unoptimized code.
+  Label skip;
+  __ CmpSmiLiteral(r2, Smi::FromInt(0), r0);
+  __ bne(&skip);
+  __ Ret();
+
+  __ bind(&skip);
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ LoadP(
+      r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
+                                  DeoptimizationInputData::kOsrPcOffsetIndex)));
+  __ SmiUntag(r3);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ AddP(r2, r3);
+  __ AddP(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ LoadRR(r14, r0);
+
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // 1. Pop receiver into r2 and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ Pop(r2);
+    __ JumpIfSmi(r2, &receiver_not_date);
+    __ CompareObjectType(r2, r3, r4, JS_DATE_TYPE);
+    __ bne(&receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ LoadP(r2, FieldMemOperand(r2, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
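+      // Fast path: if the cache stamp stored in the JSDate object matches the
+      // isolate's current date cache stamp, the cached field is still valid.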
+      Label stamp_mismatch;
+      __ mov(r3, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+      __ LoadP(r3, MemOperand(r3));
+      __ LoadP(ip, FieldMemOperand(r2, JSDate::kCacheStampOffset));
+      __ CmpP(r3, ip);
+      __ bne(&stamp_mismatch);
+      __ LoadP(r2, FieldMemOperand(
+                       r2, JSDate::kValueOffset + field_index * kPointerSize));
+      __ Ret();
+      __ bind(&stamp_mismatch);
+    }
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, r3);
+    __ LoadSmiLiteral(r3, Smi::FromInt(field_index));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ Ret();
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
+
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : argc
+  //  -- sp[0] : first argument (left-hand side)
+  //  -- sp[4] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ LoadP(InstanceOfDescriptor::LeftRegister(),
+             MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
+    __ LoadP(InstanceOfDescriptor::RightRegister(),
+             MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ Ret(2);
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : argc
+  //  -- sp[0] : argArray
+  //  -- sp[4] : thisArg
+  //  -- sp[8] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into r3, argArray into r2 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label skip;
+    Register arg_size = r4;
+    Register new_sp = r5;
+    Register scratch = r6;
+    __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+    __ AddP(new_sp, sp, arg_size);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(scratch, r2);
+    __ LoadP(r3, MemOperand(new_sp, 0));  // receiver
+    __ CmpP(arg_size, Operand(kPointerSize));
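+    // This single comparison drives both branches below: zero arguments skips
+    // both loads, exactly one argument loads only thisArg.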
+    __ blt(&skip);
+    __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize));  // thisArg
+    __ beq(&skip);
+    __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize));  // argArray
+    __ bind(&skip);
+    __ LoadRR(sp, new_sp);
+    __ StoreP(scratch, MemOperand(sp, 0));
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r2    : argArray
+  //  -- r3    : receiver
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(r3, &receiver_not_callable);
+  __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsCallable);
+  __ beq(&receiver_not_callable);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
+  __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ LoadImmP(r2, Operand::Zero());
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ StoreP(r3, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // r2: actual number of arguments
+  {
+    Label done;
+    __ CmpP(r2, Operand::Zero());
+    __ bne(&done, Label::kNear);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ AddP(r2, Operand(1));
+    __ bind(&done);
+  }
+
+  // r2: actual number of arguments
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+  __ LoadP(r3, MemOperand(sp, r4));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // r2: actual number of arguments
+  // r3: callable
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ AddP(r4, sp, r4);
+
+    __ bind(&loop);
+    __ LoadP(ip, MemOperand(r4, -kPointerSize));
+    __ StoreP(ip, MemOperand(r4));
+    __ SubP(r4, Operand(kPointerSize));
+    __ CmpP(r4, sp);
+    __ bne(&loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ SubP(r2, Operand(1));
+    __ pop();
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : argc
+  //  -- sp[0]  : argumentsList
+  //  -- sp[4]  : thisArgument
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label skip;
+    Register arg_size = r4;
+    Register new_sp = r5;
+    Register scratch = r6;
+    __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+    __ AddP(new_sp, sp, arg_size);
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(scratch, r3);
+    __ LoadRR(r2, r3);
+    __ CmpP(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize));  // target
+    __ beq(&skip);
+    __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize));  // thisArgument
+    __ CmpP(arg_size, Operand(2 * kPointerSize));
+    __ beq(&skip);
+    __ LoadP(r2, MemOperand(new_sp, 3 * -kPointerSize));  // argumentsList
+    __ bind(&skip);
+    __ LoadRR(sp, new_sp);
+    __ StoreP(scratch, MemOperand(sp, 0));
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r2    : argumentsList
+  //  -- r3    : target
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(r3, &target_not_callable);
+  __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsCallable);
+  __ beq(&target_not_callable);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ StoreP(r3, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : argc
+  //  -- sp[0]  : new.target (optional)
+  //  -- sp[4]  : argumentsList
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+  // new.target into r5 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push thisArgument
+  // (if present) instead.
+  {
+    Label skip;
+    Register arg_size = r4;
+    Register new_sp = r6;
+    __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+    __ AddP(new_sp, sp, arg_size);
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(r2, r3);
+    __ LoadRR(r5, r3);
+    __ StoreP(r3, MemOperand(new_sp, 0));  // receiver (undefined)
+    __ CmpP(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize));  // target
+    __ LoadRR(r5, r3);  // new.target defaults to target
+    __ beq(&skip);
+    __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize));  // argumentsList
+    __ CmpP(arg_size, Operand(2 * kPointerSize));
+    __ beq(&skip);
+    __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize));  // new.target
+    __ bind(&skip);
+    __ LoadRR(sp, new_sp);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r2    : argumentsList
+  //  -- r5    : new.target
+  //  -- r3    : target
+  //  -- sp[0] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(r3, &target_not_constructor);
+  __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsConstructor);
+  __ beq(&target_not_constructor);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(r5, &new_target_not_constructor);
+  __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsConstructor);
+  __ beq(&new_target_not_constructor);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ StoreP(r3, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ StoreP(r5, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- r2 : actual number of arguments
+  //  -- r3 : function (passed through to callee)
+  //  -- r4 : expected number of arguments
+  //  -- r5 : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(r7, Heap::kRealStackLimitRootIndex);
+  // Make r7 the space we have left. The stack might already be overflowed
+  // here which will cause r7 to become negative.
+  __ SubP(r7, sp, r7);
+  // Check if the arguments will overflow the stack.
+  __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+  __ CmpP(r7, r0);
+  __ ble(stack_overflow);  // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ SmiTag(r2);
+  __ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  // Stack updated as such:
+  //    old SP --->
+  //                 R14 Return Addr
+  //                 Old FP                     <--- New FP
+  //                 Argument Adapter SMI
+  //                 Function
+  //                 ArgC as SMI                <--- New SP
+  __ lay(sp, MemOperand(sp, -5 * kPointerSize));
+
+  // Cleanse the top nibble of 31-bit pointers.
+  __ CleanseP(r14);
+  __ StoreP(r14, MemOperand(sp, 4 * kPointerSize));
+  __ StoreP(fp, MemOperand(sp, 3 * kPointerSize));
+  __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
+  __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
+  __ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
+  __ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp +
+                               kPointerSize));
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ LoadP(r3, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                                kPointerSize)));
+  int stack_adjustment = kPointerSize;  // adjust for receiver
+  __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
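+  // r3 holds the Smi-tagged argument count; convert it to a byte offset and
+  // use it to drop the caller's actual arguments from the stack.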
+  __ SmiToPtrArrayOffset(r3, r3);
+  __ lay(sp, MemOperand(sp, r3));
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : argumentsList
+  //  -- r3    : target
+  //  -- r5    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(r2, &create_runtime);
+
+    // Load the map of argumentsList into r4.
+    __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+
+    // Load native context into r6.
+    __ LoadP(r6, NativeContextMemOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ LoadP(ip, ContextMemOperand(r6, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ CmpP(ip, r4);
+    __ beq(&create_arguments);
+    __ LoadP(ip, ContextMemOperand(r6, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ CmpP(ip, r4);
+    __ beq(&create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CompareInstanceType(r4, ip, JS_ARRAY_TYPE);
+    __ beq(&create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r3, r5, r2);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(r3, r5);
+      __ LoadP(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+      __ SmiUntag(r4);
+    }
+    __ b(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
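+    // An arguments object can be used directly only if its length still
+    // matches the length of its elements backing store; otherwise fall back
+    // to the runtime.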
+    __ LoadP(r4, FieldMemOperand(r2, JSArgumentsObject::kLengthOffset));
+    __ LoadP(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
+    __ LoadP(ip, FieldMemOperand(r6, FixedArray::kLengthOffset));
+    __ CmpP(r4, ip);
+    __ bne(&create_runtime);
+    __ SmiUntag(r4);
+    __ LoadRR(r2, r6);
+    __ b(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
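+    // Only FAST_SMI_ELEMENTS and FAST_ELEMENTS arrays are handled inline;
+    // holey or more general element kinds go through the runtime.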
+    __ LoadlB(r4, FieldMemOperand(r4, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(r4);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ CmpP(r4, Operand(FAST_ELEMENTS));
+    __ bgt(&create_runtime);
+    __ CmpP(r4, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ beq(&create_runtime);
+    __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
+    __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
+    __ SmiUntag(r4);
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+    // Make ip the space we have left. The stack might already be overflowed
+    // here which will cause ip to become negative.
+    __ SubP(ip, sp, ip);
+    // Check if the arguments will overflow the stack.
+    __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+    __ CmpP(ip, r0);  // Signed comparison.
+    __ bgt(&done);
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r3    : target
+  //  -- r2    : args (a FixedArray built from argumentsList)
+  //  -- r4    : len (number of elements to push from args)
+  //  -- r5    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    Label loop, no_args;
+    __ CmpP(r4, Operand::Zero());
+    __ beq(&no_args);
+    __ AddP(r2, r2,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
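+    // r2 now points one slot before the first element, so the kPointerSize
+    // displacement in the loop below reads elements 0..len-1 in order.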
+    __ LoadRR(r1, r4);
+    __ bind(&loop);
+    __ LoadP(r0, MemOperand(r2, kPointerSize));
+    __ la(r2, MemOperand(r2, kPointerSize));
+    __ push(r0);
+    __ BranchOnCount(r1, &loop);
+    __ bind(&no_args);
+    __ LoadRR(r2, r4);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is active.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
+  __ LoadlB(scratch1, MemOperand(scratch1));
+  __ CmpP(scratch1, Operand::Zero());
+  __ beq(&done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ LoadP(scratch3,
+             MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+    __ bne(&no_interpreter_frame);
+    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(
+      scratch3,
+      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ LoadRR(fp, scratch2);
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ LoadP(scratch1,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  __ LoadP(scratch1,
+           FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadW(caller_args_count_reg,
+           FieldMemOperand(scratch1,
+                           SharedFunctionInfo::kFormalParameterCountOffset));
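+  // On 32-bit the formal parameter count is stored as a Smi and has to be
+  // untagged; on 64-bit it is read as a raw 32-bit integer.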
+#if !V8_TARGET_ARCH_S390X
+  __ SmiUntag(caller_args_count_reg);
+#endif
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+  __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorBits, r0);
+  __ bne(&class_constructor);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
+  Label done_convert;
+  __ AndP(r0, r5, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+                          (1 << SharedFunctionInfo::kNativeBit)));
+  __ bne(&done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- r2 : the number of arguments (not including the receiver)
+    //  -- r3 : the function to call (checked to be a JSFunction)
+    //  -- r4 : the shared function info.
+    //  -- cp : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(r5);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2));
+      __ LoadP(r5, MemOperand(sp, r5));
+      __ JumpIfSmi(r5, &convert_to_object);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CompareObjectType(r5, r6, r6, FIRST_JS_RECEIVER_TYPE);
+      __ bge(&done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy);
+        __ JumpIfNotRoot(r5, Heap::kNullValueRootIndex, &convert_to_object);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(r5);
+        }
+        __ b(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(r2);
+        __ Push(r2, r3);
+        __ LoadRR(r2, r5);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ LoadRR(r5, r2);
+        __ Pop(r2, r3);
+        __ SmiUntag(r2);
+      }
+      __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2));
+    __ StoreP(r5, MemOperand(sp, r6));
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSFunction)
+  //  -- r4 : the shared function info.
+  //  -- cp : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r2, r5, r6, r7);
+  }
+
+  __ LoadW(
+      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_S390X
+  __ SmiUntag(r4);
+#endif
+  ParameterCount actual(r2);
+  ParameterCount expected(r4);
+  __ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
+    __ push(r3);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : target (checked to be a JSBoundFunction)
+  //  -- r5 : new.target (only in case of [[Construct]])
+  // -----------------------------------
+
+  // Load [[BoundArguments]] into r4 and length of that into r6.
+  Label no_bound_arguments;
+  __ LoadP(r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
+  __ LoadP(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  __ SmiUntag(r6);
+  __ LoadAndTestP(r6, r6);
+  __ beq(&no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- r2 : the number of arguments (not including the receiver)
+    //  -- r3 : target (checked to be a JSBoundFunction)
+    //  -- r4 : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- r5 : new.target (only in case of [[Construct]])
+    //  -- r6 : the number of [[BoundArguments]]
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ LoadRR(r8, sp);  // preserve previous stack pointer
+      __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
+      __ SubP(sp, sp, r9);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+      __ bgt(&done);  // Signed comparison.
+      // Restore the stack pointer.
+      __ LoadRR(sp, r8);
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Relocate arguments down the stack.
+    //  -- r2 : the number of arguments (not including the receiver)
+    //  -- r8 : the previous stack pointer
+    //  -- r9 : the size of the [[BoundArguments]]
+    {
+      Label skip, loop;
+      __ LoadImmP(r7, Operand::Zero());
+      __ CmpP(r2, Operand::Zero());
+      __ beq(&skip);
+      __ LoadRR(r1, r2);
+      __ bind(&loop);
+      __ LoadP(r0, MemOperand(r8, r7));
+      __ StoreP(r0, MemOperand(sp, r7));
+      __ AddP(r7, r7, Operand(kPointerSize));
+      __ BranchOnCount(r1, &loop);
+      __ bind(&skip);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ AddP(r4, r4, r9);
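+      // r4 now points just past the last bound argument; the loop below walks
+      // it backwards while the store offset r7 keeps growing, so the bound
+      // arguments fill the gap between the relocated arguments and the
+      // receiver in order.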
+      __ LoadRR(r1, r6);
+      __ bind(&loop);
+      __ LoadP(r0, MemOperand(r4, -kPointerSize));
+      __ lay(r4, MemOperand(r4, -kPointerSize));
+      __ StoreP(r0, MemOperand(sp, r7));
+      __ AddP(r7, r7, Operand(kPointerSize));
+      __ BranchOnCount(r1, &loop);
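+      // Adjust the argument count to include the [[BoundArguments]].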
+      __ AddP(r2, r2, r6);
+    }
+  }
+  __ bind(&no_bound_arguments);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(r3);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r2, r5, r6, r7);
+  }
+
+  // Patch the receiver to [[BoundThis]].
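+  // The receiver slot sits above the arguments, at sp + argc * kPointerSize.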
+  __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+  __ StoreP(ip, MemOperand(sp, r1));
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ LoadP(r3,
+           FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+                                       masm->isolate())));
+  __ LoadP(ip, MemOperand(ip));
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the target to call (can be any Object).
+  // -----------------------------------
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(r3, &non_callable);
+  __ bind(&non_smi);
+  __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+  __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsCallable);
+  __ beq(&non_callable);
+
+  __ CmpP(r7, Operand(JS_PROXY_TYPE));
+  __ bne(&non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r2, r5, r6, r7);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ Push(r3);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ AddP(r2, r2, Operand(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+  __ StoreP(r3, MemOperand(sp, r7));
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (checked to be a JSFunction)
+  //  -- r5 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // r4 to contain either an AllocationSite or undefined.
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
+  __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSBoundFunction)
+  //  -- r5 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertBoundFunction(r3);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  Label skip;
+  __ CmpP(r3, r5);
+  __ bne(&skip);
+  __ LoadP(r5,
+           FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ bind(&skip);
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ LoadP(r3,
+           FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ LoadP(ip, MemOperand(ip));
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (checked to be a JSProxy)
+  //  -- r5 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ Push(r3, r5);
+  // Include the pushed new_target, constructor and the receiver.
+  __ AddP(r2, r2, Operand(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (can be any Object)
+  //  -- r5 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(r3, &non_constructor);
+
+  // Dispatch based on instance type.
+  __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Construct]] internal method.
+  __ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r4, Map::kIsConstructor);
+  __ beq(&non_constructor);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ CmpP(r7, Operand(JS_PROXY_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+          eq);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+    __ StoreP(r3, MemOperand(sp, r7));
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : actual number of arguments
+  //  -- r3 : function (passed through to callee)
+  //  -- r4 : expected number of arguments
+  //  -- r5 : new target (passed through to callee)
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments, stack_overflow;
+
+  Label enough, too_few;
+  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+  __ CmpP(r2, r4);
+  __ blt(&too_few);
+  __ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ beq(&dont_adapt_arguments);
+
+  {  // Enough parameters: actual >= expected
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into r2 and copy end address into r6.
+    // r2: actual number of arguments as a smi
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r2, r2);
+    __ AddP(r2, fp);
+    // Adjust for the return address and receiver.
+    __ AddP(r2, r2, Operand(2 * kPointerSize));
+    __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+    __ SubP(r6, r2, r6);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r2: copy start address
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // r6: copy end address
+    // ip: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ LoadP(r0, MemOperand(r2, 0));
+    __ push(r0);
+    __ CmpP(r2, r6);  // Compare before moving to next argument.
+    __ lay(r2, MemOperand(r2, -kPointerSize));
+    __ bne(&copy);
+
+    __ b(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected
+    __ bind(&too_few);
+
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate the copy start address into r2; the copy end address is fp.
+    // r2: actual number of arguments as a smi
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r2, r2);
+    __ lay(r2, MemOperand(r2, fp));
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r2: copy start address
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // ip: code entry to call
+    Label copy;
+    __ bind(&copy);
+    // Adjust load for return address and receiver.
+    __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
+    __ push(r0);
+    __ CmpP(r2, fp);  // Compare before moving to next argument.
+    __ lay(r2, MemOperand(r2, -kPointerSize));
+    __ bne(&copy);
+
+    // Fill the remaining expected arguments with undefined.
+    // r3: function
+    // r4: expected number of arguments
+    // ip: code entry to call
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+    __ SubP(r6, fp, r6);
+    // Adjust for frame.
+    __ SubP(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                            2 * kPointerSize));
+
+    Label fill;
+    __ bind(&fill);
+    __ push(r0);
+    __ CmpP(sp, r6);
+    __ bne(&fill);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ LoadRR(r2, r4);
+  // r2 : expected number of arguments
+  // r3 : function (passed through to callee)
+  // r5 : new target (passed through to callee)
+  __ CallJSEntry(ip);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ JumpToJSEntry(ip);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bkpt(0);
+  }
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/code-stubs-s390.cc b/src/s390/code-stubs-s390.cc
new file mode 100644
index 0000000..1c7d27b
--- /dev/null
+++ b/src/s390/code-stubs-s390.cc
@@ -0,0 +1,5695 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/base/bits.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/isolate.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/runtime/runtime.h"
+#include "src/s390/code-stubs-s390.h"
+
+namespace v8 {
+namespace internal {
+
+static void InitializeArrayConstructorDescriptor(
+    Isolate* isolate, CodeStubDescriptor* descriptor,
+    int constant_stack_parameter_count) {
+  Address deopt_handler =
+      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+
+  if (constant_stack_parameter_count == 0) {
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  } else {
+    descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  }
+}
+
+static void InitializeInternalArrayConstructorDescriptor(
+    Isolate* isolate, CodeStubDescriptor* descriptor,
+    int constant_stack_parameter_count) {
+  Address deopt_handler =
+      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+
+  if (constant_stack_parameter_count == 0) {
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  } else {
+    descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  }
+}
+
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
+
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+#define __ ACCESS_MASM(masm)
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+                                          Condition cond);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+                                    Register rhs, Label* lhs_not_nan,
+                                    Label* slow, bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+                                           Register rhs);
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
+  // Update the static counter each time a new code stub is generated.
+  isolate()->counters()->code_stubs()->Increment();
+
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetRegisterParameterCount();
+  {
+    // Call the runtime system in a fresh internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    DCHECK(param_count == 0 ||
+           r2.is(descriptor.GetRegisterParameter(param_count - 1)));
+    // Push arguments
+    for (int i = 0; i < param_count; ++i) {
+      __ push(descriptor.GetRegisterParameter(i));
+    }
+    __ CallExternalReference(miss, param_count);
+  }
+
+  __ Ret();
+}
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+  Label out_of_range, only_low, negate, done, fastpath_done;
+  Register input_reg = source();
+  Register result_reg = destination();
+  DCHECK(is_truncating());
+
+  int double_offset = offset();
+
+  // Immediate values for this stub fit in instructions, so it's safe to use ip.
+  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
+  Register scratch_low =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+  Register scratch_high =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+  DoubleRegister double_scratch = kScratchDoubleReg;
+
+  __ push(scratch);
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += kPointerSize;
+
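+  // Fast path: convert the double directly and use the result if it fits in
+  // an int32; otherwise fall through to the manual bit decomposition below.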
+  if (!skip_fastpath()) {
+    // Load double input.
+    __ LoadDouble(double_scratch, MemOperand(input_reg, double_offset));
+
+    // Do fast-path convert from double to int.
+    __ ConvertDoubleToInt64(double_scratch,
+#if !V8_TARGET_ARCH_S390X
+                            scratch,
+#endif
+                            result_reg, d0);
+
+// Test for overflow
+#if V8_TARGET_ARCH_S390X
+    __ TestIfInt32(result_reg, r0);
+#else
+    __ TestIfInt32(scratch, result_reg, r0);
+#endif
+    __ beq(&fastpath_done, Label::kNear);
+  }
+
+  __ Push(scratch_high, scratch_low);
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+  __ LoadlW(scratch_high,
+            MemOperand(input_reg, double_offset + Register::kExponentOffset));
+  __ LoadlW(scratch_low,
+            MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+
+  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
+  // Load scratch with exponent - 1. This is faster than loading
+  // with the exponent because Bias + 1 = 1024, which is an *S390* immediate value.
+  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+  __ SubP(scratch, Operand(HeapNumber::kExponentBias + 1));
+  // If the exponent is greater than or equal to 84, the 32 least significant
+  // bits are all zero (2^84 = a leading 1, 52 significant bits and 32 zero
+  // bits), so the result is 0.
+  // Compare exponent with 84 (compare exponent - 1 with 83).
+  __ CmpP(scratch, Operand(83));
+  __ bge(&out_of_range, Label::kNear);
+
+  // If we reach this code, 31 <= exponent <= 83.
+  // So, we don't have to handle cases where 0 <= exponent <= 20 for
+  // which we would need to shift right the high part of the mantissa.
+  // Scratch contains exponent - 1.
+  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+  __ Load(r0, Operand(51));
+  __ SubP(scratch, r0, scratch);
+  __ CmpP(scratch, Operand::Zero());
+  __ ble(&only_low, Label::kNear);
+  // 21 <= exponent <= 51, shift scratch_low and scratch_high
+  // to generate the result.
+  __ ShiftRight(scratch_low, scratch_low, scratch);
+  // Scratch contains: 52 - exponent.
+  // We need: exponent - 20.
+  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+  __ Load(r0, Operand(32));
+  __ SubP(scratch, r0, scratch);
+  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
+  // Set the implicit 1 before the mantissa part in scratch_high.
+  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
+  __ Load(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
+  __ ShiftLeftP(r0, r0, Operand(16));
+  __ OrP(result_reg, result_reg, r0);
+  __ ShiftLeft(r0, result_reg, scratch);
+  __ OrP(result_reg, scratch_low, r0);
+  __ b(&negate, Label::kNear);
+
+  __ bind(&out_of_range);
+  __ mov(result_reg, Operand::Zero());
+  __ b(&done, Label::kNear);
+
+  __ bind(&only_low);
+  // 52 <= exponent <= 83, shift only scratch_low.
+  // On entry, scratch contains: 52 - exponent.
+  __ LoadComplementRR(scratch, scratch);
+  __ ShiftLeft(result_reg, scratch_low, scratch);
+
+  __ bind(&negate);
+  // If the input was positive, scratch_high ASR 31 equals 0 and
+  // scratch_high LSR 31 equals 0.
+  // New result = (result xor 0) + 0 = result.
+  // If the input was negative, we have to negate the result.
+  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+  // New result = (result xor 0xffffffff) + 1 = 0 - result.
+  __ ShiftRightArith(r0, scratch_high, Operand(31));
+#if V8_TARGET_ARCH_S390X
+  __ lgfr(r0, r0);
+  __ ShiftRightP(r0, r0, Operand(32));
+#endif
+  __ XorP(result_reg, r0);
+  __ ShiftRight(r0, scratch_high, Operand(31));
+  __ AddP(result_reg, r0);
+
+  __ bind(&done);
+  __ Pop(scratch_high, scratch_low);
+
+  __ bind(&fastpath_done);
+  __ pop(scratch);
+
+  __ Ret();
+}
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+                                          Condition cond) {
+  Label not_identical;
+  Label heap_number, return_equal;
+  __ CmpP(r2, r3);
+  __ bne(&not_identical);
+
+  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+  // so we do the second best thing - test it ourselves.
+  // They are equal, and since they are not both Smis, neither of them is a
+  // Smi.  If it's not a heap number, then return equal.
+  if (cond == lt || cond == gt) {
+    // Call runtime on identical JSObjects.
+    __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
+    __ bge(slow);
+    // Call runtime on identical symbols since we need to throw a TypeError.
+    __ CmpP(r6, Operand(SYMBOL_TYPE));
+    __ beq(slow);
+    // Call runtime on identical SIMD values since we must throw a TypeError.
+    __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
+    __ beq(slow);
+  } else {
+    __ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE);
+    __ beq(&heap_number);
+    // Comparing JS objects with <=, >= is complicated.
+    if (cond != eq) {
+      __ CmpP(r6, Operand(FIRST_JS_RECEIVER_TYPE));
+      __ bge(slow);
+      // Call runtime on identical symbols since we need to throw a TypeError.
+      __ CmpP(r6, Operand(SYMBOL_TYPE));
+      __ beq(slow);
+      // Call runtime on identical SIMD values since we must throw a TypeError.
+      __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
+      __ beq(slow);
+      // Normally here we fall through to return_equal, but undefined is
+      // special: (undefined == undefined) == true, but
+      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
+      if (cond == le || cond == ge) {
+        __ CmpP(r6, Operand(ODDBALL_TYPE));
+        __ bne(&return_equal);
+        __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+        __ bne(&return_equal);
+        if (cond == le) {
+          // undefined <= undefined should fail.
+          __ LoadImmP(r2, Operand(GREATER));
+        } else {
+          // undefined >= undefined should fail.
+          __ LoadImmP(r2, Operand(LESS));
+        }
+        __ Ret();
+      }
+    }
+  }
+
+  __ bind(&return_equal);
+  if (cond == lt) {
+    __ LoadImmP(r2, Operand(GREATER));  // Things aren't less than themselves.
+  } else if (cond == gt) {
+    __ LoadImmP(r2, Operand(LESS));  // Things aren't greater than themselves.
+  } else {
+    __ LoadImmP(r2, Operand(EQUAL));  // Things are <=, >=, ==, === themselves
+  }
+  __ Ret();
+
+  // For less and greater we don't have to check for NaN since the result of
+  // x < x is false regardless.  For the others here is some code to check
+  // for NaN.
+  if (cond != lt && cond != gt) {
+    __ bind(&heap_number);
+    // It is a heap number, so return non-equal if it's NaN and equal if it's
+    // not NaN.
+
+    // The representation of NaN values has all exponent bits (52..62) set,
+    // and not all mantissa bits (0..51) clear.
+    // Read top bits of double representation (second word of value).
+    __ LoadlW(r4, FieldMemOperand(r2, HeapNumber::kExponentOffset));
+    // Test that exponent bits are all set.
+    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
+    __ ExtractBitMask(r5, r4, HeapNumber::kExponentMask);
+    __ CmpLogicalP(r5, Operand(0x7ff));
+    __ bne(&return_equal);
+
+    // Shift out flag and all exponent bits, retaining only mantissa.
+    __ sll(r4, Operand(HeapNumber::kNonMantissaBitsInTopWord));
+    // Or with all low-bits of mantissa.
+    __ LoadlW(r5, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
+    __ OrP(r2, r5, r4);
+    __ CmpP(r2, Operand::Zero());
+    // For equal we already have the right value in r2:  Return zero (equal)
+    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+    // not (it's a NaN).  For <= and >= we need to load r2 with the failing
+    // value if it's a NaN.
+    if (cond != eq) {
+      Label not_equal;
+      __ bne(&not_equal, Label::kNear);
+      // All-zero means Infinity means equal.
+      __ Ret();
+      __ bind(&not_equal);
+      if (cond == le) {
+        __ LoadImmP(r2, Operand(GREATER));  // NaN <= NaN should fail.
+      } else {
+        __ LoadImmP(r2, Operand(LESS));  // NaN >= NaN should fail.
+      }
+    }
+    __ Ret();
+  }
+  // No fall through here.
+
+  __ bind(&not_identical);
+}
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+                                    Register rhs, Label* lhs_not_nan,
+                                    Label* slow, bool strict) {
+  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+  Label rhs_is_smi;
+  __ JumpIfSmi(rhs, &rhs_is_smi);
+
+  // Lhs is a Smi.  Check whether the rhs is a heap number.
+  __ CompareObjectType(rhs, r5, r6, HEAP_NUMBER_TYPE);
+  if (strict) {
+    // If rhs is not a number and lhs is a Smi then strict equality cannot
+    // succeed.  Return non-equal
+    // If rhs is r2 then there is already a non zero value in it.
+    Label skip;
+    __ beq(&skip, Label::kNear);
+    if (!rhs.is(r2)) {
+      __ mov(r2, Operand(NOT_EQUAL));
+    }
+    __ Ret();
+    __ bind(&skip);
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
+    // the runtime.
+    __ bne(slow);
+  }
+
+  // Lhs is a smi, rhs is a number.
+  // Convert lhs to a double in d7.
+  __ SmiToDouble(d7, lhs);
+  // Load the double from rhs, tagged HeapNumber r2, to d6.
+  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+  // We now have both loaded as doubles but we can skip the lhs nan check
+  // since it's a smi.
+  __ b(lhs_not_nan);
+
+  __ bind(&rhs_is_smi);
+  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
+  __ CompareObjectType(lhs, r6, r6, HEAP_NUMBER_TYPE);
+  if (strict) {
+    // If lhs is not a number and rhs is a smi then strict equality cannot
+    // succeed.  Return non-equal.
+    // If lhs is r2 then there is already a non zero value in it.
+    Label skip;
+    __ beq(&skip, Label::kNear);
+    if (!lhs.is(r2)) {
+      __ mov(r2, Operand(NOT_EQUAL));
+    }
+    __ Ret();
+    __ bind(&skip);
+  } else {
+    // Smi compared non-strictly with a non-smi non-heap-number.  Call
+    // the runtime.
+    __ bne(slow);
+  }
+
+  // Rhs is a smi, lhs is a heap number.
+  // Load the double from lhs, tagged HeapNumber r3, to d7.
+  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  // Convert rhs to a double in d6.
+  __ SmiToDouble(d6, rhs);
+  // Fall through to both_loaded_as_doubles.
+}
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+                                           Register rhs) {
+  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+  // If either operand is a JS object or an oddball value, then they are
+  // not equal since their pointers are different.
+  // There is no test for undetectability in strict equality.
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Label first_non_object;
+  // Get the type of the first operand into r4 and compare it with
+  // FIRST_JS_RECEIVER_TYPE.
+  __ CompareObjectType(rhs, r4, r4, FIRST_JS_RECEIVER_TYPE);
+  __ blt(&first_non_object, Label::kNear);
+
+  // Return non-zero (r2 is not zero)
+  Label return_not_equal;
+  __ bind(&return_not_equal);
+  __ Ret();
+
+  __ bind(&first_non_object);
+  // Check for oddballs: true, false, null, undefined.
+  __ CmpP(r4, Operand(ODDBALL_TYPE));
+  __ beq(&return_not_equal);
+
+  __ CompareObjectType(lhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
+  __ bge(&return_not_equal);
+
+  // Check for oddballs: true, false, null, undefined.
+  __ CmpP(r5, Operand(ODDBALL_TYPE));
+  __ beq(&return_not_equal);
+
+  // Now that we have the types we might as well check for
+  // internalized-internalized.
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ OrP(r4, r4, r5);
+  __ AndP(r0, r4, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  __ beq(&return_not_equal);
+}
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
+                                       Register rhs,
+                                       Label* both_loaded_as_doubles,
+                                       Label* not_heap_numbers, Label* slow) {
+  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+  __ CompareObjectType(rhs, r5, r4, HEAP_NUMBER_TYPE);
+  __ bne(not_heap_numbers);
+  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ CmpP(r4, r5);
+  __ bne(slow);  // First was a heap number, second wasn't.  Go slow case.
+
+  // Both are heap numbers.  Load them up then jump to the code we have
+  // for that.
+  __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+  __ b(both_loaded_as_doubles);
+}
+
+// Fast negative check for internalized-to-internalized equality or receiver
+// equality. Also handles the undetectable receiver to null/undefined
+// comparison.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+                                                     Register lhs, Register rhs,
+                                                     Label* possible_strings,
+                                                     Label* runtime_call) {
+  DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
+
+  // r4 is object type of rhs.
+  Label object_test, return_equal, return_unequal, undetectable;
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ mov(r0, Operand(kIsNotStringMask));
+  __ AndP(r0, r4);
+  __ bne(&object_test, Label::kNear);
+  __ mov(r0, Operand(kIsNotInternalizedMask));
+  __ AndP(r0, r4);
+  __ bne(possible_strings);
+  __ CompareObjectType(lhs, r5, r5, FIRST_NONSTRING_TYPE);
+  __ bge(runtime_call);
+  __ mov(r0, Operand(kIsNotInternalizedMask));
+  __ AndP(r0, r5);
+  __ bne(possible_strings);
+
+  // Both are internalized. We already checked they weren't the same pointer so
+  // they are not equal. Return non-equal by returning the non-zero object
+  // pointer in r2.
+  __ Ret();
+
+  __ bind(&object_test);
+  __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ LoadP(r5, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ LoadlB(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
+  __ AndP(r0, r6, Operand(1 << Map::kIsUndetectable));
+  __ bne(&undetectable);
+  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
+  __ bne(&return_unequal);
+
+  __ CompareInstanceType(r4, r4, FIRST_JS_RECEIVER_TYPE);
+  __ blt(runtime_call);
+  __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
+  __ blt(runtime_call);
+
+  __ bind(&return_unequal);
+  // Return non-equal by returning the non-zero object pointer in r2.
+  __ Ret();
+
+  __ bind(&undetectable);
+  __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
+  __ beq(&return_unequal);
+
+  // If both sides are JSReceivers, then the result is false according to
+  // the HTML specification, which says that only comparisons with null or
+  // undefined are affected by special casing for document.all.
+  __ CompareInstanceType(r4, r4, ODDBALL_TYPE);
+  __ beq(&return_equal);
+  __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
+  __ bne(&return_unequal);
+
+  __ bind(&return_equal);
+  __ LoadImmP(r2, Operand(EQUAL));
+  __ Ret();
+}
+
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+                                         Register scratch,
+                                         CompareICState::State expected,
+                                         Label* fail) {
+  Label ok;
+  if (expected == CompareICState::SMI) {
+    __ JumpIfNotSmi(input, fail);
+  } else if (expected == CompareICState::NUMBER) {
+    __ JumpIfSmi(input, &ok);
+    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+                DONT_DO_SMI_CHECK);
+  }
+  // We could be strict about internalized/non-internalized here, but as long as
+  // hydrogen doesn't care, the stub doesn't have to care either.
+  __ bind(&ok);
+}
+
+// On entry r3 and r4 are the values to be compared.
+// On exit r2 is 0, positive or negative to indicate the result of
+// the comparison.
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
+  Register lhs = r3;
+  Register rhs = r2;
+  Condition cc = GetCondition();
+
+  Label miss;
+  CompareICStub_CheckInputType(masm, lhs, r4, left(), &miss);
+  CompareICStub_CheckInputType(masm, rhs, r5, right(), &miss);
+
+  Label slow;  // Call builtin.
+  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
+
+  Label not_two_smis, smi_done;
+  __ OrP(r4, r3, r2);
+  __ JumpIfNotSmi(r4, &not_two_smis);
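+  // Both operands are Smis: the untagged difference is already negative, zero
+  // or positive as required by the calling convention described above.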
+  __ SmiUntag(r3);
+  __ SmiUntag(r2);
+  __ SubP(r2, r3, r2);
+  __ Ret();
+  __ bind(&not_two_smis);
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Handle the case where the objects are identical.  Either returns the answer
+  // or goes to slow.  Only falls through if the objects were not identical.
+  EmitIdenticalObjectComparison(masm, &slow, cc);
+
+  // If either is a Smi (we know that not both are), then they can only
+  // be strictly equal if the other is a HeapNumber.
+  STATIC_ASSERT(kSmiTag == 0);
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  __ AndP(r4, lhs, rhs);
+  __ JumpIfNotSmi(r4, &not_smis);
+  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
+  // 1) Return the answer.
+  // 2) Go to slow.
+  // 3) Fall through to both_loaded_as_doubles.
+  // 4) Jump to lhs_not_nan.
+  // In cases 3 and 4 we have found out we were dealing with a number-number
+  // comparison.  The double values of the numbers have been loaded
+  // into d7 and d6.
+  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
+
+  __ bind(&both_loaded_as_doubles);
+  // The arguments have been converted to doubles and stored in d6 and d7
+  __ bind(&lhs_not_nan);
+  Label no_nan;
+  __ cdbr(d7, d6);
+
+  Label nan, equal, less_than;
+  __ bunordered(&nan);
+  __ beq(&equal, Label::kNear);
+  __ blt(&less_than, Label::kNear);
+  __ LoadImmP(r2, Operand(GREATER));
+  __ Ret();
+  __ bind(&equal);
+  __ LoadImmP(r2, Operand(EQUAL));
+  __ Ret();
+  __ bind(&less_than);
+  __ LoadImmP(r2, Operand(LESS));
+  __ Ret();
+
+  __ bind(&nan);
+  // If one of the sides was a NaN then the comparison is unordered.  Load r2
+  // with whatever it takes to make the comparison fail, since comparisons
+  // with NaN always fail.
+  if (cc == lt || cc == le) {
+    __ LoadImmP(r2, Operand(GREATER));
+  } else {
+    __ LoadImmP(r2, Operand(LESS));
+  }
+  __ Ret();
+
+  __ bind(&not_smis);
+  // At this point we know we are dealing with two different objects,
+  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
+  if (strict()) {
+    // This returns non-equal for some object types, or falls through if it
+    // was not lucky.
+    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+  }
+
+  Label check_for_internalized_strings;
+  Label flat_string_check;
+  // Check for heap-number-heap-number comparison.  Can jump to slow case,
+  // or load both doubles into r2, r3, r4, r5 and jump to the code that handles
+  // that case.  If the inputs are not doubles then jumps to
+  // check_for_internalized_strings.
+  // In this case r4 will contain the type of rhs_.  Never falls through.
+  EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
+                             &check_for_internalized_strings,
+                             &flat_string_check);
+
+  __ bind(&check_for_internalized_strings);
+  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
+  // internalized strings.
+  if (cc == eq && !strict()) {
+    // Returns an answer for two internalized strings or two detectable objects.
+    // Otherwise jumps to string case or not both strings case.
+    // Assumes that r4 is the type of rhs_ on entry.
+    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
+                                             &slow);
+  }
+
+  // Check for both being sequential one-byte strings,
+  // and inline if that is the case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r4, r5, &slow);
+
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
+                      r5);
+  if (cc == eq) {
+    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r4, r5);
+  } else {
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r4, r5, r6);
+  }
+  // Never falls through to here.
+
+  __ bind(&slow);
+
+  if (cc == eq) {
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(lhs, rhs);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+    __ SubP(r2, r2, r3);
+    __ Ret();
+  } else {
+    __ Push(lhs, rhs);
+    int ncr;  // NaN compare result
+    if (cc == lt || cc == le) {
+      ncr = GREATER;
+    } else {
+      DCHECK(cc == gt || cc == ge);  // remaining cases
+      ncr = LESS;
+    }
+    __ LoadSmiLiteral(r2, Smi::FromInt(ncr));
+    __ push(r2);
+
+    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+    // tagged as a small integer.
+    __ TailCallRuntime(Runtime::kCompare);
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ MultiPush(kJSCallerSaved | r14.bit());
+  if (save_doubles()) {
+    __ MultiPushDoubles(kCallerSavedDoubles);
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = r3;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+  __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
+                   argument_count);
+  if (save_doubles()) {
+    __ MultiPopDoubles(kCallerSavedDoubles);
+  }
+  __ MultiPop(kJSCallerSaved | r14.bit());
+  __ Ret();
+}
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ PushSafepointRegisters();
+  __ b(r14);
+}
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ PopSafepointRegisters();
+  __ b(r14);
+}
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+  const Register base = r3;
+  const Register exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent.is(r4));
+  const Register heapnumbermap = r7;
+  const Register heapnumber = r2;
+  const DoubleRegister double_base = d1;
+  const DoubleRegister double_exponent = d2;
+  const DoubleRegister double_result = d3;
+  const DoubleRegister double_scratch = d0;
+  const Register scratch = r1;
+  const Register scratch2 = r9;
+
+  Label call_runtime, done, int_exponent;
+  if (exponent_type() == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack to double registers.
+    __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
+    __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+
+    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+    __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+    __ CmpP(scratch, heapnumbermap);
+    __ bne(&call_runtime);
+
+    __ LoadDouble(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ b(&unpack_exponent, Label::kNear);
+
+    __ bind(&base_is_smi);
+    __ ConvertIntToDouble(scratch, double_base);
+    __ bind(&unpack_exponent);
+
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+    __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ CmpP(scratch, heapnumbermap);
+    __ bne(&call_runtime);
+
+    __ LoadDouble(double_exponent,
+                  FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type() == TAGGED) {
+    // Base is already in double_base.
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+    __ LoadDouble(double_exponent,
+                  FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type() != INTEGER) {
+    // Detect integer exponents stored as double.
+    __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
+                             double_scratch);
+    __ beq(&int_exponent, Label::kNear);
+
+    if (exponent_type() == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label not_plus_half, not_minus_inf1, not_minus_inf2;
+
+      // Test for 0.5.
+      __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
+      __ cdbr(double_exponent, double_scratch);
+      __ bne(&not_plus_half, Label::kNear);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+      __ cdbr(double_base, double_scratch);
+      __ bne(&not_minus_inf1, Label::kNear);
+      __ lcdbr(double_result, double_scratch);
+      __ b(&done);
+      __ bind(&not_minus_inf1);
+
+      // Add +0 to convert -0 to +0.
+      __ ldr(double_scratch, double_base);
+      __ lzdr(kDoubleRegZero);
+      __ adbr(double_scratch, kDoubleRegZero);
+      __ sqdbr(double_result, double_scratch);
+      __ b(&done);
+
+      __ bind(&not_plus_half);
+      __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
+      __ cdbr(double_exponent, double_scratch);
+      __ bne(&call_runtime);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+      __ cdbr(double_base, double_scratch);
+      __ bne(&not_minus_inf2, Label::kNear);
+      __ ldr(double_result, kDoubleRegZero);
+      __ b(&done);
+      __ bind(&not_minus_inf2);
+
+      // Add +0 to convert -0 to +0.
+      __ ldr(double_scratch, double_base);
+      __ lzdr(kDoubleRegZero);
+      __ adbr(double_scratch, kDoubleRegZero);
+      __ LoadDoubleLiteral(double_result, 1.0, scratch);
+      __ sqdbr(double_scratch, double_scratch);
+      __ ddbr(double_result, double_scratch);
+      __ b(&done);
+    }
+
+    __ push(r14);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ MovToFloatParameters(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(isolate()), 0, 2);
+    }
+    __ pop(r14);
+    __ MovFromFloatResult(double_result);
+    __ b(&done);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+
+  // Get two copies of exponent in the registers scratch and exponent.
+  if (exponent_type() == INTEGER) {
+    __ LoadRR(scratch, exponent);
+  } else {
+    // Exponent has previously been stored into scratch as untagged integer.
+    __ LoadRR(exponent, scratch);
+  }
+  __ ldr(double_scratch, double_base);  // Back up base.
+  __ LoadImmP(scratch2, Operand(1));
+  __ ConvertIntToDouble(scratch2, double_result);
+
+  // Get absolute value of exponent.
+  Label positive_exponent;
+  __ CmpP(scratch, Operand::Zero());
+  __ bge(&positive_exponent, Label::kNear);
+  __ LoadComplementRR(scratch, scratch);
+  __ bind(&positive_exponent);
+
+  Label while_true, no_carry, loop_end;
+  __ bind(&while_true);
+  __ mov(scratch2, Operand(1));
+  __ AndP(scratch2, scratch);
+  __ beq(&no_carry, Label::kNear);
+  __ mdbr(double_result, double_scratch);
+  __ bind(&no_carry);
+  __ ShiftRightArithP(scratch, scratch, Operand(1));
+  __ beq(&loop_end, Label::kNear);
+  __ mdbr(double_scratch, double_scratch);
+  __ b(&while_true);
+  __ bind(&loop_end);
+
+  __ CmpP(exponent, Operand::Zero());
+  __ bge(&done);
+
+  // get 1/double_result:
+  __ ldr(double_scratch, double_result);
+  __ LoadImmP(scratch2, Operand(1));
+  __ ConvertIntToDouble(scratch2, double_result);
+  __ ddbr(double_result, double_scratch);
+
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ lzdr(kDoubleRegZero);
+  __ cdbr(double_result, kDoubleRegZero);
+  __ bne(&done, Label::kNear);
+  // double_exponent may not contain the exponent value if the input was a
+  // smi.  We set it with the exponent value before bailing out.
+  __ ConvertIntToDouble(exponent, double_exponent);
+
+  // Returning or bailing out.
+  if (exponent_type() == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMathPowRT);
+
+    // The stub is called from non-optimized code, which expects the result
+    // as a heap number in r2.
+    __ bind(&done);
+    __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
+                          &call_runtime);
+    __ StoreDouble(double_result,
+                   FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    DCHECK(heapnumber.is(r2));
+    __ Ret(2);
+  } else {
+    __ push(r14);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ MovToFloatParameters(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(isolate()), 0, 2);
+    }
+    __ pop(r14);
+    __ MovFromFloatResult(double_result);
+
+    __ bind(&done);
+    __ Ret();
+  }
+}
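+// Illustrative sketch of the integer-exponent loop above (plain C, not
+// generated code; 'b' and 'e' stand for the values in double_scratch and
+// scratch):
+//
+//   double result = 1.0;
+//   int n = (e < 0) ? -e : e;          // absolute value of the exponent
+//   while (n != 0) {
+//     if (n & 1) result *= b;          // multiply when the low bit is set
+//     n >>= 1;
+//     if (n == 0) break;
+//     b *= b;                          // square the base each round
+//   }
+//   if (e < 0) result = 1.0 / result;  // negative exponent: take reciprocal;
+//                                      // a zero result bails out to the C
+//                                      // pow() because of subnormals.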
+
+bool CEntryStub::NeedsImmovableCode() { return true; }
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+  CEntryStub::GenerateAheadOfTime(isolate);
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  CreateWeakCellStub::GenerateAheadOfTime(isolate);
+  BinaryOpICStub::GenerateAheadOfTime(isolate);
+  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+  StoreFastElementStub::GenerateAheadOfTime(isolate);
+  TypeofStub::GenerateAheadOfTime(isolate);
+}
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  StoreRegistersStateStub stub(isolate);
+  stub.GetCode();
+}
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  RestoreRegistersStateStub stub(isolate);
+  stub.GetCode();
+}
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+  SaveFPRegsMode mode = kSaveFPRegs;
+  CEntryStub(isolate, 1, mode).GetCode();
+  StoreBufferOverflowStub(isolate, mode).GetCode();
+  isolate->set_fp_stubs_generated(true);
+}
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+  stub.GetCode();
+}
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  // Called from JavaScript; parameters are on stack as if calling JS function.
+  // r2: number of arguments including receiver
+  // r3: pointer to builtin function
+  // fp: frame pointer  (restored after C call)
+  // sp: stack pointer  (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+  //
+  // If argv_in_register():
+  // r4: pointer to the first argument
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  __ LoadRR(r7, r3);
+
+  if (argv_in_register()) {
+    // Move argv into the correct register.
+    __ LoadRR(r3, r4);
+  } else {
+    // Compute the argv pointer.
+    __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
+    __ lay(r3, MemOperand(r3, sp, -kPointerSize));
+  }
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
+
+  // Need at least one extra slot for return address location.
+  int arg_stack_space = 1;
+
+  // Pass buffer for return value on stack if necessary
+  bool needs_return_buffer =
+      result_size() > 2 ||
+      (result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS);
+  if (needs_return_buffer) {
+    arg_stack_space += result_size();
+  }
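+  // For example (not generated code): a runtime function returning an
+  // ObjectPair has result_size() == 2; if the ABI does not return such a pair
+  // in registers, the stub reserves extra stack slots here and passes their
+  // address as an implicit first argument below.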
+
+#if V8_TARGET_ARCH_S390X
+  // 64-bit Linux passes the Argument object by reference, not by value.
+  arg_stack_space += 2;
+#endif
+
+  __ EnterExitFrame(save_doubles(), arg_stack_space);
+
+  // Store a copy of argc, argv in callee-saved registers for later.
+  __ LoadRR(r6, r2);
+  __ LoadRR(r8, r3);
+  // r2, r6: number of arguments including receiver  (C callee-saved)
+  // r3, r8: pointer to the first argument
+  // r7: pointer to builtin function  (C callee-saved)
+
+  // Result returned in registers or stack, depending on result size and ABI.
+
+  Register isolate_reg = r4;
+  if (needs_return_buffer) {
+    // The return value is a 16-byte non-scalar value.
+    // Use frame storage reserved by the calling function to pass the return
+    // buffer as an implicit first argument in R2.  Shift the original
+    // parameters down by one register each.
+    __ LoadRR(r4, r3);
+    __ LoadRR(r3, r2);
+    __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+    isolate_reg = r5;
+  }
+  // Call C built-in.
+  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
+
+  Register target = r7;
+
+  // To let the GC traverse the return address of the exit frames, we need to
+  // know where the return address is. The CEntryStub is unmovable, so
+  // we can store the address on the stack to be able to find it again and
+  // we never have to restore it, because it will not change.
+  {
+    Label return_label;
+    __ larl(r14, &return_label);  // Generate the return addr of call later.
+    __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
+
+    // The zLinux ABI requires the caller's frame to have sufficient space for
+    // the callee-preserved register save area.
+    // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
+    __ positions_recorder()->WriteRecordedPositions();
+    __ b(target);
+    __ bind(&return_label);
+    // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
+  }
+
+  // If return value is on the stack, pop it to registers.
+  if (needs_return_buffer) {
+    if (result_size() > 2) __ LoadP(r4, MemOperand(r2, 2 * kPointerSize));
+    __ LoadP(r3, MemOperand(r2, kPointerSize));
+    __ LoadP(r2, MemOperand(r2));
+  }
+
+  // Check result for exception sentinel.
+  Label exception_returned;
+  __ CompareRoot(r2, Heap::kExceptionRootIndex);
+  __ beq(&exception_returned, Label::kNear);
+
+  // Check that there is no pending exception, otherwise we
+  // should have returned the exception sentinel.
+  if (FLAG_debug_code) {
+    Label okay;
+    ExternalReference pending_exception_address(
+        Isolate::kPendingExceptionAddress, isolate());
+    __ mov(r1, Operand(pending_exception_address));
+    __ LoadP(r1, MemOperand(r1));
+    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+    // Cannot use check here as it attempts to generate call into runtime.
+    __ beq(&okay, Label::kNear);
+    __ stop("Unexpected pending exception");
+    __ bind(&okay);
+  }
+
+  // Exit C frame and return.
+  // r2:r3: result
+  // sp: stack pointer
+  // fp: frame pointer
+  Register argc;
+  if (argv_in_register()) {
+    // We don't want to pop arguments so set argc to no_reg.
+    argc = no_reg;
+  } else {
+    // r6: still holds argc (callee-saved).
+    argc = r6;
+  }
+  __ LeaveExitFrame(save_doubles(), argc, true);
+  __ b(r14);
+
+  // Handling of exception.
+  __ bind(&exception_returned);
+
+  ExternalReference pending_handler_context_address(
+      Isolate::kPendingHandlerContextAddress, isolate());
+  ExternalReference pending_handler_code_address(
+      Isolate::kPendingHandlerCodeAddress, isolate());
+  ExternalReference pending_handler_offset_address(
+      Isolate::kPendingHandlerOffsetAddress, isolate());
+  ExternalReference pending_handler_fp_address(
+      Isolate::kPendingHandlerFPAddress, isolate());
+  ExternalReference pending_handler_sp_address(
+      Isolate::kPendingHandlerSPAddress, isolate());
+
+  // Ask the runtime for help to determine the handler. This will set r3 to
+  // contain the current pending exception, don't clobber it.
+  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
+                                 isolate());
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(3, 0, r2);
+    __ LoadImmP(r2, Operand::Zero());
+    __ LoadImmP(r3, Operand::Zero());
+    __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+    __ CallCFunction(find_handler, 3);
+  }
+
+  // Retrieve the handler context, SP and FP.
+  __ mov(cp, Operand(pending_handler_context_address));
+  __ LoadP(cp, MemOperand(cp));
+  __ mov(sp, Operand(pending_handler_sp_address));
+  __ LoadP(sp, MemOperand(sp));
+  __ mov(fp, Operand(pending_handler_fp_address));
+  __ LoadP(fp, MemOperand(fp));
+
+  // If the handler is a JS frame, restore the context to the frame. Note that
+  // the context will be set to (cp == 0) for non-JS frames.
+  Label skip;
+  __ CmpP(cp, Operand::Zero());
+  __ beq(&skip, Label::kNear);
+  __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ bind(&skip);
+
+  // Compute the handler entry address and jump to it.
+  __ mov(r3, Operand(pending_handler_code_address));
+  __ LoadP(r3, MemOperand(r3));
+  __ mov(r4, Operand(pending_handler_offset_address));
+  __ LoadP(r4, MemOperand(r4));
+  __ AddP(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
+  __ AddP(ip, r3, r4);
+  __ Jump(ip);
+}
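+// Illustrative sketch of the exception path above (pseudo-C++, not generated
+// code): the runtime fills in the pending-handler slots, then the stub simply
+// resumes in the handler:
+//
+//   Runtime_UnwindAndFindExceptionHandler();       // fills the slots below
+//   cp = *pending_handler_context;                 // 0 for non-JS handlers
+//   sp = *pending_handler_sp;
+//   fp = *pending_handler_fp;
+//   if (cp != 0) MemOperand(fp, kContextOffset) = cp;
+//   goto pending_handler_code->instruction_start() + *pending_handler_offset;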
+
+void JSEntryStub::Generate(MacroAssembler* masm) {
+  // r2: code entry
+  // r3: function
+  // r4: receiver
+  // r5: argc
+  // r6: argv
+
+  Label invoke, handler_entry, exit;
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+// saving floating point registers
+#if V8_TARGET_ARCH_S390X
+  // 64bit ABI requires f8 to f15 be saved
+  __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
+  __ std(d8, MemOperand(sp));
+  __ std(d9, MemOperand(sp, 1 * kDoubleSize));
+  __ std(d10, MemOperand(sp, 2 * kDoubleSize));
+  __ std(d11, MemOperand(sp, 3 * kDoubleSize));
+  __ std(d12, MemOperand(sp, 4 * kDoubleSize));
+  __ std(d13, MemOperand(sp, 5 * kDoubleSize));
+  __ std(d14, MemOperand(sp, 6 * kDoubleSize));
+  __ std(d15, MemOperand(sp, 7 * kDoubleSize));
+#else
+  // 31bit ABI requires you to store f4 and f6:
+  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
+  __ lay(sp, MemOperand(sp, -2 * kDoubleSize));
+  __ std(d4, MemOperand(sp));
+  __ std(d6, MemOperand(sp, kDoubleSize));
+#endif
+
+  // zLinux ABI
+  //    Incoming parameters:
+  //          r2: code entry
+  //          r3: function
+  //          r4: receiver
+  //          r5: argc
+  //          r6: argv
+  //    Requires us to save the callee-preserved registers r6-r13
+  //    General convention is to also save r14 (return addr) and
+  //    sp/r15 as well in a single STM/STMG
+  __ lay(sp, MemOperand(sp, -10 * kPointerSize));
+  __ StoreMultipleP(r6, sp, MemOperand(sp, 0));
+
+  // Set up the reserved register for 0.0.
+  // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
+
+  // Push a frame with special values setup to mark it as an entry frame.
+  //   Bad FP (-1)
+  //   SMI Marker
+  //   SMI Marker
+  //   kCEntryFPAddress
+  //   Frame type
+  __ lay(sp, MemOperand(sp, -5 * kPointerSize));
+  // Push a bad frame pointer to fail if it is used.
+  __ LoadImmP(r10, Operand(-1));
+
+  int marker = type();
+  __ LoadSmiLiteral(r9, Smi::FromInt(marker));
+  __ LoadSmiLiteral(r8, Smi::FromInt(marker));
+  // Save copies of the top frame descriptor on the stack.
+  __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  __ LoadP(r7, MemOperand(r7));
+  __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
+  // Set up frame pointer for the frame to be pushed.
+  // Need to add kPointerSize, because sp has one extra
+  // frame already for the frame type being pushed later.
+  __ lay(fp,
+         MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize));
+
+  // If this is the outermost JS call, set js_entry_sp value.
+  Label non_outermost_js;
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
+  __ LoadAndTestP(r8, MemOperand(r7));
+  __ bne(&non_outermost_js, Label::kNear);
+  __ StoreP(fp, MemOperand(r7));
+  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  Label cont;
+  __ b(&cont, Label::kNear);
+  __ bind(&non_outermost_js);
+  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+
+  __ bind(&cont);
+  __ StoreP(ip, MemOperand(sp));  // frame-type
+
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ b(&invoke, Label::kNear);
+
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.  Coming in here the
+  // fp will be invalid because the PushStackHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
+  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate())));
+
+  __ StoreP(r2, MemOperand(ip));
+  __ LoadRoot(r2, Heap::kExceptionRootIndex);
+  __ b(&exit, Label::kNear);
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  // Must preserve r2-r6.
+  __ PushStackHandler();
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the b(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate())));
+  __ mov(r7, Operand(isolate()->factory()->the_hole_value()));
+  __ StoreP(r7, MemOperand(ip));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Expected registers by Builtins::JSEntryTrampoline
+  // r2: code entry
+  // r3: function
+  // r4: receiver
+  // r5: argc
+  // r6: argv
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      isolate());
+    __ mov(ip, Operand(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
+    __ mov(ip, Operand(entry));
+  }
+  __ LoadP(ip, MemOperand(ip));  // deref address
+
+  // Branch and link to JSEntryTrampoline.
+  // the address points to the start of the code object, skip the header
+  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  Label return_addr;
+  // __ basr(r14, ip);
+  __ larl(r14, &return_addr);
+  __ b(ip);
+  __ bind(&return_addr);
+
+  // Unlink this frame from the handler chain.
+  __ PopStackHandler();
+
+  __ bind(&exit);  // r2 holds result
+  // Check if the current stack frame is marked as the outermost JS frame.
+  Label non_outermost_js_2;
+  __ pop(r7);
+  __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+  __ bne(&non_outermost_js_2, Label::kNear);
+  __ mov(r8, Operand::Zero());
+  __ mov(r7, Operand(ExternalReference(js_entry_sp)));
+  __ StoreP(r8, MemOperand(r7));
+  __ bind(&non_outermost_js_2);
+
+  // Restore the top frame descriptors from the stack.
+  __ pop(r5);
+  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  __ StoreP(r5, MemOperand(ip));
+
+  // Reset the stack to the callee saved registers.
+  __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
+
+  // Reload callee-saved preserved regs, return address reg (r14) and sp
+  __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
+  __ la(sp, MemOperand(sp, 10 * kPointerSize));
+
+// restore floating point registers
+#if V8_TARGET_ARCH_S390X
+  // 64bit ABI requires f8 to f15 be restored
+  __ ld(d8, MemOperand(sp));
+  __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
+  __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
+  __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
+  __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
+  __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
+  __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
+  __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
+  __ la(sp, MemOperand(sp, 8 * kDoubleSize));
+#else
+  // 31bit ABI requires f4 and f6 be restored:
+  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
+  __ ld(d4, MemOperand(sp));
+  __ ld(d6, MemOperand(sp, kDoubleSize));
+  __ la(sp, MemOperand(sp, 2 * kDoubleSize));
+#endif
+
+  __ b(r14);
+}
+
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+  Register const object = r3;              // Object (lhs).
+  Register const function = r2;            // Function (rhs).
+  Register const object_map = r4;          // Map of {object}.
+  Register const function_map = r5;        // Map of {function}.
+  Register const function_prototype = r6;  // Prototype of {function}.
+  Register const scratch = r7;
+
+  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+  // Check if {object} is a smi.
+  Label object_is_smi;
+  __ JumpIfSmi(object, &object_is_smi);
+
+  // Lookup the {function} and the {object} map in the global instanceof cache.
+  // Note: This is safe because we clear the global instanceof cache whenever
+  // we change the prototype of any object.
+  Label fast_case, slow_case;
+  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+  __ bne(&fast_case);
+  __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+  __ bne(&fast_case);
+  __ LoadRoot(r2, Heap::kInstanceofCacheAnswerRootIndex);
+  __ Ret();
+
+  // If {object} is a smi we can safely return false if {function} is a JS
+  // function, otherwise we have to miss to the runtime and throw an exception.
+  __ bind(&object_is_smi);
+  __ JumpIfSmi(function, &slow_case);
+  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+  __ bne(&slow_case);
+  __ LoadRoot(r2, Heap::kFalseValueRootIndex);
+  __ Ret();
+
+  // Fast-case: The {function} must be a valid JSFunction.
+  __ bind(&fast_case);
+  __ JumpIfSmi(function, &slow_case);
+  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+  __ bne(&slow_case);
+
+  // Go to the runtime if the function is not a constructor.
+  __ LoadlB(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+  __ TestBit(scratch, Map::kIsConstructor, r0);
+  __ beq(&slow_case);
+
+  // Ensure that {function} has an instance prototype.
+  __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
+  __ bne(&slow_case);
+
+  // Get the "prototype" (or initial map) of the {function}.
+  __ LoadP(function_prototype,
+           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  __ AssertNotSmi(function_prototype);
+
+  // Resolve the prototype if the {function} has an initial map.  Afterwards the
+  // {function_prototype} will be either the JSReceiver prototype object or the
+  // hole value, which means that no instances of the {function} were created so
+  // far and hence we should return false.
+  Label function_prototype_valid;
+  __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
+  __ bne(&function_prototype_valid);
+  __ LoadP(function_prototype,
+           FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+  __ bind(&function_prototype_valid);
+  __ AssertNotSmi(function_prototype);
+
+  // Update the global instanceof cache with the current {object} map and
+  // {function}.  The cached answer will be set when it is known below.
+  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+  // Loop through the prototype chain looking for the {function} prototype.
+  // Assume true, and change to false if not found.
+  Register const object_instance_type = function_map;
+  Register const map_bit_field = function_map;
+  Register const null = scratch;
+  Register const result = r2;
+
+  Label done, loop, fast_runtime_fallback;
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ LoadRoot(null, Heap::kNullValueRootIndex);
+  __ bind(&loop);
+
+  // Check if the object needs to be access checked.
+  __ LoadlB(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+  __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
+  __ bne(&fast_runtime_fallback);
+  // Check if the current object is a Proxy.
+  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+  __ beq(&fast_runtime_fallback);
+
+  __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+  __ CmpP(object, function_prototype);
+  __ beq(&done);
+  __ CmpP(object, null);
+  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ bne(&loop);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+  __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+  __ Ret();
+
+  // Found Proxy or access check needed: Call the runtime
+  __ bind(&fast_runtime_fallback);
+  __ Push(object, function_prototype);
+  // Invalidate the instanceof cache.
+  __ LoadSmiLiteral(scratch, Smi::FromInt(0));
+  __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+  // Slow-case: Call the %InstanceOf runtime function.
+  __ bind(&slow_case);
+  __ Push(object, function);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
+}
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  // Ensure that the vector and slot registers won't be clobbered before
+  // calling the miss handler.
+  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::VectorRegister(),
+                     LoadWithVectorDescriptor::SlotRegister()));
+
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
+                                                          r7, &miss);
+  __ bind(&miss);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
+
+void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
+  // Return address is in lr.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = r7;
+  Register result = r2;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+         result.is(LoadWithVectorDescriptor::SlotRegister()));
+
+  // StringCharAtGenerator doesn't use the result register until it's passed
+  // the different miss possibilities. If it did, we would have a conflict
+  // when FLAG_vector_ics is true.
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX,
+                                          RECEIVER_IS_STRING);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
+
+  __ bind(&miss);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+// Just jump directly to the runtime if native RegExp is not selected at
+// compile time, or if the regexp entry in generated code is turned off by a
+// runtime switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec);
+#else   // V8_INTERPRETED_REGEXP
+
+  // Stack frame on entry.
+  //  sp[0]: last_match_info (expected JSArray)
+  //  sp[4]: previous index
+  //  sp[8]: subject string
+  //  sp[12]: JSRegExp object
+
+  const int kLastMatchInfoOffset = 0 * kPointerSize;
+  const int kPreviousIndexOffset = 1 * kPointerSize;
+  const int kSubjectOffset = 2 * kPointerSize;
+  const int kJSRegExpOffset = 3 * kPointerSize;
+
+  Label runtime, br_over, encoding_type_UC16;
+
+  // Allocation of registers for this function. These are in callee save
+  // registers and will be preserved by the call to the native RegExp code, as
+  // this code is called using the normal C calling convention. When calling
+  // directly from generated code the native RegExp code will not do a GC and
+  // therefore the contents of these registers are safe to use after the call.
+  Register subject = r6;
+  Register regexp_data = r7;
+  Register last_match_info_elements = r8;
+  Register code = r9;
+
+  __ CleanseP(r14);
+
+  // Ensure register assignments are consistent with callee-save masks
+  DCHECK(subject.bit() & kCalleeSaved);
+  DCHECK(regexp_data.bit() & kCalleeSaved);
+  DCHECK(last_match_info_elements.bit() & kCalleeSaved);
+  DCHECK(code.bit() & kCalleeSaved);
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address(isolate());
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size(isolate());
+  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
+  __ LoadAndTestP(r2, MemOperand(r2));
+  __ beq(&runtime);
+
+  // Check that the first argument is a JSRegExp object.
+  __ LoadP(r2, MemOperand(sp, kJSRegExpOffset));
+  __ JumpIfSmi(r2, &runtime);
+  __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
+  __ bne(&runtime);
+
+  // Check that the RegExp has been compiled (data contains a fixed array).
+  __ LoadP(regexp_data, FieldMemOperand(r2, JSRegExp::kDataOffset));
+  if (FLAG_debug_code) {
+    __ TestIfSmi(regexp_data);
+    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
+    __ CompareObjectType(regexp_data, r2, r2, FIXED_ARRAY_TYPE);
+    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+  }
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+  __ LoadP(r2, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+  // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
+  __ CmpSmiLiteral(r2, Smi::FromInt(JSRegExp::IRREGEXP), r0);
+  __ bne(&runtime);
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fits in the static offsets vector buffer.
+  __ LoadP(r4,
+           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Check (number_of_captures + 1) * 2 <= offsets vector size
+  // Or          number_of_captures * 2 <= offsets vector size - 2
+  // SmiToShortArrayOffset accomplishes both the multiplication by 2 and the
+  // SmiUntag (the untag is a nop on 32-bit).
+  __ SmiToShortArrayOffset(r4, r4);
+  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+  __ CmpLogicalP(r4, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
+  __ bgt(&runtime);
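+  // Worked example (not generated code): with 3 capture groups the regexp
+  // needs (3 + 1) * 2 = 8 offset slots, so the check above passes only if
+  // 3 * 2 <= kJSRegexpStaticOffsetsVectorSize - 2, i.e. the vector has room
+  // for at least 8 entries.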
+
+  // Reset offset for possibly sliced string.
+  __ LoadImmP(ip, Operand::Zero());
+  __ LoadP(subject, MemOperand(sp, kSubjectOffset));
+  __ JumpIfSmi(subject, &runtime);
+  __ LoadRR(r5, subject);  // Make a copy of the original subject string.
+  // subject: subject string
+  // r5: subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Handle subject string according to its encoding and representation:
+  // (1) Sequential string?  If yes, go to (4).
+  // (2) Sequential or cons?  If not, go to (5).
+  // (3) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (4) Sequential string.  Load regexp code according to encoding.
+  // (E) Carry on.
+  /// [...]
+
+  // Deferred code at the end of the stub:
+  // (5) Long external string?  If not, go to (7).
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (4).
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
+
+  Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
+      not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
+
+  __ bind(&check_underlying);
+  __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+
+  // (1) Sequential string?  If yes, go to (4).
+
+  STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
+                 kShortExternalStringMask) == 0x93);
+  __ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask |
+                     kShortExternalStringMask));
+  __ AndP(r3, r2);
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+  __ beq(&seq_string, Label::kNear);  // Go to (4).
+
+  // (2) Sequential or cons? If not, go to (5).
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+  STATIC_ASSERT(kExternalStringTag < 0xffffu);
+  __ CmpP(r3, Operand(kExternalStringTag));
+  __ bge(&not_seq_nor_cons);  // Go to (5).
+
+  // (3) Cons string.  Check that it's flat.
+  // Replace subject with first string and reload instance type.
+  __ LoadP(r2, FieldMemOperand(subject, ConsString::kSecondOffset));
+  __ CompareRoot(r2, Heap::kempty_stringRootIndex);
+  __ bne(&runtime);
+  __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ b(&check_underlying);
+
+  // (4) Sequential string.  Load regexp code according to encoding.
+  __ bind(&seq_string);
+  // subject: sequential subject string (or look-alike, external string)
+  // r5: original subject string
+  // Load previous index and check range before r5 is overwritten.  We have to
+  // use r5 instead of subject here because subject might have been only made
+  // to look like a sequential string when it actually is an external string.
+  __ LoadP(r3, MemOperand(sp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(r3, &runtime);
+  __ LoadP(r5, FieldMemOperand(r5, String::kLengthOffset));
+  __ CmpLogicalP(r5, r3);
+  __ ble(&runtime);
+  __ SmiUntag(r3);
+
+  STATIC_ASSERT(4 == kOneByteStringTag);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  STATIC_ASSERT(kStringEncodingMask == 4);
+  __ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC);
+  __ beq(&encoding_type_UC16, Label::kNear);
+  __ LoadP(code,
+           FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
+  __ b(&br_over, Label::kNear);
+  __ bind(&encoding_type_UC16);
+  __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ bind(&br_over);
+
+  // (E) Carry on.  String handling is done.
+  // code: irregexp code
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it contains
+  // a smi (code flushing support).
+  __ JumpIfSmi(code, &runtime);
+
+  // r3: previous index
+  // r5: encoding of subject string (1 if one_byte, 0 if two_byte);
+  // code: Address of generated regexp code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // All checks done. Now push arguments for native regexp code.
+  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r2, r4);
+
+  // Isolates: note we add an additional parameter here (isolate pointer).
+  const int kRegExpExecuteArguments = 10;
+  const int kParameterRegisters = 5;
+  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are before that on the stack or in registers.
+
+  // Argument 10 (in stack parameter area): Pass current isolate address.
+  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+  __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+                                   4 * kPointerSize));
+
+  // Argument 9 is a dummy that reserves the space used for
+  // the return address added by the ExitFrame in native calls.
+  __ mov(r2, Operand::Zero());
+  __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+                                   3 * kPointerSize));
+
+  // Argument 8: Indicate that this is a direct call from JavaScript.
+  __ mov(r2, Operand(1));
+  __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+                                   2 * kPointerSize));
+
+  // Argument 7: Start (high end) of backtracking stack memory area.
+  __ mov(r2, Operand(address_of_regexp_stack_memory_address));
+  __ LoadP(r2, MemOperand(r2, 0));
+  __ mov(r1, Operand(address_of_regexp_stack_memory_size));
+  __ LoadP(r1, MemOperand(r1, 0));
+  __ AddP(r2, r1);
+  __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+                                   1 * kPointerSize));
+
+  // Argument 6: Set the number of capture registers to zero to force
+  // global regexps to behave as non-global.  This does not affect non-global
+  // regexps.
+  __ mov(r2, Operand::Zero());
+  __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
+                                   0 * kPointerSize));
+
+  // Argument 1 (r2): Subject string.
+  // Load the original subject string from the previous stack frame (its
+  // length is needed below).  Therefore we have to use fp, which points
+  // exactly 15 pointer sizes below the previous sp.  (Creating a new stack
+  // frame pushes the previous fp onto the stack and moves sp up by
+  // 2 * kPointerSize, and 13 registers were saved on the stack previously.)
+  __ LoadP(r2, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+
+  // Argument 2 (r3): Previous index.
+  // Already there
+  __ AddP(r1, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+
+  // Argument 5 (r6): static offsets vector buffer.
+  __ mov(
+      r6,
+      Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
+
+  // For arguments 4 (r5) and 3 (r4) get the string length, calculate the start
+  // of the data and calculate the shift of the index (0 for one-byte and 1 for
+  // two-byte).
+  __ XorP(r5, Operand(1));
+  // If slice offset is not 0, load the length from the original sliced string.
+  // Argument 3, r4: Start of string data
+  // Prepare start and end index of the input.
+  __ ShiftLeftP(ip, ip, r5);
+  __ AddP(ip, r1, ip);
+  __ ShiftLeftP(r4, r3, r5);
+  __ AddP(r4, ip, r4);
+
+  // Argument 4, r5: End of string data
+  __ LoadP(r1, FieldMemOperand(r2, String::kLengthOffset));
+  __ SmiUntag(r1);
+  __ ShiftLeftP(r0, r1, r5);
+  __ AddP(r5, ip, r0);
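+  // Worked example (not generated code): after the XOR above, r5 holds the
+  // per-character shift ("shift" below: 0 for one-byte, 1 for two-byte), so
+  //   start (r4) = data + (slice_offset << shift) + (previous_index << shift)
+  //   end   (r5) = data + (slice_offset << shift) + (subject_length << shift)
+  // where data (r1) = subject + SeqString::kHeaderSize - kHeapObjectTag.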
+
+  // Locate the code entry and call it.
+  __ AddP(code, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(masm, code);
+
+  __ LeaveExitFrame(false, no_reg, true);
+
+  // r2: result (int32)
+  // subject: subject string -- needed to reload
+  __ LoadP(subject, MemOperand(sp, kSubjectOffset));
+
+  // regexp_data: RegExp data (callee saved)
+  // last_match_info_elements: Last match info elements (callee saved)
+  // Check the result.
+  Label success;
+  __ Cmp32(r2, Operand(1));
+  // We expect exactly one result since we force the called regexp to behave
+  // as non-global.
+  __ beq(&success);
+  Label failure;
+  __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::FAILURE));
+  __ beq(&failure);
+  __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // If not exception it can only be retry. Handle that in the runtime system.
+  __ bne(&runtime);
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code, but
+  // the exception has not been created yet. Handle that in the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  __ mov(r3, Operand(isolate()->factory()->the_hole_value()));
+  __ mov(r4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate())));
+  __ LoadP(r2, MemOperand(r4, 0));
+  __ CmpP(r2, r3);
+  __ beq(&runtime);
+
+  // For exception, throw the exception again.
+  __ TailCallRuntime(Runtime::kRegExpExecReThrow);
+
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ mov(r2, Operand(isolate()->factory()->null_value()));
+  __ la(sp, MemOperand(sp, (4 * kPointerSize)));
+  __ Ret();
+
+  // Process the result from the native regexp code.
+  __ bind(&success);
+  __ LoadP(r3,
+           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
+  // SmiToShortArrayOffset accomplishes both the multiplication by 2 and the
+  // SmiUntag (the untag is a nop on 32-bit).
+  __ SmiToShortArrayOffset(r3, r3);
+  __ AddP(r3, Operand(2));
+
+  __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(r2, &runtime);
+  __ CompareObjectType(r2, r4, r4, JS_ARRAY_TYPE);
+  __ bne(&runtime);
+  // Check that the JSArray is in fast case.
+  __ LoadP(last_match_info_elements,
+           FieldMemOperand(r2, JSArray::kElementsOffset));
+  __ LoadP(r2,
+           FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+  __ CompareRoot(r2, Heap::kFixedArrayMapRootIndex);
+  __ bne(&runtime);
+  // Check that the last match info has space for the capture registers and the
+  // additional information.
+  __ LoadP(
+      r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+  __ AddP(r4, r3, Operand(RegExpImpl::kLastMatchOverhead));
+  __ SmiUntag(r0, r2);
+  __ CmpP(r4, r0);
+  __ bgt(&runtime);
+
+  // r3: number of capture registers
+  // subject: subject string
+  // Store the capture count.
+  __ SmiTag(r4, r3);
+  __ StoreP(r4, FieldMemOperand(last_match_info_elements,
+                                RegExpImpl::kLastCaptureCountOffset));
+  // Store last subject and last input.
+  __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+                                     RegExpImpl::kLastSubjectOffset));
+  __ LoadRR(r4, subject);
+  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
+                      subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ LoadRR(subject, r4);
+  __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+                                     RegExpImpl::kLastInputOffset));
+  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
+                      subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Get the static offsets vector filled by the native regexp code.
+  ExternalReference address_of_static_offsets_vector =
+      ExternalReference::address_of_static_offsets_vector(isolate());
+  __ mov(r4, Operand(address_of_static_offsets_vector));
+
+  // r3: number of capture registers
+  // r4: offsets vector
+  Label next_capture;
+  // Capture register counter starts from the number of capture registers and
+  // counts down until wrapping after zero.
+  __ AddP(
+      r2, last_match_info_elements,
+      Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+  __ AddP(r4, Operand(-kIntSize));  // bias down for the pre-increment load
+  __ bind(&next_capture);
+  // Read the value from the static offsets vector buffer.
+  __ ly(r5, MemOperand(r4, kIntSize));
+  __ lay(r4, MemOperand(r4, kIntSize));
+  // Store the smi value in the last match info.
+  __ SmiTag(r5);
+  __ StoreP(r5, MemOperand(r2, kPointerSize));
+  __ lay(r2, MemOperand(r2, kPointerSize));
+  __ BranchOnCount(r3, &next_capture);
+
+  // Return last match info.
+  __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
+  __ la(sp, MemOperand(sp, (4 * kPointerSize)));
+  __ Ret();
+
+  // Do the runtime call to execute the regexp.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kRegExpExec);
+
+  // Deferred code for string handling.
+  // (5) Long external string? If not, go to (7).
+  __ bind(&not_seq_nor_cons);
+  // Compare flags are still set.
+  __ bgt(&not_long_external, Label::kNear);  // Go to (7).
+
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
+  __ bind(&external_string);
+  __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    STATIC_ASSERT(kIsIndirectStringMask == 1);
+    __ tmll(r2, Operand(kIsIndirectStringMask));
+    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+  }
+  __ LoadP(subject,
+           FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ SubP(subject, subject,
+          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ b(&seq_string);  // Go to (4).
+
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
+  __ bind(&not_long_external);
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ mov(r0, Operand(kIsNotStringMask | kShortExternalStringMask));
+  __ AndP(r0, r3);
+  __ bne(&runtime);
+
+  // (8) Sliced string.  Replace subject with parent.  Go to (4).
+  // Load offset into ip and replace subject string with parent.
+  __ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+  __ SmiUntag(ip);
+  __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+  __ b(&check_underlying);  // Go to (4).
+#endif  // V8_INTERPRETED_REGEXP
+}
+
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+  // r2 : number of arguments to the construct function
+  // r3 : the function to call
+  // r4 : feedback vector
+  // r5 : slot in feedback vector (Smi)
+  FrameScope scope(masm, StackFrame::INTERNAL);
+
+  // Number-of-arguments register must be smi-tagged to call out.
+  __ SmiTag(r2);
+  __ Push(r5, r4, r3, r2);
+
+  __ CallStub(stub);
+
+  __ Pop(r5, r4, r3, r2);
+  __ SmiUntag(r2);
+}
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a feedback vector slot.  Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // r2 : number of arguments to the construct function
+  // r3 : the function to call
+  // r4 : feedback vector
+  // r5 : slot in feedback vector (Smi)
+  Label initialize, done, miss, megamorphic, not_array_function;
+
+  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+            masm->isolate()->heap()->megamorphic_symbol());
+  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+            masm->isolate()->heap()->uninitialized_symbol());
+
+  // Load the cache state into r7.
+  __ SmiToPtrArrayOffset(r7, r5);
+  __ AddP(r7, r4, r7);
+  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
+  // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
+  // this position in a symbol (see static asserts in type-feedback-vector.h).
+  Label check_allocation_site;
+  Register feedback_map = r8;
+  Register weak_value = r9;
+  __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
+  __ CmpP(r3, weak_value);
+  __ beq(&done);
+  __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
+  __ beq(&done);
+  __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
+  __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
+  __ bne(&check_allocation_site);
+
+  // If the weak cell is cleared, we have a new chance to become monomorphic.
+  __ JumpIfSmi(weak_value, &initialize);
+  __ b(&megamorphic);
+
+  __ bind(&check_allocation_site);
+  // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the megamorphic
+  // sentinel, then we have in the slot either some other function or an
+  // AllocationSite.
+  __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
+  __ bne(&miss);
+
+  // Make sure the function is the Array() function
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
+  __ CmpP(r3, r7);
+  __ bne(&megamorphic);
+  __ b(&done);
+
+  __ bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+  // megamorphic.
+  __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+  __ beq(&initialize);
+  // MegamorphicSentinel is an immortal immovable object (undefined) so no
+  // write-barrier is needed.
+  __ bind(&megamorphic);
+  __ SmiToPtrArrayOffset(r7, r5);
+  __ AddP(r7, r4, r7);
+  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+  __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+  __ jmp(&done);
+
+  // An uninitialized cache is patched with the function
+  __ bind(&initialize);
+
+  // Make sure the function is the Array() function.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
+  __ CmpP(r3, r7);
+  __ bne(&not_array_function);
+
+  // The target function is the Array constructor,
+  // Create an AllocationSite if we don't already have it, store it in the
+  // slot.
+  CreateAllocationSiteStub create_stub(masm->isolate());
+  CallStubInRecordCallTarget(masm, &create_stub);
+  __ b(&done);
+
+  __ bind(&not_array_function);
+
+  CreateWeakCellStub weak_cell_stub(masm->isolate());
+  CallStubInRecordCallTarget(masm, &weak_cell_stub);
+  __ bind(&done);
+}
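+// Illustrative sketch of the cache transitions handled above (pseudo-C++, not
+// generated code):
+//
+//   if (slot is WeakCell holding fn || slot == megamorphic_symbol) {
+//     // monomorphic hit or already megamorphic: nothing to do
+//   } else if (slot is cleared WeakCell || slot == uninitialized_symbol) {
+//     slot = IsArrayFunction(fn) ? new AllocationSite() : new WeakCell(fn);
+//   } else if (slot is AllocationSite && IsArrayFunction(fn)) {
+//     // monomorphic hit on the Array function: nothing to do
+//   } else {
+//     slot = megamorphic_symbol;  // any other mismatch goes megamorphic
+//   }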
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+  // r2 : number of arguments
+  // r3 : the function to call
+  // r4 : feedback vector
+  // r5 : slot in feedback vector (Smi, for RecordCallTarget)
+
+  Label non_function;
+  // Check that the function is not a smi.
+  __ JumpIfSmi(r3, &non_function);
+  // Check that the function is a JSFunction.
+  __ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE);
+  __ bne(&non_function);
+
+  GenerateRecordCallTarget(masm);
+
+  __ SmiToPtrArrayOffset(r7, r5);
+  __ AddP(r7, r4, r7);
+  // Put the AllocationSite from the feedback vector into r4, or undefined.
+  __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize));
+  __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
+  __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
+  Label feedback_register_initialized;
+  __ beq(&feedback_register_initialized);
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ bind(&feedback_register_initialized);
+
+  __ AssertUndefinedOrAllocationSite(r4, r7);
+
+  // Pass function as new target.
+  __ LoadRR(r5, r3);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
+  __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+
+  __ bind(&non_function);
+  __ LoadRR(r5, r3);
+  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+  // r3 - function
+  // r5 - slot id
+  // r4 - vector
+  // r6 - allocation site (loaded from vector[slot])
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
+  __ CmpP(r3, r7);
+  __ bne(miss);
+
+  __ mov(r2, Operand(arg_count()));
+
+  // Increment the call count for monomorphic function calls.
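+  // The call count is kept in the feedback vector element immediately
+  // following the slot's feedback entry.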
+  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  __ SmiToPtrArrayOffset(r7, r5);
+  __ AddP(r4, r4, r7);
+  __ LoadP(r5, FieldMemOperand(r4, count_offset));
+  __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+  __ StoreP(r5, FieldMemOperand(r4, count_offset), r0);
+
+  __ LoadRR(r4, r6);
+  __ LoadRR(r5, r3);
+  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  __ TailCallStub(&stub);
+}
+
+void CallICStub::Generate(MacroAssembler* masm) {
+  // r3 - function
+  // r5 - slot id (Smi)
+  // r4 - vector
+  Label extra_checks_or_miss, call, call_function;
+  int argc = arg_count();
+  ParameterCount actual(argc);
+
+  // The checks. First, does r3 match the recorded monomorphic target?
+  __ SmiToPtrArrayOffset(r8, r5);
+  __ AddP(r8, r4, r8);
+  __ LoadP(r6, FieldMemOperand(r8, FixedArray::kHeaderSize));
+
+  // We don't know that we have a weak cell. We might have a private symbol
+  // or an AllocationSite, but the memory is safe to examine.
+  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+  // FixedArray.
+  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+  // computed, meaning that it can't appear to be a pointer. If the low bit is
+  // 0, then hash is computed, but the 0 bit prevents the field from appearing
+  // to be a pointer.
+  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+                    WeakCell::kValueOffset &&
+                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+  __ LoadP(r7, FieldMemOperand(r6, WeakCell::kValueOffset));
+  __ CmpP(r3, r7);
+  __ bne(&extra_checks_or_miss, Label::kNear);
+
+  // The compare above could have been a SMI/SMI comparison. Guard against this
+  // convincing us that we have a monomorphic JSFunction.
+  __ JumpIfSmi(r3, &extra_checks_or_miss);
+
+  // Increment the call count for monomorphic function calls.
+  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  __ LoadP(r5, FieldMemOperand(r8, count_offset));
+  __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+  __ StoreP(r5, FieldMemOperand(r8, count_offset), r0);
+
+  __ bind(&call_function);
+  __ mov(r2, Operand(argc));
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&extra_checks_or_miss);
+  Label uninitialized, miss, not_allocation_site;
+
+  __ CompareRoot(r6, Heap::kmegamorphic_symbolRootIndex);
+  __ beq(&call);
+
+  // Verify that r6 contains an AllocationSite
+  __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+  __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
+  __ bne(&not_allocation_site);
+
+  // We have an allocation site.
+  HandleArrayCase(masm, &miss);
+
+  __ bind(&not_allocation_site);
+
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ b(&miss);
+  }
+
+  __ CompareRoot(r6, Heap::kuninitialized_symbolRootIndex);
+  __ beq(&uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(r6);
+  __ CompareObjectType(r6, r7, r7, JS_FUNCTION_TYPE);
+  __ bne(&miss);
+  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+  __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
+
+  __ bind(&call);
+  __ mov(r2, Operand(argc));
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(r3, &miss);
+
+  // Goto miss case if we do not have a function.
+  __ CompareObjectType(r3, r6, r6, JS_FUNCTION_TYPE);
+  __ bne(&miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r6);
+  __ CmpP(r3, r6);
+  __ beq(&miss);
+
+  // Make sure the function belongs to the same native context.
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset));
+  __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX));
+  __ LoadP(ip, NativeContextMemOperand());
+  __ CmpP(r6, ip);
+  __ bne(&miss);
+
+  // Initialize the call counter.
+  __ LoadSmiLiteral(r7, Smi::FromInt(CallICNexus::kCallCountIncrement));
+  __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
+
+  // Store the function. Use a stub since we need a frame for allocation.
+  // r4 - vector
+  // r5 - slot
+  // r3 - function
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    CreateWeakCellStub create_stub(masm->isolate());
+    __ Push(r3);
+    __ CallStub(&create_stub);
+    __ Pop(r3);
+  }
+
+  __ b(&call_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
+  __ bind(&miss);
+  GenerateMiss(masm);
+
+  __ b(&call);
+}
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+
+  // Push the function and feedback info.
+  __ Push(r3, r4, r5);
+
+  // Call the entry.
+  __ CallRuntime(Runtime::kCallIC_Miss);
+
+  // Move result to r3 and exit the internal frame.
+  __ LoadRR(r3, r2);
+}
+
+// StringCharCodeAtGenerator
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  // If the receiver is a smi trigger the non-string case.
+  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+    __ JumpIfSmi(object_, receiver_not_string_);
+
+    // Fetch the instance type of the receiver into result register.
+    __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+    __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+    // If the receiver is not a string trigger the non-string case.
+    __ mov(r0, Operand(kIsNotStringMask));
+    __ AndP(r0, result_);
+    __ bne(receiver_not_string_);
+  }
+
+  // If the index is non-smi trigger the non-smi case.
+  __ JumpIfNotSmi(index_, &index_not_smi_);
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
+  __ CmpLogicalP(ip, index_);
+  __ ble(index_out_of_range_);
+
+  __ SmiUntag(index_);
+
+  StringCharLoadGenerator::Generate(masm, object_, index_, result_,
+                                    &call_runtime_);
+
+  __ SmiTag(result_);
+  __ bind(&exit_);
+}
+
+void StringCharCodeAtGenerator::GenerateSlow(
+    MacroAssembler* masm, EmbedMode embed_mode,
+    const RuntimeCallHelper& call_helper) {
+  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+  // Index is not a smi.
+  __ bind(&index_not_smi_);
+  // If index is a heap number, try converting it to an integer.
+  __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
+              DONT_DO_SMI_CHECK);
+  call_helper.BeforeCall(masm);
+  if (embed_mode == PART_OF_IC_HANDLER) {
+    __ Push(LoadWithVectorDescriptor::VectorRegister(),
+            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
+  } else {
+    // index_ is consumed by runtime conversion function.
+    __ Push(object_, index_);
+  }
+  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
+  } else {
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    // NumberToSmi discards numbers that are not exact integers.
+    __ CallRuntime(Runtime::kNumberToSmi);
+  }
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+  __ Move(index_, r2);
+  if (embed_mode == PART_OF_IC_HANDLER) {
+    __ Pop(LoadWithVectorDescriptor::VectorRegister(),
+           LoadWithVectorDescriptor::SlotRegister(), object_);
+  } else {
+    __ pop(object_);
+  }
+  // Reload the instance type.
+  __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  call_helper.AfterCall(masm);
+  // If index is still not a smi, it must be out of range.
+  __ JumpIfNotSmi(index_, index_out_of_range_);
+  // Otherwise, return to the fast path.
+  __ b(&got_smi_index_);
+
+  // Call runtime. We get here when the receiver is a string and the
+  // index is a number, but the code for getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+  __ bind(&call_runtime_);
+  call_helper.BeforeCall(masm);
+  __ SmiTag(index_);
+  __ Push(object_, index_);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT);
+  __ Move(result_, r2);
+  call_helper.AfterCall(masm);
+  __ b(&exit_);
+
+  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+  __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
+  __ OrP(r0, r0, Operand(kSmiTagMask));
+  __ AndP(r0, code_, r0);
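+  // A non-zero result means code_ is either not a smi or is outside the
+  // one-byte character range.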
+  __ bne(&slow_case_);
+
+  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+  // At this point code register contains smi tagged one-byte char code.
+  __ LoadRR(r0, code_);
+  __ SmiToPtrArrayOffset(code_, code_);
+  __ AddP(result_, code_);
+  __ LoadRR(code_, r0);
+  __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+  __ beq(&slow_case_);
+  __ bind(&exit_);
+}
+
+void StringCharFromCodeGenerator::GenerateSlow(
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+  __ bind(&slow_case_);
+  call_helper.BeforeCall(masm);
+  __ push(code_);
+  __ CallRuntime(Runtime::kStringCharFromCode);
+  __ Move(result_, r2);
+  call_helper.AfterCall(masm);
+  __ b(&exit_);
+
+  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 };
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+                                          Register src, Register count,
+                                          Register scratch,
+                                          String::Encoding encoding) {
+  if (FLAG_debug_code) {
+    // Check that destination is word aligned.
+    __ mov(r0, Operand(kPointerAlignmentMask));
+    __ AndP(r0, dest);
+    __ Check(eq, kDestinationOfCopyNotAligned, cr0);
+  }
+
+  // Nothing to do for zero characters.
+  Label done;
+  if (encoding == String::TWO_BYTE_ENCODING) {
+    // double the length
+    __ AddP(count, count, count);
+    __ beq(&done, Label::kNear);
+  } else {
+    __ CmpP(count, Operand::Zero());
+    __ beq(&done, Label::kNear);
+  }
+
+  // Copy count bytes from src to dst.
+  Label byte_loop;
+  // TODO(joransiu): Convert into MVC loop
+  __ bind(&byte_loop);
+  __ LoadlB(scratch, MemOperand(src));
+  __ la(src, MemOperand(src, 1));
+  __ stc(scratch, MemOperand(dest));
+  __ la(dest, MemOperand(dest, 1));
+  __ BranchOnCount(count, &byte_loop);
+
+  __ bind(&done);
+}
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  lr: return address
+  //  sp[0]: to
+  //  sp[4]: from
+  //  sp[8]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  const int kToOffset = 0 * kPointerSize;
+  const int kFromOffset = 1 * kPointerSize;
+  const int kStringOffset = 2 * kPointerSize;
+
+  __ LoadP(r4, MemOperand(sp, kToOffset));
+  __ LoadP(r5, MemOperand(sp, kFromOffset));
+
+  // If either to or from has the smi tag bit set, bail out to the generic
+  // runtime.
+  __ JumpIfNotSmi(r4, &runtime);
+  __ JumpIfNotSmi(r5, &runtime);
+  __ SmiUntag(r4);
+  __ SmiUntag(r5);
+  // Both r4 and r5 are untagged integers.
+
+  // We want to bail out to the runtime here if from is negative.
+  __ blt(&runtime);  // From < 0.
+
+  __ CmpLogicalP(r5, r4);
+  __ bgt(&runtime);  // Fail if from > to.
+  __ SubP(r4, r4, r5);
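+  // r4 now holds the substring length (to - from).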
+
+  // Make sure first argument is a string.
+  __ LoadP(r2, MemOperand(sp, kStringOffset));
+  __ JumpIfSmi(r2, &runtime);
+  Condition is_string = masm->IsObjectStringType(r2, r3);
+  __ b(NegateCondition(is_string), &runtime);
+
+  Label single_char;
+  __ CmpP(r4, Operand(1));
+  __ b(eq, &single_char);
+
+  // Short-cut for the case of trivial substring.
+  Label return_r2;
+  // r2: original string
+  // r4: result string length
+  __ LoadP(r6, FieldMemOperand(r2, String::kLengthOffset));
+  __ SmiUntag(r0, r6);
+  __ CmpLogicalP(r4, r0);
+  // Return original string.
+  __ beq(&return_r2);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ bgt(&runtime);
+  // Shorter than original string's length: an actual substring.
+
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into r7.
+  // r2: original string
+  // r3: instance type
+  // r4: length
+  // r5: from index (untagged)
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ mov(r0, Operand(kIsIndirectStringMask));
+  __ AndP(r0, r3);
+  __ beq(&seq_or_external_string);
+
+  __ mov(r0, Operand(kSlicedNotConsMask));
+  __ AndP(r0, r3);
+  __ bne(&sliced_string);
+  // Cons string.  Check whether it is flat, then fetch first part.
+  __ LoadP(r7, FieldMemOperand(r2, ConsString::kSecondOffset));
+  __ CompareRoot(r7, Heap::kempty_stringRootIndex);
+  __ bne(&runtime);
+  __ LoadP(r7, FieldMemOperand(r2, ConsString::kFirstOffset));
+  // Update instance type.
+  __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
+  __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+  __ b(&underlying_unpacked);
+
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and correct start index by offset.
+  __ LoadP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
+  __ LoadP(r6, FieldMemOperand(r2, SlicedString::kOffsetOffset));
+  __ SmiUntag(r3, r6);
+  __ AddP(r5, r3);  // Add offset to index.
+  // Update instance type.
+  __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset));
+  __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+  __ b(&underlying_unpacked);
+
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the expected register.
+  __ LoadRR(r7, r2);
+
+  __ bind(&underlying_unpacked);
+
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    // r7: underlying subject string
+    // r3: instance type of underlying subject string
+    // r4: length
+    // r5: adjusted start index (untagged)
+    __ CmpP(r4, Operand(SlicedString::kMinLength));
+    // Short slice.  Copy instead of slicing.
+    __ blt(&copy_routine);
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ mov(r0, Operand(kStringEncodingMask));
+    __ AndP(r0, r3);
+    __ beq(&two_byte_slice);
+    __ AllocateOneByteSlicedString(r2, r4, r8, r9, &runtime);
+    __ b(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(r2, r4, r8, r9, &runtime);
+    __ bind(&set_slice_header);
+    __ SmiTag(r5);
+    __ StoreP(r7, FieldMemOperand(r2, SlicedString::kParentOffset));
+    __ StoreP(r5, FieldMemOperand(r2, SlicedString::kOffsetOffset));
+    __ b(&return_r2);
+
+    __ bind(&copy_routine);
+  }
+
+  // r7: underlying subject string
+  // r3: instance type of underlying subject string
+  // r4: length
+  // r5: adjusted start index (untagged)
+  Label two_byte_sequential, sequential_string, allocate_result;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ mov(r0, Operand(kExternalStringTag));
+  __ AndP(r0, r3);
+  __ beq(&sequential_string);
+
+  // Handle external string.
+  // Rule out short external strings.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ mov(r0, Operand(kShortExternalStringTag));
+  __ AndP(r0, r3);
+  __ bne(&runtime);
+  __ LoadP(r7, FieldMemOperand(r7, ExternalString::kResourceDataOffset));
+  // r7 already points to the first character of underlying string.
+  __ b(&allocate_result);
+
+  __ bind(&sequential_string);
+  // Locate first character of underlying subject string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ AddP(r7, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&allocate_result);
+  // Allocate the result string.
+  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+  __ mov(r0, Operand(kStringEncodingMask));
+  __ AndP(r0, r3);
+  __ beq(&two_byte_sequential);
+
+  // Allocate and copy the resulting one-byte string.
+  __ AllocateOneByteString(r2, r4, r6, r8, r9, &runtime);
+
+  // Locate first character of substring to copy.
+  __ AddP(r7, r5);
+  // Locate first character of result.
+  __ AddP(r3, r2, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  // r2: result string
+  // r3: first character of result string
+  // r4: result string length
+  // r7: first character of substring to copy
+  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
+                                       String::ONE_BYTE_ENCODING);
+  __ b(&return_r2);
+
+  // Allocate and copy the resulting two-byte string.
+  __ bind(&two_byte_sequential);
+  __ AllocateTwoByteString(r2, r4, r6, r8, r9, &runtime);
+
+  // Locate first character of substring to copy.
+  __ ShiftLeftP(r3, r5, Operand(1));
+  __ AddP(r7, r3);
+  // Locate first character of result.
+  __ AddP(r3, r2, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r2: result string.
+  // r3: first character of result.
+  // r4: result length.
+  // r7: first character of substring to copy.
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5,
+                                       String::TWO_BYTE_ENCODING);
+
+  __ bind(&return_r2);
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1, r5, r6);
+  __ Drop(3);
+  __ Ret();
+
+  // Just jump to runtime to create the substring.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kSubString);
+
+  __ bind(&single_char);
+  // r2: original string
+  // r3: instance type
+  // r4: length
+  // r5: from index (untagged)
+  __ SmiTag(r5, r5);
+  StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime,
+                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+  generator.GenerateFast(masm);
+  __ Drop(3);
+  __ Ret();
+  generator.SkipSlow(masm, &runtime);
+}
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r2.
+  STATIC_ASSERT(kSmiTag == 0);
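+  // A smi is already a number, so return it unchanged.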
+  __ TestIfSmi(r2);
+  __ Ret(eq);
+
+  __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
+  // r2: receiver
+  // r3: receiver instance type
+  Label not_heap_number;
+  __ bne(&not_heap_number);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in r2.
+  __ AssertNotNumber(r2);
+
+  __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+  // r2: receiver
+  // r3: receiver instance type
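+  // String instance types are below FIRST_NONSTRING_TYPE, so tail call the
+  // string-to-number stub for them.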
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub, lt);
+
+  Label not_oddball;
+  __ CmpP(r3, Operand(ODDBALL_TYPE));
+  __ bne(&not_oddball, Label::kNear);
+  __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
+  __ b(r14);
+  __ bind(&not_oddball);
+
+  __ push(r2);  // Push argument.
+  __ TailCallRuntime(Runtime::kToNumber);
+}
+
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in r2.
+  __ AssertString(r2);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ LoadlW(r4, FieldMemOperand(r2, String::kHashFieldOffset));
+  __ And(r0, r4, Operand(String::kContainsCachedArrayIndexMask));
+  __ bne(&runtime);
+  __ IndexFromHash(r4, r2);
+  __ Ret();
+
+  __ bind(&runtime);
+  __ push(r2);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber);
+}
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+  // The ToString stub takes one argument in r2.
+  Label done;
+  Label is_number;
+  __ JumpIfSmi(r2, &is_number);
+
+  __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+  // r2: receiver
+  // r3: receiver instance type
+  __ blt(&done);
+
+  Label not_heap_number;
+  __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
+  __ bne(&not_heap_number);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ CmpP(r3, Operand(ODDBALL_TYPE));
+  __ bne(&not_oddball);
+  __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ push(r2);  // Push argument.
+  __ TailCallRuntime(Runtime::kToString);
+
+  __ bind(&done);
+  __ Ret();
+}
+
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in r2.
+  Label is_number;
+  __ JumpIfSmi(r2, &is_number);
+
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ CompareObjectType(r2, r3, r3, LAST_NAME_TYPE);
+  // r2: receiver
+  // r3: receiver instance type
+  __ Ret(le);
+
+  Label not_heap_number;
+  __ CmpP(r3, Operand(HEAP_NUMBER_TYPE));
+  __ bne(&not_heap_number);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ CmpP(r3, Operand(ODDBALL_TYPE));
+  __ bne(&not_oddball);
+  __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ push(r2);  // Push argument.
+  __ TailCallRuntime(Runtime::kToName);
+}
+
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                                   Register left,
+                                                   Register right,
+                                                   Register scratch1,
+                                                   Register scratch2) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
+  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ CmpP(length, scratch2);
+  __ beq(&check_zero_length);
+  __ bind(&strings_not_equal);
+  __ LoadSmiLiteral(r2, Smi::FromInt(NOT_EQUAL));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ CmpP(length, Operand::Zero());
+  __ bne(&compare_chars);
+  __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+  __ Ret();
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+                                  &strings_not_equal);
+
+  // Characters are equal.
+  __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+  __ Ret();
+}
+
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
+  Label skip, result_not_equal, compare_lengths;
+  // Find minimum length and length difference.
+  __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ SubP(scratch3, scratch1, scratch2 /*, LeaveOE, SetRC*/);
+  // Removing RC looks okay here.
+  Register length_delta = scratch3;
+  __ ble(&skip, Label::kNear);
+  __ LoadRR(scratch1, scratch2);
+  __ bind(&skip);
+  Register min_length = scratch1;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ CmpP(min_length, Operand::Zero());
+  __ beq(&compare_lengths);
+
+  // Compare loop.
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  &result_not_equal);
+
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use length_delta as result if it's zero.
+  __ LoadRR(r2, length_delta);
+  __ CmpP(length_delta, Operand::Zero());
+  __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // the last comparison performed in the loop above.
+  Label less_equal, equal;
+  __ ble(&less_equal);
+  __ LoadSmiLiteral(r2, Smi::FromInt(GREATER));
+  __ Ret();
+  __ bind(&less_equal);
+  __ beq(&equal);
+  __ LoadSmiLiteral(r2, Smi::FromInt(LESS));
+  __ bind(&equal);
+  __ Ret();
+}
+
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch1, Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ AddP(scratch1, length,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ AddP(left, scratch1);
+  __ AddP(right, scratch1);
+  __ LoadComplementRR(length, length);
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ LoadlB(scratch1, MemOperand(left, index));
+  __ LoadlB(r0, MemOperand(right, index));
+  __ CmpP(scratch1, r0);
+  __ bne(chars_not_equal);
+  __ AddP(index, Operand(1));
+  __ CmpP(index, Operand::Zero());
+  __ bne(&loop);
+}
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3    : left
+  //  -- r2    : right
+  // r3: second string
+  // -----------------------------------
+
+  // Load r4 with the allocation site.  We stick an undefined dummy value here
+  // and replace it with the real allocation site later when we instantiate this
+  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+  __ Move(r4, handle(isolate()->heap()->undefined_value()));
+
+  // Make sure that we actually patched the allocation site.
+  if (FLAG_debug_code) {
+    __ TestIfSmi(r4);
+    __ Assert(ne, kExpectedAllocationSite, cr0);
+    __ push(r4);
+    __ LoadP(r4, FieldMemOperand(r4, HeapObject::kMapOffset));
+    __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
+    __ pop(r4);
+    __ Assert(eq, kExpectedAllocationSite);
+  }
+
+  // Tail call into the stub that handles binary operations with allocation
+  // sites.
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
+  __ TailCallStub(&stub);
+}
+
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+  DCHECK_EQ(CompareICState::BOOLEAN, state());
+  Label miss;
+
+  __ CheckMap(r3, r4, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+  __ CheckMap(r2, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+  if (!Token::IsEqualityOp(op())) {
+    __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+    __ AssertSmi(r3);
+    __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
+    __ AssertSmi(r2);
+  }
+  __ SubP(r2, r3, r2);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
+  Label miss;
+  __ OrP(r4, r3, r2);
+  __ JumpIfNotSmi(r4, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    // __ sub(r2, r2, r3, SetCC);
+    __ SubP(r2, r2, r3);
+  } else {
+    // Untag before subtracting to avoid handling overflow.
+    __ SmiUntag(r3);
+    __ SmiUntag(r2);
+    __ SubP(r2, r3, r2);
+  }
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
+
+  Label generic_stub;
+  Label unordered, maybe_undefined1, maybe_undefined2;
+  Label miss;
+  Label equal, less_than;
+
+  if (left() == CompareICState::SMI) {
+    __ JumpIfNotSmi(r3, &miss);
+  }
+  if (right() == CompareICState::SMI) {
+    __ JumpIfNotSmi(r2, &miss);
+  }
+
+  // Inline the double comparison and fall back to the general compare stub
+  // if NaN is involved.
+  // Load left and right operand.
+  Label done, left, left_smi, right_smi;
+  __ JumpIfSmi(r2, &right_smi);
+  __ CheckMap(r2, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+              DONT_DO_SMI_CHECK);
+  __ LoadDouble(d1, FieldMemOperand(r2, HeapNumber::kValueOffset));
+  __ b(&left);
+  __ bind(&right_smi);
+  __ SmiToDouble(d1, r2);
+
+  __ bind(&left);
+  __ JumpIfSmi(r3, &left_smi);
+  __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+              DONT_DO_SMI_CHECK);
+  __ LoadDouble(d0, FieldMemOperand(r3, HeapNumber::kValueOffset));
+  __ b(&done);
+  __ bind(&left_smi);
+  __ SmiToDouble(d0, r3);
+
+  __ bind(&done);
+
+  // Compare operands
+  __ cdbr(d0, d1);
+
+  // Don't base result on status bits when a NaN is involved.
+  __ bunordered(&unordered);
+
+  // Return a result of -1, 0, or 1, based on status bits.
+  __ beq(&equal);
+  __ blt(&less_than);
+  //  assume greater than
+  __ LoadImmP(r2, Operand(GREATER));
+  __ Ret();
+  __ bind(&equal);
+  __ LoadImmP(r2, Operand(EQUAL));
+  __ Ret();
+  __ bind(&less_than);
+  __ LoadImmP(r2, Operand(LESS));
+  __ Ret();
+
+  __ bind(&unordered);
+  __ bind(&generic_stub);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&maybe_undefined1);
+  if (Token::IsOrderedRelationalCompareOp(op())) {
+    __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ bne(&miss);
+    __ JumpIfSmi(r3, &unordered);
+    __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
+    __ bne(&maybe_undefined2);
+    __ b(&unordered);
+  }
+
+  __ bind(&maybe_undefined2);
+  if (Token::IsOrderedRelationalCompareOp(op())) {
+    __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ beq(&unordered);
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+  Label miss, not_equal;
+
+  // Registers containing left and right operands respectively.
+  Register left = r3;
+  Register right = r2;
+  Register tmp1 = r4;
+  Register tmp2 = r5;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are internalized strings.
+  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ OrP(tmp1, tmp1, tmp2);
+  __ AndP(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  __ bne(&miss);
+
+  // Internalized strings are compared by identity.
+  __ CmpP(left, right);
+  __ bne(&not_equal);
+  // Make sure r2 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  DCHECK(right.is(r2));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+  __ bind(&not_equal);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == eq);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = r3;
+  Register right = r2;
+  Register tmp1 = r4;
+  Register tmp2 = r5;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are unique names. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+
+  // Unique names are compared by identity.
+  __ CmpP(left, right);
+  __ bne(&miss);
+  // Make sure r2 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  DCHECK(right.is(r2));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
+  Label miss, not_identical, is_symbol;
+
+  bool equality = Token::IsEqualityOp(op());
+
+  // Registers containing left and right operands respectively.
+  Register left = r3;
+  Register right = r2;
+  Register tmp1 = r4;
+  Register tmp2 = r5;
+  Register tmp3 = r6;
+  Register tmp4 = r7;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ OrP(tmp3, tmp1, tmp2);
+  __ AndP(r0, tmp3, Operand(kIsNotStringMask));
+  __ bne(&miss);
+
+  // Fast check for identical strings.
+  __ CmpP(left, right);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ bne(&not_identical);
+  __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
+  __ Ret();
+  __ bind(&not_identical);
+
+  // Handle not identical strings.
+
+  // Check that both strings are internalized strings. If they are, we're done
+  // because we already know they are not identical. We know they are both
+  // strings.
+  if (equality) {
+    DCHECK(GetCondition() == eq);
+    STATIC_ASSERT(kInternalizedTag == 0);
+    __ OrP(tmp3, tmp1, tmp2);
+    __ AndP(r0, tmp3, Operand(kIsNotInternalizedMask));
+    __ bne(&is_symbol);
+    // Make sure r2 is non-zero. At this point input operands are
+    // guaranteed to be non-zero.
+    DCHECK(right.is(r2));
+    __ Ret();
+    __ bind(&is_symbol);
+  }
+
+  // Check that both strings are sequential one-byte.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+                                                    &runtime);
+
+  // Compare flat one-byte strings. Returns when done.
+  if (equality) {
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+                                                  tmp2);
+  } else {
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+                                                    tmp2, tmp3);
+  }
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  if (equality) {
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left, right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+    __ SubP(r2, r2, r3);
+    __ Ret();
+  } else {
+    __ Push(left, right);
+    __ TailCallRuntime(Runtime::kStringCompare);
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+  DCHECK_EQ(CompareICState::RECEIVER, state());
+  Label miss;
+  __ AndP(r4, r3, r2);
+  __ JumpIfSmi(r4, &miss);
+
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  __ CompareObjectType(r2, r4, r4, FIRST_JS_RECEIVER_TYPE);
+  __ blt(&miss);
+  __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+  __ blt(&miss);
+
+  DCHECK(GetCondition() == eq);
+  __ SubP(r2, r2, r3);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
+  Label miss;
+  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
+  __ AndP(r4, r3, r2);
+  __ JumpIfSmi(r4, &miss);
+  __ GetWeakValue(r6, cell);
+  __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ CmpP(r4, r6);
+  __ bne(&miss);
+  __ CmpP(r5, r6);
+  __ bne(&miss);
+
+  if (Token::IsEqualityOp(op())) {
+    __ SubP(r2, r2, r3);
+    __ Ret();
+  } else {
+    if (op() == Token::LT || op() == Token::LTE) {
+      __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
+    } else {
+      __ LoadSmiLiteral(r4, Smi::FromInt(LESS));
+    }
+    __ Push(r3, r2, r4);
+    __ TailCallRuntime(Runtime::kCompare);
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+  {
+    // Call the runtime system in a fresh internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3, r2);
+    __ Push(r3, r2);
+    __ LoadSmiLiteral(r0, Smi::FromInt(op()));
+    __ push(r0);
+    __ CallRuntime(Runtime::kCompareIC_Miss);
+    // Compute the entry point of the rewritten stub.
+    __ AddP(r4, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore registers.
+    __ Pop(r3, r2);
+  }
+
+  __ JumpToJSEntry(r4);
+}
+
+// This stub is paired with DirectCEntryStub::GenerateCall
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  __ CleanseP(r14);
+
+  // Statement positions are expected to be recorded when the target
+  // address is loaded.
+  __ positions_recorder()->WriteRecordedPositions();
+
+  __ b(ip);  // Callee will return to R14 directly
+}
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+  // Native AIX/S390X Linux uses a function descriptor.
+  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+  __ LoadP(target, MemOperand(target, 0));  // Instruction address
+#else
+  // ip needs to be set for DirectCEntryStub::Generate, and also
+  // for ABI_CALL_VIA_IP.
+  __ Move(ip, target);
+#endif
+
+  __ call(GetCode(), RelocInfo::CODE_TARGET);  // Call the stub.
+}
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm, Label* miss, Label* done, Register receiver,
+    Register properties, Handle<Name> name, Register scratch0) {
+  DCHECK(name->IsUniqueName());
+  // If the names in slots 1 to kProbes - 1 for the hash value are not equal
+  // to the name, and the kProbes-th slot is unused (its name is the
+  // undefined value), the hash table is guaranteed not to contain the
+  // property. This holds even if some slots hold deleted properties (their
+  // names are the hole value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
+    __ SubP(index, Operand(1));
+    __ LoadSmiLiteral(
+        ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
+    __ AndP(index, ip);
+
+    // Scale the index by multiplying by the entry size.
+    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
+    __ ShiftLeftP(ip, index, Operand(1));
+    __ AddP(index, ip);  // index *= 3.
+
+    Register entity_name = scratch0;
+    // Finding undefined at this position means the name is not contained.
+    Register tmp = properties;
+    __ SmiToPtrArrayOffset(ip, index);
+    __ AddP(tmp, properties, ip);
+    __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    DCHECK(!tmp.is(entity_name));
+    __ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex);
+    __ beq(done);
+
+    // Stop if we found the property.
+    __ CmpP(entity_name, Operand(Handle<Name>(name)));
+    __ beq(miss);
+
+    Label good;
+    __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
+    __ beq(&good);
+
+    // Check if the entry name is not a unique name.
+    __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+    __ LoadlB(entity_name,
+              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+    __ bind(&good);
+
+    // Restore the properties.
+    __ LoadP(properties,
+             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  }
+
+  const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
+                          r4.bit() | r3.bit() | r2.bit());
+
+  __ LoadRR(r0, r14);
+  __ MultiPush(spill_mask);
+
+  __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ mov(r3, Operand(Handle<Name>(name)));
+  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ CmpP(r2, Operand::Zero());
+
+  __ MultiPop(spill_mask);  // MultiPop does not touch condition flags
+  __ LoadRR(r14, r0);
+
+  __ beq(done);
+  __ bne(miss);
+}
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+    MacroAssembler* masm, Label* miss, Label* done, Register elements,
+    Register name, Register scratch1, Register scratch2) {
+  DCHECK(!elements.is(scratch1));
+  DCHECK(!elements.is(scratch2));
+  DCHECK(!name.is(scratch1));
+  DCHECK(!name.is(scratch2));
+
+  __ AssertName(name);
+
+  // Compute the capacity mask.
+  __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ SmiUntag(scratch1);  // convert smi to int
+  __ SubP(scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the AND instruction that follows.
+      DCHECK(NameDictionary::GetProbeOffset(i) <
+             1 << (32 - Name::kHashFieldOffset));
+      __ AddP(scratch2,
+              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+    }
+    __ srl(scratch2, Operand(String::kHashShift));
+    __ AndP(scratch2, scratch1);
+
+    // Scale the index by multiplying by the entry size.
+    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+    __ ShiftLeftP(ip, scratch2, Operand(1));
+    __ AddP(scratch2, ip);
+
+    // Check if the key is identical to the name.
+    __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2));
+    __ AddP(scratch2, elements, ip);
+    __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ CmpP(name, ip);
+    __ beq(done);
+  }
+
+  const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
+                          r4.bit() | r3.bit() | r2.bit()) &
+                         ~(scratch1.bit() | scratch2.bit());
+
+  __ LoadRR(r0, r14);
+  __ MultiPush(spill_mask);
+  if (name.is(r2)) {
+    DCHECK(!elements.is(r3));
+    __ LoadRR(r3, name);
+    __ LoadRR(r2, elements);
+  } else {
+    __ LoadRR(r2, elements);
+    __ LoadRR(r3, name);
+  }
+  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ LoadRR(r1, r2);
+  __ LoadRR(scratch2, r4);
+  __ MultiPop(spill_mask);
+  __ LoadRR(r14, r0);
+
+  __ CmpP(r1, Operand::Zero());
+  __ bne(done);
+  __ beq(miss);
+}
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
+  // Registers:
+  //  result: NameDictionary to probe
+  //  r3: key
+  //  dictionary: NameDictionary to probe.
+  //  index: will hold an index of entry if lookup is successful.
+  //         might alias with result_.
+  // Returns:
+  //  result_ is zero if lookup failed, non-zero otherwise.
+
+  Register result = r2;
+  Register dictionary = r2;
+  Register key = r3;
+  Register index = r4;
+  Register mask = r5;
+  Register hash = r6;
+  Register undefined = r7;
+  Register entry_key = r8;
+  Register scratch = r8;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ SmiUntag(mask);
+  __ SubP(mask, Operand(1));
+
+  __ LoadlW(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the AND instruction that follows.
+      DCHECK(NameDictionary::GetProbeOffset(i) <
+             1 << (32 - Name::kHashFieldOffset));
+      __ AddP(index, hash,
+              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+    } else {
+      __ LoadRR(index, hash);
+    }
+    __ ShiftRight(r0, index, Operand(String::kHashShift));
+    __ AndP(index, r0, mask);
+
+    // Scale the index by multiplying by the entry size.
+    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
+    __ ShiftLeftP(scratch, index, Operand(1));
+    __ AddP(index, scratch);  // index *= 3.
+
+    __ ShiftLeftP(scratch, index, Operand(kPointerSizeLog2));
+    __ AddP(index, dictionary, scratch);
+    __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Finding undefined at this position means the name is not contained.
+    __ CmpP(entry_key, undefined);
+    __ beq(&not_in_dictionary);
+
+    // Stop if we found the property.
+    __ CmpP(entry_key, key);
+    __ beq(&in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a unique name.
+      __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ LoadlB(entry_key,
+                FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode() == POSITIVE_LOOKUP) {
+    __ LoadImmP(result, Operand::Zero());
+    __ Ret();
+  }
+
+  __ bind(&in_dictionary);
+  __ LoadImmP(result, Operand(1));
+  __ Ret();
+
+  __ bind(&not_in_dictionary);
+  __ LoadImmP(result, Operand::Zero());
+  __ Ret();
+}
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+    Isolate* isolate) {
+  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+  stub1.GetCode();
+  // Hydrogen code stubs need stub2 at snapshot time.
+  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+  stub2.GetCode();
+}
+
+// Takes the input in 3 registers: address_, value_, and object_.  A pointer
+// to the value has just been written into the object; now this stub makes
+// sure we keep the GC informed.  The word in the object where the value has
+// been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two branch instructions are generated with labels so as to
+  // get the offset fixed up correctly by the bind(Label*) call.  We patch
+  // it back and forth between branch condition True and False
+  // when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+
+  // Clear the bit, branch on True for NOP action initially
+  __ b(CC_NOP, &skip_to_incremental_noncompacting);
+  __ b(CC_NOP, &skip_to_incremental_compacting);
+
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  // Patching is not required on S390 as the initial path is effectively a NOP.
+}
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(), &dont_need_remembered_set);
+
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r2.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  DCHECK(!address.is(regs_.object()));
+  DCHECK(!address.is(r2));
+  __ LoadRR(address, regs_.address());
+  __ LoadRR(r2, regs_.object());
+  __ LoadRR(r3, address);
+  __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallCFunction(
+      ExternalReference::incremental_marking_record_write_function(isolate()),
+      argument_count);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
+}
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
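+  // Mask the object address down to its MemoryChunk (page) header.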
+  __ AndP(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+  __ LoadP(
+      regs_.scratch1(),
+      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+  __ SubP(regs_.scratch1(), regs_.scratch1(), Operand(1));
+  __ StoreP(
+      regs_.scratch1(),
+      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+  __ CmpP(regs_.scratch1(), Operand::Zero());  // S390, we could do better here
+  __ blt(&need_incremental);
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask, eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ JumpIfWhite(regs_.scratch0(),  // The value.
+                 regs_.scratch1(),  // Scratch.
+                 regs_.object(),    // Scratch.
+                 regs_.address(),   // Scratch.
+                 &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+  CEntryStub ces(isolate(), 1, kSaveFPRegs);
+  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
+  __ LoadP(r3, MemOperand(fp, parameter_count_offset));
+  if (function_mode() == JS_FUNCTION_STUB_MODE) {
+    __ AddP(r3, Operand(1));
+  }
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
+  __ la(sp, MemOperand(r3, sp));
+  __ Ret();
+}
+
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+  LoadICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
+  KeyedLoadICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+void CallICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(r4);
+  CallICStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
+
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+                             Register receiver_map, Register scratch1,
+                             Register scratch2, bool is_polymorphic,
+                             Label* miss) {
+  // feedback initially contains the feedback array
+  Label next_loop, prepare_next;
+  Label start_polymorphic;
+
+  Register cached_map = scratch1;
+
+  __ LoadP(cached_map,
+           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+  __ CmpP(receiver_map, cached_map);
+  __ bne(&start_polymorphic, Label::kNear);
+  // found, now call handler.
+  Register handler = feedback;
+  __ LoadP(handler,
+           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(ip);
+
+  Register length = scratch2;
+  __ bind(&start_polymorphic);
+  __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+  if (!is_polymorphic) {
+    // If the IC could be monomorphic we have to make sure we don't go past the
+    // end of the feedback array.
+    __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
+    __ beq(miss);
+  }
+
+  Register too_far = length;
+  Register pointer_reg = feedback;
+
+  // +-----+------+------+-----+-----+ ... ----+
+  // | map | len  | wm0  | h0  | wm1 |      hN |
+  // +-----+------+------+-----+-----+ ... ----+
+  //                 0      1     2        len-1
+  //                              ^              ^
+  //                              |              |
+  //                         pointer_reg      too_far
+  //                         aka feedback     scratch2
+  // also need receiver_map
+  // use cached_map (scratch1) to look in the weak map values.
+  __ SmiToPtrArrayOffset(r0, length);
+  __ AddP(too_far, feedback, r0);
+  __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ AddP(pointer_reg, feedback,
+          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
+
+  __ bind(&next_loop);
+  __ LoadP(cached_map, MemOperand(pointer_reg));
+  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+  __ CmpP(receiver_map, cached_map);
+  __ bne(&prepare_next, Label::kNear);
+  __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
+  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(ip);
+
+  __ bind(&prepare_next);
+  __ AddP(pointer_reg, Operand(kPointerSize * 2));
+  __ CmpP(pointer_reg, too_far);
+  __ blt(&next_loop, Label::kNear);
+
+  // We exhausted our array of map handler pairs.
+  __ b(miss);
+}
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+                                  Register receiver_map, Register feedback,
+                                  Register vector, Register slot,
+                                  Register scratch, Label* compare_map,
+                                  Label* load_smi_map, Label* try_array) {
+  __ JumpIfSmi(receiver, load_smi_map);
+  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ bind(compare_map);
+  Register cached_map = scratch;
+  // Move the weak map into the weak_cell register.
+  __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
+  __ CmpP(cached_map, receiver_map);
+  __ bne(try_array);
+  Register handler = feedback;
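+  // The handler is stored in the vector slot immediately after the weak cell.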
+  __ SmiToPtrArrayOffset(r1, slot);
+  __ LoadP(handler,
+           FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
+  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(ip);
+}
+
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r3
+  Register name = LoadWithVectorDescriptor::NameRegister();          // r4
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r5
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r2
+  Register feedback = r6;
+  Register receiver_map = r7;
+  Register scratch1 = r8;
+
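+  // Load the feedback entry for this slot from the type feedback vector.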
+  __ SmiToPtrArrayOffset(r1, slot);
+  __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  __ bne(&not_array, Label::kNear);
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
+
+  __ bind(&not_array);
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ bne(&miss);
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+                                               receiver, name, feedback,
+                                               receiver_map, scratch1, r9);
+
+  __ bind(&miss);
+  LoadIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ b(&compare_map);
+}
+
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r3
+  Register key = LoadWithVectorDescriptor::NameRegister();           // r4
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r5
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r2
+  Register feedback = r6;
+  Register receiver_map = r7;
+  Register scratch1 = r8;
+
+  __ SmiToPtrArrayOffset(r1, slot);
+  __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  __ bind(&try_array);
+  // Is it a fixed array?
+  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  __ bne(&not_array);
+
+  // We have a polymorphic element handler.
+  Label polymorphic, try_poly_name;
+  __ bind(&polymorphic);
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
+
+  __ bind(&not_array);
+  // Is it generic?
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ bne(&try_poly_name);
+  Handle<Code> megamorphic_stub =
+      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ CmpP(key, feedback);
+  __ bne(&miss);
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ SmiToPtrArrayOffset(r1, slot);
+  __ LoadP(feedback,
+           FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
+
+  __ bind(&miss);
+  KeyedLoadIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ b(&compare_map);
+}
+
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+  VectorStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
+  VectorKeyedStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r3
+  Register key = VectorStoreICDescriptor::NameRegister();           // r4
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // r5
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // r6
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2));          // r2
+  Register feedback = r7;
+  Register receiver_map = r8;
+  Register scratch1 = r9;
+
+  __ SmiToPtrArrayOffset(r0, slot);
+  __ AddP(feedback, vector, r0);
+  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  __ bne(&not_array);
+
+  Register scratch2 = ip;
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+                   &miss);
+
+  __ bind(&not_array);
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ bne(&miss);
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+      scratch1, scratch2);
+
+  __ bind(&miss);
+  StoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ b(&compare_map);
+}
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+                                       Register receiver_map, Register scratch1,
+                                       Register scratch2, Label* miss) {
+  // feedback initially contains the feedback array
+  Label next_loop, prepare_next;
+  Label start_polymorphic;
+  Label transition_call;
+
+  Register cached_map = scratch1;
+  Register too_far = scratch2;
+  Register pointer_reg = feedback;
+  __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  //                 0      1     2              len-1
+  //                 ^                                 ^
+  //                 |                                 |
+  //             pointer_reg                        too_far
+  //             aka feedback                       scratch2
+  // also need receiver_map
+  // use cached_map (scratch1) to look in the weak map values.
+  __ SmiToPtrArrayOffset(r0, too_far);
+  __ AddP(too_far, feedback, r0);
+  __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ AddP(pointer_reg, feedback,
+          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+  __ bind(&next_loop);
+  __ LoadP(cached_map, MemOperand(pointer_reg));
+  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+  __ CmpP(receiver_map, cached_map);
+  __ bne(&prepare_next);
+  // Is it a transitioning store?
+  __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
+  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+  __ bne(&transition_call);
+  __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+  __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(ip);
+
+  __ bind(&transition_call);
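+  // The transition map is held in a weak cell; if the cell has been cleared
+  // (a Smi), fall back to the miss handler.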
+  __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+  __ JumpIfSmi(too_far, miss);
+
+  __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+  // Load the map into the correct register.
+  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  __ LoadRR(feedback, too_far);
+
+  __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(ip);
+
+  __ bind(&prepare_next);
+  __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+  __ CmpLogicalP(pointer_reg, too_far);
+  __ blt(&next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ b(miss);
+}
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r3
+  Register key = VectorStoreICDescriptor::NameRegister();           // r4
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // r5
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // r6
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2));          // r2
+  Register feedback = r7;
+  Register receiver_map = r8;
+  Register scratch1 = r9;
+
+  __ SmiToPtrArrayOffset(r0, slot);
+  __ AddP(feedback, vector, r0);
+  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  __ bind(&try_array);
+  // Is it a fixed array?
+  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  __ bne(&not_array);
+
+  // We have a polymorphic element handler.
+  Label polymorphic, try_poly_name;
+  __ bind(&polymorphic);
+
+  Register scratch2 = ip;
+
+  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+                             &miss);
+
+  __ bind(&not_array);
+  // Is it generic?
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ bne(&try_poly_name);
+  Handle<Code> megamorphic_stub =
+      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ CmpP(key, feedback);
+  __ bne(&miss);
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ SmiToPtrArrayOffset(r0, slot);
+  __ AddP(feedback, vector, r0);
+  __ LoadP(feedback,
+           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+                   &miss);
+
+  __ bind(&miss);
+  KeyedStoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ b(&compare_map);
+}
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+  if (masm->isolate()->function_entry_hook() != NULL) {
+    PredictableCodeSizeScope predictable(masm,
+#if V8_TARGET_ARCH_S390X
+                                         40);
+#elif V8_HOST_ARCH_S390
+                                         36);
+#else
+                                         32);
+#endif
+    ProfileEntryHookStub stub(masm->isolate());
+    __ CleanseP(r14);
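+    // Preserve the return address (r14) and ip across the entry hook call.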
+    __ Push(r14, ip);
+    __ CallStub(&stub);  // BRASL
+    __ Pop(r14, ip);
+  }
+}
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+// The entry hook is a "push lr" instruction (LAY+ST/STG), followed by a call.
+#if V8_TARGET_ARCH_S390X
+  const int32_t kReturnAddressDistanceFromFunctionStart =
+      Assembler::kCallTargetAddressOffset + 18;  // LAY + STG * 2
+#elif V8_HOST_ARCH_S390
+  const int32_t kReturnAddressDistanceFromFunctionStart =
+      Assembler::kCallTargetAddressOffset + 18;  // NILH + LAY + ST * 2
+#else
+  const int32_t kReturnAddressDistanceFromFunctionStart =
+      Assembler::kCallTargetAddressOffset + 14;  // LAY + ST * 2
+#endif
+
+  // This should contain all kJSCallerSaved registers.
+  const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers.
+                             r7.bit();         // Saved stack pointer.
+
+  // We also save r14+ip, so count here is one higher than the mask indicates.
+  const int32_t kNumSavedRegs = kNumJSCallerSaved + 3;
+
+  // Save all caller-save registers as this may be called from anywhere.
+  __ CleanseP(r14);
+  __ LoadRR(ip, r14);
+  __ MultiPush(kSavedRegs | ip.bit());
+
+  // Compute the function's address for the first argument.
+
+  __ SubP(r2, ip, Operand(kReturnAddressDistanceFromFunctionStart));
+
+  // The caller's return address is two slots above the saved temporaries.
+  // Grab that for the second argument to the hook.
+  __ lay(r3, MemOperand(sp, kNumSavedRegs * kPointerSize));
+
+  // Align the stack if necessary.
+  int frame_alignment = masm->ActivationFrameAlignment();
+  if (frame_alignment > kPointerSize) {
+    __ LoadRR(r7, sp);
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+  }
+
+#if !defined(USE_SIMULATOR)
+  uintptr_t entry_hook =
+      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
+  __ mov(ip, Operand(entry_hook));
+
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  // Function descriptor
+  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
+  __ LoadP(ip, MemOperand(ip, 0));
+// ip already set.
+#endif
+#endif
+
+  // The zLinux ABI requires the caller's frame to have sufficient space for
+  // the callee-preserved register save area.
+  __ LoadImmP(r0, Operand::Zero());
+  __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize -
+                                kNumRequiredStackFrameSlots * kPointerSize));
+  __ StoreP(r0, MemOperand(sp));
+#if defined(USE_SIMULATOR)
+  // Under the simulator we need to indirect the entry hook through a
+  // trampoline function at a known address.
+  // It additionally takes an isolate as a third parameter
+  __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+
+  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+  __ mov(ip, Operand(ExternalReference(
+                 &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
+#endif
+  __ Call(ip);
+
+  // The zLinux ABI requires the caller's frame to have sufficient space for
+  // the callee-preserved register save area.
+  __ la(sp, MemOperand(sp, kCalleeRegisterSaveAreaSize +
+                               kNumRequiredStackFrameSlots * kPointerSize));
+
+  // Restore the stack pointer if needed.
+  if (frame_alignment > kPointerSize) {
+    __ LoadRR(sp, r7);
+  }
+
+  // Also pop lr to get Ret(0).
+  __ MultiPop(kSavedRegs | ip.bit());
+  __ LoadRR(r14, ip);
+  __ Ret();
+}
+
+template <class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+                                AllocationSiteOverrideMode mode) {
+  if (mode == DISABLE_ALLOCATION_SITES) {
+    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+    __ TailCallStub(&stub);
+  } else if (mode == DONT_OVERRIDE) {
+    int last_index =
+        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
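+    // Probe each fast elements kind in order and tail call the first stub
+    // whose kind matches the one in r5.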
+    for (int i = 0; i <= last_index; ++i) {
+      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+      __ CmpP(r5, Operand(kind));
+      T stub(masm->isolate(), kind);
+      __ TailCallStub(&stub, eq);
+    }
+
+    // If we reached this point there is a problem.
+    __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+                                           AllocationSiteOverrideMode mode) {
+  // r4 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+  // r5 - kind (if mode != DISABLE_ALLOCATION_SITES)
+  // r2 - number of arguments
+  // r3 - constructor?
+  // sp[0] - last argument
+  Label normal_sequence;
+  if (mode == DONT_OVERRIDE) {
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set? If so, we are holey and that is good.
+    __ AndP(r0, r5, Operand(1));
+    __ bne(&normal_sequence);
+  }
+
+  // look at the first argument
+  __ LoadP(r7, MemOperand(sp, 0));
+  __ CmpP(r7, Operand::Zero());
+  __ beq(&normal_sequence);
+
+  if (mode == DISABLE_ALLOCATION_SITES) {
+    ElementsKind initial = GetInitialFastElementsKind();
+    ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+    ArraySingleArgumentConstructorStub stub_holey(
+        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
+    __ TailCallStub(&stub_holey);
+
+    __ bind(&normal_sequence);
+    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
+                                            DISABLE_ALLOCATION_SITES);
+    __ TailCallStub(&stub);
+  } else if (mode == DONT_OVERRIDE) {
+    // We are going to create a holey array, but our kind is non-holey.
+    // Fix kind and retry (only if we have an allocation site in the slot).
+    __ AddP(r5, r5, Operand(1));
+    if (FLAG_debug_code) {
+      __ LoadP(r7, FieldMemOperand(r4, 0));
+      __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
+      __ Assert(eq, kExpectedAllocationSite);
+    }
+
+    // Save the resulting elements kind in type info. We can't just store r5
+    // in the AllocationSite::transition_info field because elements kind is
+    // restricted to a portion of the field...upper bits need to be left alone.
+    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+    __ LoadP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+    __ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
+    __ StoreP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+
+    __ bind(&normal_sequence);
+    int last_index =
+        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+    for (int i = 0; i <= last_index; ++i) {
+      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+      __ CmpP(r5, Operand(kind));
+      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+      __ TailCallStub(&stub, eq);
+    }
+
+    // If we reached this point there is a problem.
+    __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+template <class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+  int to_index =
+      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= to_index; ++i) {
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    T stub(isolate, kind);
+    stub.GetCode();
+    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+      stub1.GetCode();
+    }
+  }
+}
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+      isolate);
+}
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+    Isolate* isolate) {
+  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
+  for (int i = 0; i < 2; i++) {
+    // For internal arrays we only need a few things
+    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+    stubh1.GetCode();
+    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+    stubh2.GetCode();
+    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+    stubh3.GetCode();
+  }
+}
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+  if (argument_count() == ANY) {
+    Label not_zero_case, not_one_case;
+    __ CmpP(r2, Operand::Zero());
+    __ bne(&not_zero_case);
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+    __ bind(&not_zero_case);
+    __ CmpP(r2, Operand(1));
+    __ bgt(&not_one_case);
+    CreateArrayDispatchOneArgument(masm, mode);
+
+    __ bind(&not_one_case);
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+  } else if (argument_count() == NONE) {
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  } else if (argument_count() == ONE) {
+    CreateArrayDispatchOneArgument(masm, mode);
+  } else if (argument_count() == MORE_THAN_ONE) {
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argc (only if argument_count() == ANY)
+  //  -- r3 : constructor
+  //  -- r4 : AllocationSite or undefined
+  //  -- r5 : new target
+  //  -- sp[0] : return address
+  //  -- sp[4] : last argument
+  // -----------------------------------
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+    // The Smi check below will catch both a NULL pointer and a Smi.
+    __ TestIfSmi(r6);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r6, r6, r7, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+
+    // We should either have undefined in r4 or a valid AllocationSite
+    __ AssertUndefinedOrAllocationSite(r4, r6);
+  }
+
+  // Enter the context of the Array function.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  Label subclassing;
+  __ CmpP(r5, r3);
+  __ bne(&subclassing, Label::kNear);
+
+  Label no_info;
+  // Get the elements kind and case on that.
+  __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ beq(&no_info);
+
+  __ LoadP(r5, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+  __ SmiUntag(r5);
+  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+  __ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask));
+  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+  __ bind(&no_info);
+  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+  __ bind(&subclassing);
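+  // For a subclass call, store the constructor at the receiver slot, push the
+  // new target and the AllocationSite, and hand off to the %NewArray runtime
+  // function.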
+  switch (argument_count()) {
+    case ANY:
+    case MORE_THAN_ONE:
+      __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+      __ StoreP(r3, MemOperand(sp, r1));
+      __ AddP(r2, r2, Operand(3));
+      break;
+    case NONE:
+      __ StoreP(r3, MemOperand(sp, 0 * kPointerSize));
+      __ LoadImmP(r2, Operand(3));
+      break;
+    case ONE:
+      __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
+      __ LoadImmP(r2, Operand(4));
+      break;
+  }
+
+  __ Push(r5, r4);
+  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
+}
+
+void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
+                                                ElementsKind kind) {
+  __ CmpLogicalP(r2, Operand(1));
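+  // Dispatch on the argument count: zero arguments (lt), more than one (gt),
+  // or exactly one, which falls through below.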
+
+  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+  __ TailCallStub(&stub0, lt);
+
+  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  __ TailCallStub(&stubN, gt);
+
+  if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array
+    // look at the first argument
+    __ LoadP(r5, MemOperand(sp, 0));
+    __ CmpP(r5, Operand::Zero());
+
+    InternalArraySingleArgumentConstructorStub stub1_holey(
+        isolate(), GetHoleyElementsKind(kind));
+    __ TailCallStub(&stub1_holey, ne);
+  }
+
+  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+  __ TailCallStub(&stub1);
+}
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argc
+  //  -- r3 : constructor
+  //  -- sp[0] : return address
+  //  -- sp[4] : last argument
+  // -----------------------------------
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+    // The Smi check below will catch both a NULL pointer and a Smi.
+    __ TestIfSmi(r5);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r5, r5, r6, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  // Figure out the right elements kind
+  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the map's "bit field 2" into |result|.
+  __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ DecodeField<Map::ElementsKindBits>(r5);
+
+  if (FLAG_debug_code) {
+    Label done;
+    __ CmpP(r5, Operand(FAST_ELEMENTS));
+    __ beq(&done);
+    __ CmpP(r5, Operand(FAST_HOLEY_ELEMENTS));
+    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+    __ bind(&done);
+  }
+
+  Label fast_elements_case;
+  __ CmpP(r5, Operand(FAST_ELEMENTS));
+  __ beq(&fast_elements_case);
+  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+  __ bind(&fast_elements_case);
+  GenerateCase(masm, FAST_ELEMENTS);
+}
+
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : target
+  //  -- r5 : new target
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r3);
+  __ AssertReceiver(r5);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE);
+  __ bne(&new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(r4, &new_object);
+  __ CompareObjectType(r4, r2, r2, MAP_TYPE);
+  __ bne(&new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset));
+  __ CmpP(r2, r3);
+  __ bne(&new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset));
+  __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ StoreP(r4, MemOperand(r2, JSObject::kMapOffset));
+  __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r5, MemOperand(r2, JSObject::kPropertiesOffset));
+  __ StoreP(r5, MemOperand(r2, JSObject::kElementsOffset));
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ AddP(r3, r2, Operand(JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- r2 : result (untagged)
+  //  -- r3 : result fields (untagged)
+  //  -- r7 : result end (untagged)
+  //  -- r4 : initial map
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+  __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
+  __ DecodeField<Map::ConstructionCounter>(r9, r5);
+  __ LoadAndTestP(r9, r9);
+  __ bne(&slack_tracking);
+  {
+    // Initialize all in-object fields with undefined.
+    __ InitializeFieldsWithFiller(r3, r7, r8);
+
+    // Add the object tag to make the JSObject real.
+    __ AddP(r2, r2, Operand(kHeapObjectTag));
+    __ Ret();
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)));
+    __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
+
+    // Initialize the in-object fields with undefined.
+    __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset));
+    __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
+    __ SubP(r6, r7, r6);
+    __ InitializeFieldsWithFiller(r3, r6, r8);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(r3, r7, r8);
+
+    // Add the object tag to make the JSObject real.
+    __ AddP(r2, r2, Operand(kHeapObjectTag));
+
+    // Check if we can finalize the instance size.
+    __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
+    __ Ret(ne);
+
+    // Finalize the instance size.
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r2, r4);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(r2);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    STATIC_ASSERT(kSmiTag == 0);
+    __ ShiftLeftP(r6, r6,
+                  Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
+    __ Push(r4, r6);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(r4);
+  }
+  __ SubP(r2, r2, Operand(kHeapObjectTag));
+  __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
+  __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
+  __ AddP(r7, r2, r7);
+  __ b(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ Push(r3, r5);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make r4 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ LoadRR(r4, fp);
+    __ b(&loop_entry);
+    __ bind(&loop);
+    __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
+    __ CmpP(ip, r3);
+    __ bne(&loop);
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_rest_parameters);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadW(
+      r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_S390X
+  __ SmiTag(r3);
+#endif
+  __ SubP(r2, r2, r3);
+  __ bgt(&rest_parameters);
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the rest parameter array in r2.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
+    __ StoreP(r3, FieldMemOperand(r2, JSArray::kMapOffset), r0);
+    __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(r3, FieldMemOperand(r2, JSArray::kPropertiesOffset), r0);
+    __ StoreP(r3, FieldMemOperand(r2, JSArray::kElementsOffset), r0);
+    __ LoadImmP(r3, Operand::Zero());
+    __ StoreP(r3, FieldMemOperand(r2, JSArray::kLengthOffset), r0);
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ b(&done_allocate);
+  }
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ SmiToPtrArrayOffset(r8, r2);
+    __ AddP(r4, r4, r8);
+    __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- r2 : number of rest parameters (tagged)
+    //  -- r4 : pointer just past first rest parameters
+    //  -- r8 : size of rest parameters
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ mov(r3, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ AddP(r3, r3, r8);
+    __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the elements array in r5.
+    __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+    __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
+    __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
+    __ AddP(r6, r5,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    {
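+      // Copy the rest parameters from the caller's stack into the elements
+      // backing store, walking the two pointers in opposite directions.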
+      Label loop;
+      __ SmiUntag(r1, r2);
+      // __ mtctr(r0);
+      __ bind(&loop);
+      __ lay(r4, MemOperand(r4, -kPointerSize));
+      __ LoadP(ip, MemOperand(r4));
+      __ la(r6, MemOperand(r6, kPointerSize));
+      __ StoreP(ip, MemOperand(r6));
+      // __ bdnz(&loop);
+      __ BranchOnCount(r1, &loop);
+      __ AddP(r6, r6, Operand(kPointerSize));
+    }
+
+    // Setup the rest parameter array in r6.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
+    __ StoreP(r3, MemOperand(r6, JSArray::kMapOffset));
+    __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(r3, MemOperand(r6, JSArray::kPropertiesOffset));
+    __ StoreP(r5, MemOperand(r6, JSArray::kElementsOffset));
+    __ StoreP(r2, MemOperand(r6, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ AddP(r2, r6, Operand(kHeapObjectTag));
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(r3);
+      __ Push(r2, r4, r3);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ LoadRR(r5, r2);
+      __ Pop(r2, r4);
+    }
+    __ b(&done_allocate);
+  }
+}
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadW(
+      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_S390X
+  __ SmiTag(r4);
+#endif
+  __ SmiToPtrArrayOffset(r5, r4);
+  __ AddP(r5, fp, r5);
+  __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // r3 : function
+  // r4 : number of parameters (tagged)
+  // r5 : parameters pointer
+  // Registers used over whole function:
+  // r7 : arguments count (tagged)
+  // r8 : mapped parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ beq(&adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+  __ LoadRR(r7, r4);
+  __ LoadRR(r8, r4);
+  __ b(&try_allocate);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiToPtrArrayOffset(r5, r7);
+  __ AddP(r5, r5, r6);
+  __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // r7 = argument count (tagged)
+  // r8 = parameter count (tagged)
+  // Compute the mapped parameter count = min(r4, r7) in r8.
+  __ CmpP(r4, r7);
+  Label skip;
+  __ LoadRR(r8, r4);
+  __ blt(&skip);
+  __ LoadRR(r8, r7);
+  __ bind(&skip);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+  Label skip2, skip3;
+  __ bne(&skip2);
+  __ LoadImmP(r1, Operand::Zero());
+  __ b(&skip3);
+  __ bind(&skip2);
+  __ SmiToPtrArrayOffset(r1, r8);
+  __ AddP(r1, r1, Operand(kParameterMapHeaderSize));
+  __ bind(&skip3);
+
+  // 2. Backing store.
+  __ SmiToPtrArrayOffset(r6, r7);
+  __ AddP(r1, r1, r6);
+  __ AddP(r1, r1, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(r1, r2, r1, r6, &runtime, TAG_OBJECT);
+
+  // r2 = address of new object(s) (tagged)
+  // r4 = argument count (smi-tagged)
+  // Get the arguments boilerplate from the current native context into r3.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+  __ LoadP(r6, NativeContextMemOperand());
+  __ CmpP(r8, Operand::Zero());
+  Label skip4, skip5;
+  __ bne(&skip4);
+  __ LoadP(r6, MemOperand(r6, kNormalOffset));
+  __ b(&skip5);
+  __ bind(&skip4);
+  __ LoadP(r6, MemOperand(r6, kAliasedOffset));
+  __ bind(&skip5);
+
+  // r2 = address of new object (tagged)
+  // r4 = argument count (smi-tagged)
+  // r6 = address of arguments map (tagged)
+  // r8 = mapped parameter count (tagged)
+  __ StoreP(r6, FieldMemOperand(r2, JSObject::kMapOffset), r0);
+  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r1, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r1, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+
+  // Set up the callee in-object property.
+  __ AssertNotSmi(r3);
+  __ StoreP(r3, FieldMemOperand(r2, JSSloppyArgumentsObject::kCalleeOffset),
+            r0);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(r7);
+  __ StoreP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset),
+            r0);
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, r6 will point there, otherwise
+  // it will point to the backing store.
+  __ AddP(r6, r2, Operand(JSSloppyArgumentsObject::kSize));
+  __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+
+  // r2 = address of new object (tagged)
+  // r4 = argument count (tagged)
+  // r6 = address of parameter map or backing store (tagged)
+  // r8 = mapped parameter count (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+  Label skip6;
+  __ bne(&skip6);
+  // Move backing store address to r3, because it is
+  // expected there when filling in the unmapped arguments.
+  __ LoadRR(r3, r6);
+  __ b(&skip_parameter_map);
+  __ bind(&skip6);
+
+  __ LoadRoot(r7, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+  __ AddSmiLiteral(r7, r8, Smi::FromInt(2), r0);
+  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+  __ StoreP(cp, FieldMemOperand(r6, FixedArray::kHeaderSize + 0 * kPointerSize),
+            r0);
+  __ SmiToPtrArrayOffset(r7, r8);
+  __ AddP(r7, r7, r6);
+  __ AddP(r7, r7, Operand(kParameterMapHeaderSize));
+  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kHeaderSize + 1 * kPointerSize),
+            r0);
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop;
+  __ LoadRR(r7, r8);
+  __ AddSmiLiteral(r1, r4, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+  __ SubP(r1, r1, r8);
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ SmiToPtrArrayOffset(r3, r7);
+  __ AddP(r3, r3, r6);
+  __ AddP(r3, r3, Operand(kParameterMapHeaderSize));
+
+  // r3 = address of backing store (tagged)
+  // r6 = address of parameter map (tagged)
+  // r7 = temporary scratch (a.o., for address calculation)
+  // r9 = temporary scratch (a.o., for address calculation)
+  // ip = the hole value
+  __ SmiUntag(r7);
+  __ push(r4);
+  __ LoadRR(r4, r7);
+  __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
+  __ AddP(r9, r3, r7);
+  __ AddP(r7, r6, r7);
+  __ AddP(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ AddP(r7, r7, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+
+  __ bind(&parameters_loop);
+  __ StoreP(r1, MemOperand(r7, -kPointerSize));
+  __ lay(r7, MemOperand(r7, -kPointerSize));
+  __ StoreP(ip, MemOperand(r9, -kPointerSize));
+  __ lay(r9, MemOperand(r9, -kPointerSize));
+  __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
+  __ BranchOnCount(r4, &parameters_loop);
+  __ pop(r4);
+
+  // Restore r7 = argument count (tagged).
+  __ LoadP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset));
+
+  __ bind(&skip_parameter_map);
+  // r2 = address of new object (tagged)
+  // r3 = address of backing store (tagged)
+  // r7 = argument count (tagged)
+  // r8 = mapped parameter count (tagged)
+  // r1 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(r1, FieldMemOperand(r3, FixedArray::kMapOffset), r0);
+  __ StoreP(r7, FieldMemOperand(r3, FixedArray::kLengthOffset), r0);
+  __ SubP(r1, r7, r8);
+  __ Ret(eq);
+
+  Label arguments_loop;
+  __ SmiUntag(r1);
+  __ LoadRR(r4, r1);
+
+  __ SmiToPtrArrayOffset(r0, r8);
+  __ SubP(r5, r5, r0);
+  __ AddP(r1, r3, r0);
+  __ AddP(r1, r1,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+
+  __ bind(&arguments_loop);
+  __ LoadP(r6, MemOperand(r5, -kPointerSize));
+  __ lay(r5, MemOperand(r5, -kPointerSize));
+  __ StoreP(r6, MemOperand(r1, kPointerSize));
+  __ la(r1, MemOperand(r1, kPointerSize));
+  __ BranchOnCount(r4, &arguments_loop);
+
+  // Return.
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // r7 = argument count (tagged)
+  __ bind(&runtime);
+  __ Push(r3, r5, r7);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make r4 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ LoadRR(r4, fp);
+    __ b(&loop_entry);
+    __ bind(&loop);
+    __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
+    __ CmpP(ip, r3);
+    __ bne(&loop);
+  }
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ beq(&arguments_adaptor);
+  {
+    __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadW(r2, FieldMemOperand(
+                     r3, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_S390X
+    __ SmiTag(r2);
+#endif
+    __ SmiToPtrArrayOffset(r8, r2);
+    __ AddP(r4, r4, r8);
+  }
+  __ b(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ SmiToPtrArrayOffset(r8, r2);
+    __ AddP(r4, r5, r8);
+  }
+  __ bind(&arguments_done);
+  __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // ----------- S t a t e -------------
+  //  -- cp : context
+  //  -- r2 : number of rest parameters (tagged)
+  //  -- r4 : pointer just past first rest parameters
+  //  -- r8 : size of rest parameters
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ mov(r3, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ AddP(r3, r3, r8);
+  __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Setup the elements array in r5.
+  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
+  __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
+  __ AddP(r6, r5,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+  {
+    Label loop, done_loop;
+    __ SmiUntag(r1, r2);
+    __ LoadAndTestP(r1, r1);
+    __ beq(&done_loop);
+    __ bind(&loop);
+    __ lay(r4, MemOperand(r4, -kPointerSize));
+    __ LoadP(ip, MemOperand(r4));
+    __ la(r6, MemOperand(r6, kPointerSize));
+    __ StoreP(ip, MemOperand(r6));
+    __ BranchOnCount(r1, &loop);
+    __ bind(&done_loop);
+    __ AddP(r6, r6, Operand(kPointerSize));
+  }
+
+  // Setup the rest parameter array in r6.
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r3);
+  __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kMapOffset));
+  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kPropertiesOffset));
+  __ StoreP(r5, MemOperand(r6, JSStrictArgumentsObject::kElementsOffset));
+  __ StoreP(r2, MemOperand(r6, JSStrictArgumentsObject::kLengthOffset));
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ AddP(r2, r6, Operand(kHeapObjectTag));
+  __ Ret();
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(r3);
+    __ Push(r2, r4, r3);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ LoadRR(r5, r2);
+    __ Pop(r2, r4);
+  }
+  __ b(&done_allocate);
+}
+
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+  Register context = cp;
+  Register result = r2;
+  Register slot = r4;
+
+  // Go up the context chain to the script context.
+  for (int i = 0; i < depth(); ++i) {
+    __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+    context = result;
+  }
+
+  // Load the PropertyCell value at the specified slot.
+  __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
+  __ AddP(result, context, r0);
+  __ LoadP(result, ContextMemOperand(result));
+  __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+  // If the result is not the_hole, return. Otherwise, handle in the runtime.
+  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+  Label runtime;
+  __ beq(&runtime);
+  __ Ret();
+  __ bind(&runtime);
+
+  // Fallback to runtime.
+  __ SmiTag(slot);
+  __ Push(slot);
+  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+}
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+  Register value = r2;
+  Register slot = r4;
+
+  Register cell = r3;
+  Register cell_details = r5;
+  Register cell_value = r6;
+  Register cell_value_map = r7;
+  Register scratch = r8;
+
+  Register context = cp;
+  Register context_temp = cell;
+
+  Label fast_heapobject_case, fast_smi_case, slow_case;
+
+  if (FLAG_debug_code) {
+    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+    __ Check(ne, kUnexpectedValue);
+  }
+
+  // Go up the context chain to the script context.
+  for (int i = 0; i < depth(); i++) {
+    __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+    context = context_temp;
+  }
+
+  // Load the PropertyCell at the specified slot.
+  __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
+  __ AddP(cell, context, r0);
+  __ LoadP(cell, ContextMemOperand(cell));
+
+  // Load PropertyDetails for the cell (actually only the cell_type and kind).
+  __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
+  __ SmiUntag(cell_details);
+  __ AndP(cell_details, cell_details,
+          Operand(PropertyDetails::PropertyCellTypeField::kMask |
+                  PropertyDetails::KindField::kMask |
+                  PropertyDetails::kAttributesReadOnlyMask));
+
+  // Check if PropertyCell holds mutable data.
+  Label not_mutable_data;
+  __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+                                    PropertyCellType::kMutable) |
+                                PropertyDetails::KindField::encode(kData)));
+  __ bne(&not_mutable_data);
+  __ JumpIfSmi(value, &fast_smi_case);
+
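+  // Heap object values need the write barrier below; smi values do not.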
+  __ bind(&fast_heapobject_case);
+  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
+  // RecordWriteField clobbers the value register, so we copy it before the
+  // call.
+  __ LoadRR(r5, value);
+  __ RecordWriteField(cell, PropertyCell::kValueOffset, r5, scratch,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(&not_mutable_data);
+  // Check if PropertyCell value matches the new value (relevant for Constant,
+  // ConstantType and Undefined cells).
+  Label not_same_value;
+  __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+  __ CmpP(cell_value, value);
+  __ bne(&not_same_value);
+
+  // Make sure the PropertyCell is not marked READ_ONLY.
+  __ AndP(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
+  __ bne(&slow_case);
+
+  if (FLAG_debug_code) {
+    Label done;
+    // This can only be true for Constant, ConstantType and Undefined cells,
+    // because we never store the_hole via this stub.
+    __ CmpP(cell_details,
+            Operand(PropertyDetails::PropertyCellTypeField::encode(
+                        PropertyCellType::kConstant) |
+                    PropertyDetails::KindField::encode(kData)));
+    __ beq(&done);
+    __ CmpP(cell_details,
+            Operand(PropertyDetails::PropertyCellTypeField::encode(
+                        PropertyCellType::kConstantType) |
+                    PropertyDetails::KindField::encode(kData)));
+    __ beq(&done);
+    __ CmpP(cell_details,
+            Operand(PropertyDetails::PropertyCellTypeField::encode(
+                        PropertyCellType::kUndefined) |
+                    PropertyDetails::KindField::encode(kData)));
+    __ Check(eq, kUnexpectedValue);
+    __ bind(&done);
+  }
+  __ Ret();
+  __ bind(&not_same_value);
+
+  // Check if PropertyCell contains data with constant type (and is not
+  // READ_ONLY).
+  __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+                                    PropertyCellType::kConstantType) |
+                                PropertyDetails::KindField::encode(kData)));
+  __ bne(&slow_case);
+
+  // Now either both old and new values must be smis or both must be heap
+  // objects with same map.
+  Label value_is_heap_object;
+  __ JumpIfNotSmi(value, &value_is_heap_object);
+  __ JumpIfNotSmi(cell_value, &slow_case);
+  // Old and new values are smis, no need for a write barrier here.
+  __ bind(&fast_smi_case);
+  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
+  __ Ret();
+
+  __ bind(&value_is_heap_object);
+  __ JumpIfSmi(cell_value, &slow_case);
+
+  __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ CmpP(cell_value_map, scratch);
+  __ beq(&fast_heapobject_case);
+
+  // Fallback to runtime.
+  __ bind(&slow_case);
+  __ SmiTag(slot);
+  __ Push(slot, value);
+  __ TailCallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreGlobalViaContext_Strict
+                         : Runtime::kStoreGlobalViaContext_Sloppy);
+}
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  return ref0.address() - ref1.address();
+}
+
+// Calls an API function.  Allocates HandleScope, extracts returned value
+// from handle and propagates exceptions.  Restores context.  stack_space
+// - space to be unwound on exit (includes the call JS arguments space and
+// the additional space allocated for the fast call).
+static void CallApiFunctionAndReturn(MacroAssembler* masm,
+                                     Register function_address,
+                                     ExternalReference thunk_ref,
+                                     int stack_space,
+                                     MemOperand* stack_space_operand,
+                                     MemOperand return_value_operand,
+                                     MemOperand* context_restore_operand) {
+  Isolate* isolate = masm->isolate();
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address(isolate);
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(isolate), next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(isolate), next_address);
+
+  // Additional parameter is the address of the actual callback.
+  DCHECK(function_address.is(r3) || function_address.is(r4));
+  Register scratch = r5;
+
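+  // If the profiler is active, route the call through the profiling thunk so
+  // the profiler can observe the external callback; otherwise call the
+  // callback address directly.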
+  __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
+  __ LoadlB(scratch, MemOperand(scratch, 0));
+  __ CmpP(scratch, Operand::Zero());
+
+  Label profiler_disabled;
+  Label end_profiler_check;
+  __ beq(&profiler_disabled, Label::kNear);
+  __ mov(scratch, Operand(thunk_ref));
+  __ b(&end_profiler_check, Label::kNear);
+  __ bind(&profiler_disabled);
+  __ LoadRR(scratch, function_address);
+  __ bind(&end_profiler_check);
+
+  // Allocate HandleScope in callee-save registers.
+  // r9 - next_address
+  // r6 - next_address->kNextOffset
+  // r7 - next_address->kLimitOffset
+  // r8 - next_address->kLevelOffset
+  __ mov(r9, Operand(next_address));
+  __ LoadP(r6, MemOperand(r9, kNextOffset));
+  __ LoadP(r7, MemOperand(r9, kLimitOffset));
+  __ LoadlW(r8, MemOperand(r9, kLevelOffset));
+  __ AddP(r8, Operand(1));
+  __ StoreW(r8, MemOperand(r9, kLevelOffset));
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ PushSafepointRegisters();
+    __ PrepareCallCFunction(1, r2);
+    __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
+                     1);
+    __ PopSafepointRegisters();
+  }
+
+  // Native call returns to the DirectCEntry stub which redirects to the
+  // return address pushed on stack (could have moved after GC).
+  // DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub(isolate);
+  stub.GenerateCall(masm, scratch);
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ PushSafepointRegisters();
+    __ PrepareCallCFunction(1, r2);
+    __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
+                     1);
+    __ PopSafepointRegisters();
+  }
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+  Label return_value_loaded;
+
+  // load value from ReturnValue
+  __ LoadP(r2, return_value_operand);
+  __ bind(&return_value_loaded);
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  __ StoreP(r6, MemOperand(r9, kNextOffset));
+  if (__ emit_debug_code()) {
+    __ LoadlW(r3, MemOperand(r9, kLevelOffset));
+    __ CmpP(r3, r8);
+    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+  }
+  __ SubP(r8, Operand(1));
+  __ StoreW(r8, MemOperand(r9, kLevelOffset));
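+  // If the handle scope limit has changed, extensions were allocated during
+  // the callback and must be deleted before leaving the exit frame.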
+  __ CmpP(r7, MemOperand(r9, kLimitOffset));
+  __ bne(&delete_allocated_handles, Label::kNear);
+
+  // Leave the API exit frame.
+  __ bind(&leave_exit_frame);
+  bool restore_context = context_restore_operand != NULL;
+  if (restore_context) {
+    __ LoadP(cp, *context_restore_operand);
+  }
+  // LeaveExitFrame expects unwind space to be in a register.
+  if (stack_space_operand != NULL) {
+    __ l(r6, *stack_space_operand);
+  } else {
+    __ mov(r6, Operand(stack_space));
+  }
+  __ LeaveExitFrame(false, r6, !restore_context, stack_space_operand != NULL);
+
+  // Check if the function scheduled an exception.
+  __ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate)));
+  __ LoadP(r7, MemOperand(r7));
+  __ CompareRoot(r7, Heap::kTheHoleValueRootIndex);
+  __ bne(&promote_scheduled_exception, Label::kNear);
+
+  __ b(r14);
+
+  // Re-throw by promoting a scheduled exception.
+  __ bind(&promote_scheduled_exception);
+  __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  __ bind(&delete_allocated_handles);
+  __ StoreP(r7, MemOperand(r9, kLimitOffset));
+  __ LoadRR(r6, r2);
+  __ PrepareCallCFunction(1, r7);
+  __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
+  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
+                   1);
+  __ LoadRR(r2, r6);
+  __ b(&leave_exit_frame, Label::kNear);
+}
+
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                  : callee
+  //  -- r6                  : call_data
+  //  -- r4                  : holder
+  //  -- r3                  : api_function_address
+  //  -- cp                  : context
+  //  --
+  //  -- sp[0]               : last argument
+  //  -- ...
+  //  -- sp[(argc - 1)* 4]   : first argument
+  //  -- sp[argc * 4]        : receiver
+  // -----------------------------------
+
+  Register callee = r2;
+  Register call_data = r6;
+  Register holder = r4;
+  Register api_function_address = r3;
+  Register context = cp;
+
+  typedef FunctionCallbackArguments FCA;
+
+  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+  STATIC_ASSERT(FCA::kCalleeIndex == 5);
+  STATIC_ASSERT(FCA::kDataIndex == 4);
+  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(FCA::kIsolateIndex == 1);
+  STATIC_ASSERT(FCA::kHolderIndex == 0);
+  STATIC_ASSERT(FCA::kArgsLength == 7);
+
+  // context save
+  __ push(context);
+  if (!is_lazy()) {
+    // load context from callee
+    __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  }
+
+  // callee
+  __ push(callee);
+
+  // call data
+  __ push(call_data);
+
+  Register scratch = call_data;
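+  // call_data has already been pushed, so its register can be reused to hold
+  // the undefined value for the return value slots; if call_data was already
+  // undefined the load can be skipped.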
+  if (!call_data_undefined()) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  }
+  // return value
+  __ push(scratch);
+  // return value default
+  __ push(scratch);
+  // isolate
+  __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ push(scratch);
+  // holder
+  __ push(holder);
+
+  // Prepare arguments.
+  __ LoadRR(scratch, sp);
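+  // scratch now points at the implicit arguments just pushed; it becomes
+  // FunctionCallbackInfo::implicit_args_ below.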
+
+  // Allocate the v8::Arguments structure in the arguments' space since
+  // it's not controlled by GC.
+  // S390 LINUX ABI:
+  //
+  // Create 5 extra slots on stack:
+  //    [0] space for DirectCEntryStub's LR save
+  //    [1-4] FunctionCallbackInfo
+  const int kApiStackSpace = 5;
+  const int kFunctionCallbackInfoOffset =
+      (kStackFrameExtraParamSlot + 1) * kPointerSize;
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  DCHECK(!api_function_address.is(r2) && !scratch.is(r2));
+  // r2 = FunctionCallbackInfo&
+  // The arguments structure is located after the return address.
+  __ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset));
+  // FunctionCallbackInfo::implicit_args_
+  __ StoreP(scratch, MemOperand(r2, 0 * kPointerSize));
+  // FunctionCallbackInfo::values_
+  __ AddP(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+  __ StoreP(ip, MemOperand(r2, 1 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc
+  __ LoadImmP(ip, Operand(argc()));
+  __ StoreW(ip, MemOperand(r2, 2 * kPointerSize));
+  // FunctionCallbackInfo::is_construct_call_ = 0
+  __ LoadImmP(ip, Operand::Zero());
+  __ StoreW(ip, MemOperand(r2, 2 * kPointerSize + kIntSize));
+
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(masm->isolate());
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  MemOperand context_restore_operand(
+      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Store callbacks return the first JS argument (the value being stored).
+  int return_value_offset = 0;
+  if (is_store()) {
+    return_value_offset = 2 + FCA::kArgsLength;
+  } else {
+    return_value_offset = 2 + FCA::kReturnValueOffset;
+  }
+  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+  int stack_space = 0;
+  MemOperand is_construct_call_operand =
+      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
+  MemOperand* stack_space_operand = &is_construct_call_operand;
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_operand = NULL;
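+  // The argument count is known statically here, so the unwind space is
+  // passed as an immediate and stack_space_operand is left unset.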
+  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
+                           stack_space_operand, return_value_operand,
+                           &context_restore_operand);
+}
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]                        : name
+  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
+  //  -- ...
+  //  -- r4                           : api_function_address
+  // -----------------------------------
+
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  int arg0Slot = 0;
+  int accessorInfoSlot = 0;
+  int apiStackSpace = 0;
+  DCHECK(api_function_address.is(r4));
+
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+  __ LoadRR(r2, sp);                           // r2 = Handle<Name>
+  __ AddP(r3, r2, Operand(1 * kPointerSize));  // r3 = v8::PCI::args_
+
+  // If ABI passes Handles (pointer-sized struct) in a register:
+  //
+  // Create 2 extra slots on stack:
+  //    [0] space for DirectCEntryStub's LR save
+  //    [1] AccessorInfo&
+  //
+  // Otherwise:
+  //
+  // Create 3 extra slots on stack:
+  //    [0] space for DirectCEntryStub's LR save
+  //    [1] copy of Handle (first arg)
+  //    [2] AccessorInfo&
+  if (ABI_PASSES_HANDLES_IN_REGS) {
+    accessorInfoSlot = kStackFrameExtraParamSlot + 1;
+    apiStackSpace = 2;
+  } else {
+    arg0Slot = kStackFrameExtraParamSlot + 1;
+    accessorInfoSlot = arg0Slot + 1;
+    apiStackSpace = 3;
+  }
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(false, apiStackSpace);
+
+  if (!ABI_PASSES_HANDLES_IN_REGS) {
+    // pass 1st arg by reference
+    __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize));
+    __ AddP(r2, sp, Operand(arg0Slot * kPointerSize));
+  }
+
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+  __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize));
+  __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize));
+  // r3 = v8::PropertyCallbackInfo&
+
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
+
+  // +3 is to skip prolog, return address and name handle.
+  MemOperand return_value_operand(
+      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+                           kStackUnwindSpace, NULL, return_value_operand, NULL);
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/code-stubs-s390.h b/src/s390/code-stubs-s390.h
new file mode 100644
index 0000000..461e569
--- /dev/null
+++ b/src/s390/code-stubs-s390.h
@@ -0,0 +1,467 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_CODE_STUBS_S390_H_
+#define V8_S390_CODE_STUBS_S390_H_
+
+#include "src/s390/frames-s390.h"
+
+namespace v8 {
+namespace internal {
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+                                     Register src, Register count,
+                                     Register scratch,
+                                     String::Encoding encoding);
+
+  // Compares two flat one-byte strings and returns result in r0.
+  static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+                                                Register left, Register right,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3);
+
+  // Compares two flat one-byte strings for equality and returns result in r0.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
+                                              Register scratch1,
+                                              Register scratch2);
+
+ private:
+  static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
+                                              Register left, Register right,
+                                              Register length,
+                                              Register scratch1,
+                                              Label* chars_not_equal);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+class StoreRegistersStateStub : public PlatformCodeStub {
+ public:
+  explicit StoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
+};
+
+class RestoreRegistersStateStub : public PlatformCodeStub {
+ public:
+  explicit RestoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
+};
+
+class RecordWriteStub : public PlatformCodeStub {
+ public:
+  RecordWriteStub(Isolate* isolate, Register object, Register value,
+                  Register address, RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : PlatformCodeStub(isolate),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
+  }
+
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
+  enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
+
+  bool SometimesSetsUpAFrame() override { return false; }
+
+  // Overwrite the condition mask of the branch at |pos| with |c|, e.g. CC_NOP
+  // to turn the branch into a NOP or CC_ALWAYS to make it always taken.
+  static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
+    int32_t instrLen = masm->instr_length_at(pos);
+    DCHECK(instrLen == 4 || instrLen == 6);
+
+    if (instrLen == 4) {
+      // BRC - Branch Mask @ Bits 23-20
+      FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
+      masm->instr_at_put<FourByteInstr>(
+          pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
+    } else {
+      // BRCL - Branch Mask @ Bits 39-36
+      SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
+      masm->instr_at_put<SixByteInstr>(
+          pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
+    }
+  }
+
+  static bool isBranchNop(SixByteInstr instr, int instrLength) {
+    // BRC  - check for a 0x0 condition mask in the 4-byte form.
+    // BRCL - check for a 0x0 condition mask in the 6-byte form.
+    if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
+        (6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
+      return true;
+    }
+    return false;
+  }
+
+  static Mode GetMode(Code* stub) {
+    int32_t first_instr_length =
+        Instruction::InstructionLength(stub->instruction_start());
+    int32_t second_instr_length = Instruction::InstructionLength(
+        stub->instruction_start() + first_instr_length);
+
+    uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
+    uint64_t second_instr =
+        Assembler::instr_at(stub->instruction_start() + first_instr_length);
+
+    DCHECK(first_instr_length == 4 || first_instr_length == 6);
+    DCHECK(second_instr_length == 4 || second_instr_length == 6);
+
+    bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
+    bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
+
+    // STORE_BUFFER_ONLY has NOP on both branches
+    if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
+    // INCREMENTAL_COMPACTION has a NOP on the first branch only.
+    else if (isFirstInstrNOP && !isSecondInstrNOP)
+      return INCREMENTAL_COMPACTION;
+    // INCREMENTAL has a NOP on the second branch only.
+    else if (!isFirstInstrNOP && isSecondInstrNOP)
+      return INCREMENTAL;
+
+    DCHECK(false);
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+                        stub->instruction_size(), CodeObjectRequired::kNo);
+
+    // Get instruction lengths of two branches
+    int32_t first_instr_length = masm.instr_length_at(0);
+    int32_t second_instr_length = masm.instr_length_at(first_instr_length);
+
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        DCHECK(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+
+        PatchBranchCondMask(&masm, 0, CC_NOP);
+        PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
+        break;
+      case INCREMENTAL:
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchBranchCondMask(&masm, 0, CC_ALWAYS);
+        break;
+      case INCREMENTAL_COMPACTION:
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
+        break;
+    }
+    DCHECK(GetMode(stub) == mode);
+    Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+                           first_instr_length + second_instr_length);
+  }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The input is
+  // two registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object, Register address, Register scratch0)
+        : object_(object), address_(address), scratch0_(scratch0) {
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
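+      // Pick a fourth register, different from the three we were given, to
+      // serve as an additional scratch register.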
+      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->push(r14);
+      masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        // Save all volatile FP registers except d0.
+        masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        // Restore all volatile FP registers except d0.
+        masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
+      }
+      masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
+      masm->pop(r14);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  inline Major MajorKey() const final { return RecordWrite; }
+
+  void Generate(MacroAssembler* masm) override;
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm);
+
+  void Activate(Code* code) override {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
+  class ObjectBits : public BitField<int, 0, 4> {};
+  class ValueBits : public BitField<int, 4, 4> {};
+  class AddressBits : public BitField<int, 8, 4> {};
+  class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
+  };
+  class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
+
+  Label slow_;
+  RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
+};
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects) we need to
+// keep the code which called into native code pinned in memory. Currently the
+// simplest approach is to generate such a stub early enough that it can never
+// be moved by GC.
+class DirectCEntryStub : public PlatformCodeStub {
+ public:
+  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+  void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+  bool NeedsImmovableCode() override { return true; }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
+};
+
+class NameDictionaryLookupStub : public PlatformCodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = LookupModeBits::encode(mode);
+  }
+
+  static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
+                                     Label* done, Register receiver,
+                                     Register properties, Handle<Name> name,
+                                     Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
+                                     Label* done, Register elements,
+                                     Register name, Register r0, Register r1);
+
+  bool SometimesSetsUpAFrame() override { return false; }
+
+ private:
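+  // kInlinedProbes probes are generated inline by the static helpers; the
+  // stub itself performs up to kTotalProbes probes.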
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
+  class LookupModeBits : public BitField<LookupMode, 0, 1> {};
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
+};
+
+class FloatingPointHelper : public AllStatic {
+ public:
+  enum Destination { kFPRegisters, kCoreRegisters };
+
+  // Loads smis from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination
+  // is floating point registers, VFP3 must be supported. If core registers are
+  // requested when VFP3 is supported, d6 and d7 will be scratched.
+  static void LoadSmis(MacroAssembler* masm, Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination
+  // is floating point registers, VFP3 must be supported. If core registers are
+  // requested when VFP3 is supported, d6 and d7 will still be scratched. If
+  // either r0 or r1 is not a number (not a smi and not a heap number object),
+  // the not_number label is jumped to with r0 and r1 intact.
+  static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
+                           Register scratch1, Register scratch2,
+                           Label* not_number);
+
+  // Convert the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1.
+  static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
+                                   Register dst, Register heap_number_map,
+                                   Register scratch1, Register scratch2,
+                                   Register scratch3,
+                                   DoubleRegister double_scratch,
+                                   Label* not_int32);
+
+  // Converts the integer (untagged smi) in |src| to a double, storing
+  // the result to |double_dst|
+  static void ConvertIntToDouble(MacroAssembler* masm, Register src,
+                                 DoubleRegister double_dst);
+
+  // Converts the unsigned integer (untagged smi) in |src| to
+  // a double, storing the result to |double_dst|
+  static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
+                                         DoubleRegister double_dst);
+
+  // Converts the integer (untagged smi) in |src| to
+  // a float, storing the result in |dst|
+  static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
+                                const Register src);
+
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
+                                      DoubleRegister double_dst,
+                                      DoubleRegister double_scratch,
+                                      Register heap_number_map,
+                                      Register scratch1, Register scratch2,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when VFP3 is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
+                                Register dst, Register heap_number_map,
+                                Register scratch1, Register scratch2,
+                                Register scratch3,
+                                DoubleRegister double_scratch0,
+                                DoubleRegister double_scratch1,
+                                Label* not_int32);
+
+  // Generate non-VFP3 code to check if a double can be exactly represented by a
+  // 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+  // through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+  // - src2: contains 1.
+  // - other registers are clobbered.
+  static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
+                                   Register src2, Register dst,
+                                   Register scratch, Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when VFP3 is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in r0.
+  // Register heapnumber_result must be a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm, Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);
+
+ private:
+  static void LoadNumber(MacroAssembler* masm, Register object,
+                         DoubleRegister dst, Register heap_number_map,
+                         Register scratch1, Register scratch2,
+                         Label* not_number);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_CODE_STUBS_S390_H_
diff --git a/src/s390/codegen-s390.cc b/src/s390/codegen-s390.cc
new file mode 100644
index 0000000..6636a7c
--- /dev/null
+++ b/src/s390/codegen-s390.cc
@@ -0,0 +1,675 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/s390/codegen-s390.h"
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/s390/simulator-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ masm.
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_s390_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+  return Simulator::current(isolate)->CallFPReturnsDouble(
+      fast_exp_s390_machine_code, x, 0);
+}
+#endif
+
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == nullptr) return nullptr;
+  ExternalReference::InitializeMathExpData();
+
+  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+                      CodeObjectRequired::kNo);
+
+  {
+    DoubleRegister input = d0;
+    DoubleRegister result = d2;
+    DoubleRegister double_scratch1 = d3;
+    DoubleRegister double_scratch2 = d4;
+    Register temp1 = r6;
+    Register temp2 = r7;
+    Register temp3 = r8;
+
+    __ Push(temp3, temp2, temp1);
+    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
+                                  double_scratch2, temp1, temp2, temp3);
+    __ Pop(temp3, temp2, temp1);
+    __ ldr(d0, result);
+    __ Ret();
+  }
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+
+  Assembler::FlushICache(isolate, buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+#else
+  fast_exp_s390_machine_code = buffer;
+  return &fast_exp_simulator;
+#endif
+}
+
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+#if defined(USE_SIMULATOR)
+  return nullptr;
+#else
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == nullptr) return nullptr;
+
+  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+                      CodeObjectRequired::kNo);
+
+  __ MovFromFloatParameter(d0);
+  __ sqdbr(d0, d0);
+  __ MovToFloatResult(d0);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+
+  Assembler::FlushICache(isolate, buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
+#endif
+}
+
+#undef __
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  masm->EnterFrame(StackFrame::INTERNAL);
+  DCHECK(!masm->has_frame());
+  masm->set_has_frame(true);
+}
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  DCHECK(masm->has_frame());
+  masm->set_has_frame(false);
+}
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+    MacroAssembler* masm, Register receiver, Register key, Register value,
+    Register target_map, AllocationSiteMode mode,
+    Label* allocation_memento_found) {
+  Register scratch_elements = r6;
+  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    DCHECK(allocation_memento_found != NULL);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
+                                         allocation_memento_found);
+  }
+
+  // Set transitioned map.
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+    MacroAssembler* masm, Register receiver, Register key, Register value,
+    Register target_map, AllocationSiteMode mode, Label* fail) {
+  // lr contains the return address
+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = r6;
+  Register length = r7;
+  Register array = r8;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = r1;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
+                     scratch2));
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
+  }
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+  __ beq(&only_change_map, Label::kNear);
+
+  // Preserve lr and use r14 as a temporary register.
+  __ push(r14);
+
+  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // length: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ SmiToDoubleArrayOffset(r14, length);
+  __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
+  __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  // Update receiver's map.
+  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
+
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ AddP(scratch1, array, Operand(kHeapObjectTag));
+  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
+                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Prepare for conversion loop.
+  __ AddP(target_map, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ SmiToDoubleArrayOffset(array, length);
+  __ AddP(array_end, r9, array);
+// Repurpose registers no longer in use.
+#if V8_TARGET_ARCH_S390X
+  Register hole_int64 = elements;
+#else
+  Register hole_lower = elements;
+  Register hole_upper = length;
+#endif
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32 OR hole_int64
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // scratch2: begin of FixedDoubleArray element fields, not tagged
+
+  __ b(&entry, Label::kNear);
+
+  __ bind(&only_change_map);
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ b(&done, Label::kNear);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(r14);
+  __ b(fail);
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ LoadP(r14, MemOperand(scratch1));
+  __ la(scratch1, MemOperand(scratch1, kPointerSize));
+  // r14: current element
+  __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
+
+  // Normal smi, convert to double and store.
+  __ ConvertIntToDouble(r14, d0);
+  __ StoreDouble(d0, MemOperand(r9, 0));
+  __ la(r9, MemOperand(r9, 8));
+
+  __ b(&entry, Label::kNear);
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    // Restore a "smi-untagged" heap object.
+    __ LoadP(r1, MemOperand(r5, -kPointerSize));
+    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, kObjectFoundInSmiOnlyArray);
+  }
+#if V8_TARGET_ARCH_S390X
+  __ stg(hole_int64, MemOperand(r9, 0));
+#else
+  __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
+  __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
+#endif
+  __ AddP(r9, Operand(8));
+
+  __ bind(&entry);
+  __ CmpP(r9, array_end);
+  __ blt(&loop);
+
+  __ pop(r14);
+  __ bind(&done);
+}
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Register receiver, Register key, Register value,
+    Register target_map, AllocationSiteMode mode, Label* fail) {
+  // Register lr contains the return address.
+  Label loop, convert_hole, gc_required, only_change_map;
+  Register elements = r6;
+  Register array = r8;
+  Register length = r7;
+  Register scratch = r1;
+  Register scratch3 = r9;
+  Register hole_value = r9;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
+                     scratch));
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
+  }
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+  __ beq(&only_change_map);
+
+  __ Push(target_map, receiver, key, value);
+  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedDoubleArray
+  // length: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
+  __ SmiToPtrArrayOffset(r0, length);
+  __ AddP(array_size, r0);
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
+  __ AddP(array, Operand(kHeapObjectTag));
+
+  // Prepare for conversion loop.
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ AddP(src_elements,
+          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(length, length);
+  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
+
+  Label initialization_loop, loop_done;
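+  // scratch holds the untagged element count; both loops below are skipped
+  // when it is zero.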
+  __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
+  __ beq(&loop_done, Label::kNear);
+
+  // Allocating heap numbers in the loop below can fail and cause a jump to
+  // gc_required. We can't leave a partly initialized FixedArray behind,
+  // so pessimistically fill it with holes now.
+  __ AddP(dst_elements, array,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+  __ bind(&initialization_loop);
+  __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
+  __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
+  __ BranchOnCount(scratch, &initialization_loop);
+
+  __ AddP(dst_elements, array,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ AddP(dst_end, dst_elements, length);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  // Using offset addresses in src_elements to fully take advantage of
+  // post-indexing.
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields,
+  //               not tagged, +4
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // hole_value: the-hole pointer
+  // heap_number_map: heap number map
+  __ b(&loop, Label::kNear);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ Pop(target_map, receiver, key, value);
+  __ b(fail);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ StoreP(hole_value, MemOperand(dst_elements));
+  __ AddP(dst_elements, Operand(kPointerSize));
+  __ CmpLogicalP(dst_elements, dst_end);
+  __ bge(&loop_done);
+
+  __ bind(&loop);
+  Register upper_bits = key;
+  __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
+  __ AddP(src_elements, Operand(kDoubleSize));
+  // upper_bits: current element's upper 32 bit
+  // src_elements: address of next element's upper 32 bit
+  __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
+  __ beq(&convert_hole, Label::kNear);
+
+  // Non-hole double, copy value into a heap number.
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+                        &gc_required);
+// heap_number: new heap number
+#if V8_TARGET_ARCH_S390X
+  __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
+  // subtract tag for std
+  __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
+  __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
+#else
+  __ LoadlW(scratch2,
+            MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
+  __ LoadlW(upper_bits,
+            MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
+  __ StoreW(scratch2,
+            FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+  __ StoreW(upper_bits,
+            FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+#endif
+  __ LoadRR(scratch2, dst_elements);
+  __ StoreP(heap_number, MemOperand(dst_elements));
+  __ AddP(dst_elements, Operand(kPointerSize));
+  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ CmpLogicalP(dst_elements, dst_end);
+  __ blt(&loop);
+  __ bind(&loop_done);
+
+  __ Pop(target_map, receiver, key, value);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  __ bind(&only_change_map);
+  // Update receiver's map.
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+// Assumes ip can be used as a scratch register below.
+void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
+                                       Register index, Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ mov(r0, Operand(kIsIndirectStringMask));
+  __ AndP(r0, result);
+  __ beq(&check_sequential, Label::kNear /*, cr0*/);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ mov(ip, Operand(kSlicedNotConsMask));
+  __ LoadRR(r0, result);
+  __ AndP(r0, ip /*, SetRC*/);  // Should be okay to remove RC
+  __ beq(&cons_string, Label::kNear /*, cr0*/);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ SmiUntag(ip, result);
+  __ AddP(index, ip);
+  __ b(&indirect_string_loaded, Label::kNear);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ CompareRoot(result, Heap::kempty_stringRootIndex);
+  __ bne(call_runtime);
+  // Get the first of the two strings and load its instance type.
+  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ mov(r0, Operand(kStringRepresentationMask));
+  __ AndP(r0, result);
+  __ bne(&external_string, Label::kNear);
+
+  // Prepare sequential strings
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ b(&check_encoding, Label::kNear);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ mov(r0, Operand(kIsIndirectStringMask));
+    __ AndP(r0, result);
+    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+  }
+  // Rule out short external strings.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ mov(r0, Operand(kShortExternalStringMask));
+  __ AndP(r0, result);
+  __ bne(call_runtime /*, cr0*/);
+  __ LoadP(string,
+           FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label one_byte, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ mov(r0, Operand(kStringEncodingMask));
+  __ AndP(r0, result);
+  __ bne(&one_byte, Label::kNear);
+  // Two-byte string.
+  __ ShiftLeftP(result, index, Operand(1));
+  __ LoadLogicalHalfWordP(result, MemOperand(string, result));
+  __ b(&done, Label::kNear);
+  __ bind(&one_byte);
+  // One-byte string.
+  __ LoadlB(result, MemOperand(string, index));
+  __ bind(&done);
+}
+
+static MemOperand ExpConstant(int index, Register base) {
+  return MemOperand(base, index * kDoubleSize);
+}
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+                                   DoubleRegister result,
+                                   DoubleRegister double_scratch1,
+                                   DoubleRegister double_scratch2,
+                                   Register temp1, Register temp2,
+                                   Register temp3) {
+  DCHECK(!input.is(result));
+  DCHECK(!input.is(double_scratch1));
+  DCHECK(!input.is(double_scratch2));
+  DCHECK(!result.is(double_scratch1));
+  DCHECK(!result.is(double_scratch2));
+  DCHECK(!double_scratch1.is(double_scratch2));
+  DCHECK(!temp1.is(temp2));
+  DCHECK(!temp1.is(temp3));
+  DCHECK(!temp2.is(temp3));
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
+
+  Label zero, infinity, done;
+
+  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+  __ LoadDouble(double_scratch1, ExpConstant(0, temp3));
+  __ cdbr(double_scratch1, input);
+  __ ldr(result, input);
+  __ bunordered(&done, Label::kNear);
+  __ bge(&zero, Label::kNear);
+
+  __ LoadDouble(double_scratch2, ExpConstant(1, temp3));
+  __ cdbr(input, double_scratch2);
+  __ bge(&infinity, Label::kNear);
+
+  __ LoadDouble(double_scratch1, ExpConstant(3, temp3));
+  __ LoadDouble(result, ExpConstant(4, temp3));
+
+  // Do not generate madbr, as intermediate results are not
+  // rounded properly.
+  __ mdbr(double_scratch1, input);
+  __ adbr(double_scratch1, result);
+
+  // Move low word of double_scratch1 to temp2
+  __ lgdr(temp2, double_scratch1);
+  __ nihf(temp2, Operand::Zero());
+
+  __ sdbr(double_scratch1, result);
+  __ LoadDouble(result, ExpConstant(6, temp3));
+  __ LoadDouble(double_scratch2, ExpConstant(5, temp3));
+  __ mdbr(double_scratch1, double_scratch2);
+  __ sdbr(double_scratch1, input);
+  __ sdbr(result, double_scratch1);
+  __ ldr(double_scratch2, double_scratch1);
+  __ mdbr(double_scratch2, double_scratch2);
+  __ mdbr(result, double_scratch2);
+  __ LoadDouble(double_scratch2, ExpConstant(7, temp3));
+  __ mdbr(result, double_scratch2);
+  __ sdbr(result, double_scratch1);
+  __ LoadDouble(double_scratch2, ExpConstant(8, temp3));
+  __ adbr(result, double_scratch2);
+  __ ShiftRight(temp1, temp2, Operand(11));
+  __ AndP(temp2, Operand(0x7ff));
+  __ AddP(temp1, Operand(0x3ff));
+
+  // Must not call ExpConstant() after overwriting temp3!
+  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+  __ ShiftLeft(temp2, temp2, Operand(3));
+
+  __ lg(temp2, MemOperand(temp2, temp3));
+  __ sllg(temp1, temp1, Operand(52));
+  __ ogr(temp2, temp1);
+  __ ldgr(double_scratch1, temp2);
+
+  __ mdbr(result, double_scratch1);
+  __ b(&done, Label::kNear);
+
+  __ bind(&zero);
+  __ lzdr(kDoubleRegZero);
+  __ ldr(result, kDoubleRegZero);
+  __ b(&done, Label::kNear);
+
+  __ bind(&infinity);
+  __ LoadDouble(result, ExpConstant(2, temp3));
+
+  __ bind(&done);
+}
+
+#undef __
+
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+  USE(isolate);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  // Since patcher is a large object, allocate it dynamically when needed,
+  // to avoid overloading the stack in stress conditions.
+  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+  // the process, before the simulator ICache is set up.
+  base::SmartPointer<CodePatcher> patcher(
+      new CodePatcher(isolate, young_sequence_.start(),
+                      young_sequence_.length(), CodePatcher::DONT_FLUSH));
+  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+  patcher->masm()->PushStandardFrame(r3);
+}
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+  return Assembler::IsNop(Assembler::instr_at(candidate));
+}
+#endif
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+  bool result = isolate->code_aging_helper()->IsYoung(sequence);
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
+  return result;
+}
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+                               MarkingParity* parity) {
+  if (IsYoungSequence(isolate, sequence)) {
+    *age = kNoAgeCodeAge;
+    *parity = NO_MARKING_PARITY;
+  } else {
+    Code* code = NULL;
+    Address target_address =
+        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
+    Code* stub = GetCodeFromTargetAddress(target_address);
+    GetCodeAgeAndParity(stub, age, parity);
+  }
+}
+
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
+                                MarkingParity parity) {
+  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+  if (age == kNoAgeCodeAge) {
+    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+    Assembler::FlushICache(isolate, sequence, young_length);
+  } else {
+    // FIXED_SEQUENCE
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    CodePatcher patcher(isolate, sequence, young_length);
+    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+    // We need to push lr on the stack so that GenerateMakeCodeYoungAgainCommon
+    // knows where to pick up the return address.
+    //
+    // Since we can no longer guarantee that ip will hold the branch address
+    // because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
+    // can calculate the branch address offset.
+    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
+    patcher.masm()->CleanseP(r14);
+    patcher.masm()->Push(r14);
+    patcher.masm()->mov(r2, Operand(target));
+    patcher.masm()->Call(r2);
+    for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
+         i += 2) {
+      // TODO(joransiu): Create nop function to pad
+      //       (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
+      patcher.masm()->nop();  // 2-byte nops().
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/codegen-s390.h b/src/s390/codegen-s390.h
new file mode 100644
index 0000000..18cf8e2
--- /dev/null
+++ b/src/s390/codegen-s390.h
@@ -0,0 +1,44 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+//
+// Copyright IBM Corp. 2012, 2015. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_CODEGEN_S390_H_
+#define V8_S390_CODEGEN_S390_H_
+
+#include "src/ast/ast.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm, Register string, Register index,
+                       Register result, Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+class MathExpGenerator : public AllStatic {
+ public:
+  // Register input isn't modified. All other registers are clobbered.
+  static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+                          DoubleRegister result, DoubleRegister double_scratch1,
+                          DoubleRegister double_scratch2, Register temp1,
+                          Register temp2, Register temp3);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_CODEGEN_S390_H_
diff --git a/src/s390/constants-s390.cc b/src/s390/constants-s390.cc
new file mode 100644
index 0000000..a958082
--- /dev/null
+++ b/src/s390/constants-s390.cc
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/s390/constants-s390.h"
+
+namespace v8 {
+namespace internal {
+
+// These register names are defined to match the native disassembler
+// formatting; see, for example, the output of "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+    "r0", "r1", "r2",  "r3", "r4", "r5",  "r6",  "r7",
+    "r8", "r9", "r10", "fp", "ip", "r13", "r14", "sp"};
+
+const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
+    "f0", "f1", "f2",  "f3",  "f4",  "f5",  "f6",  "f7",
+    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15"};
+
+int DoubleRegisters::Number(const char* name) {
+  for (int i = 0; i < kNumDoubleRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
+}
+
+int Registers::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/constants-s390.h b/src/s390/constants-s390.h
new file mode 100644
index 0000000..c313c92
--- /dev/null
+++ b/src/s390/constants-s390.h
@@ -0,0 +1,1561 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_CONSTANTS_S390_H_
+#define V8_S390_CONSTANTS_S390_H_
+
+// Get the standard printf format macros for C99 stdint types.
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Number of registers
+const int kNumRegisters = 16;
+
+// FP support.
+const int kNumDoubleRegisters = 16;
+
+const int kNoRegister = -1;
+
+// Sign-extend the least significant 16 bits of value <imm>.
+#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
+
+// Sign-extend the least significant 26 bits of value <imm>.
+#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
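+
+// Worked example (assuming a 32-bit int, as the casts above require): the
+// shift pair replicates bit 15 (respectively bit 25) into the upper bits, so
+//   SIGN_EXT_IMM16(0x8000) == -32768  // bit 15 set, value becomes negative
+//   SIGN_EXT_IMM16(0x7FFF) ==  32767  // bit 15 clear, value unchanged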
+
+// -----------------------------------------------------------------------------
+// Conditions.
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate z/Architecture instructions.
+//
+// Section references in the code refer to the "z/Architecture Principles
+// Of Operation" http://publibfi.boulder.ibm.com/epubs/pdf/dz9zr009.pdf
+//
+
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+enum Condition {
+  kNoCondition = -1,
+  eq = 0x8,  // Equal.
+  ne = 0x7,  // Not equal.
+  ge = 0xa,  // Greater or equal.
+  lt = 0x4,  // Less than.
+  gt = 0x2,  // Greater than.
+  le = 0xc,  // Less than or equal.
+  al = 0xf,  // Always.
+
+  CC_NOP = 0x0,           // S390 NOP
+  CC_EQ = 0x08,           // S390 condition code 0b1000
+  CC_LT = 0x04,           // S390 condition code 0b0100
+  CC_LE = CC_EQ | CC_LT,  // S390 condition code 0b1100
+  CC_GT = 0x02,           // S390 condition code 0b0010
+  CC_GE = CC_EQ | CC_GT,  // S390 condition code 0b1010
+  CC_OF = 0x01,           // S390 condition code 0b0001
+  CC_NOF = 0x0E,          // S390 condition code 0b1110
+  CC_ALWAYS = 0x0F,       // S390 always taken branch
+  unordered = CC_OF,      // Floating-point unordered
+  ordered = CC_NOF,       // Floating-point ordered
+  overflow = CC_OF,       // Summary overflow
+  nooverflow = CC_NOF,
+
+  mask0x0 = 0,  // no jumps
+  mask0x1 = 1,
+  mask0x2 = 2,
+  mask0x3 = 3,
+  mask0x4 = 4,
+  mask0x5 = 5,
+  mask0x6 = 6,
+  mask0x7 = 7,
+  mask0x8 = 8,
+  mask0x9 = 9,
+  mask0xA = 10,
+  mask0xB = 11,
+  mask0xC = 12,
+  mask0xD = 13,
+  mask0xE = 14,
+  mask0xF = 15,
+
+  // Rounding modes for the floating point facility
+  CURRENT_ROUNDING_MODE = 0,
+  ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0 = 1,
+  ROUND_TO_PREPARE_FOR_SHORTER_PRECISION = 3,
+  ROUND_TO_NEAREST_WITH_TIES_TO_EVEN = 4,
+  ROUND_TOWARD_0 = 5,
+  ROUND_TOWARD_PLUS_INFINITE = 6,
+  ROUND_TOWARD_MINUS_INFINITE = 7
+};
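+
+// Illustrative sanity check of the mask encoding above: bit 3 of the 4-bit
+// branch mask selects CC0, bit 2 selects CC1, bit 1 selects CC2 and bit 0
+// selects CC3, so composite conditions are plain bitwise ORs.
+static_assert(CC_LE == (CC_EQ | CC_LT), "LE branches when CC is 0 or 1");
+static_assert(CC_GE == (CC_EQ | CC_GT), "GE branches when CC is 0 or 2");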
+
+inline Condition NegateCondition(Condition cond) {
+  DCHECK(cond != al);
+  switch (cond) {
+    case eq:
+      return ne;
+    case ne:
+      return eq;
+    case ge:
+      return lt;
+    case gt:
+      return le;
+    case le:
+      return gt;
+    case lt:
+      return ge;
+    case lt | gt:
+      return eq;
+    case le | ge:
+      return CC_OF;
+    case CC_OF:
+      return CC_NOF;
+    default:
+      DCHECK(false);
+  }
+  return al;
+}
+
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
+  switch (cond) {
+    case lt:
+      return gt;
+    case gt:
+      return lt;
+    case ge:
+      return le;
+    case le:
+      return ge;
+    case eq:
+      return eq;
+    case ne:
+      return ne;
+    default:
+      DCHECK(false);
+      return cond;
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32-bit integers
+// representing instructions from usual 32-bit values.
+// Instruction objects are pointers to 32-bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+typedef uint16_t TwoByteInstr;
+typedef uint32_t FourByteInstr;
+typedef uint64_t SixByteInstr;
+
+// Opcodes as defined in Appendix B-2 table
+enum Opcode {
+  A = 0x5A,           // Add (32)
+  ADB = 0xED1A,       // Add (long BFP)
+  ADBR = 0xB31A,      // Add (long BFP)
+  ADTR = 0xB3D2,      // Add (long DFP)
+  ADTRA = 0xB3D2,     // Add (long DFP)
+  AEB = 0xED0A,       // Add (short BFP)
+  AEBR = 0xB30A,      // Add (short BFP)
+  AFI = 0xC29,        // Add Immediate (32)
+  AG = 0xE308,        // Add (64)
+  AGF = 0xE318,       // Add (64<-32)
+  AGFI = 0xC28,       // Add Immediate (64<-32)
+  AGFR = 0xB918,      // Add (64<-32)
+  AGHI = 0xA7B,       // Add Halfword Immediate (64)
+  AGHIK = 0xECD9,     // Add Immediate (64<-16)
+  AGR = 0xB908,       // Add (64)
+  AGRK = 0xB9E8,      // Add (64)
+  AGSI = 0xEB7A,      // Add Immediate (64<-8)
+  AH = 0x4A,          // Add Halfword
+  AHHHR = 0xB9C8,     // Add High (32)
+  AHHLR = 0xB9D8,     // Add High (32)
+  AHI = 0xA7A,        // Add Halfword Immediate (32)
+  AHIK = 0xECD8,      // Add Immediate (32<-16)
+  AHY = 0xE37A,       // Add Halfword
+  AIH = 0xCC8,        // Add Immediate High (32)
+  AL = 0x5E,          // Add Logical (32)
+  ALC = 0xE398,       // Add Logical With Carry (32)
+  ALCG = 0xE388,      // Add Logical With Carry (64)
+  ALCGR = 0xB988,     // Add Logical With Carry (64)
+  ALCR = 0xB998,      // Add Logical With Carry (32)
+  ALFI = 0xC2B,       // Add Logical Immediate (32)
+  ALG = 0xE30A,       // Add Logical (64)
+  ALGF = 0xE31A,      // Add Logical (64<-32)
+  ALGFI = 0xC2A,      // Add Logical Immediate (64<-32)
+  ALGFR = 0xB91A,     // Add Logical (64<-32)
+  ALGHSIK = 0xECDB,   // Add Logical With Signed Immediate (64<-16)
+  ALGR = 0xB90A,      // Add Logical (64)
+  ALGRK = 0xB9EA,     // Add Logical (64)
+  ALGSI = 0xEB7E,     // Add Logical With Signed Immediate (64<-8)
+  ALHHHR = 0xB9CA,    // Add Logical High (32)
+  ALHHLR = 0xB9DA,    // Add Logical High (32)
+  ALHSIK = 0xECDA,    // Add Logical With Signed Immediate (32<-16)
+  ALR = 0x1E,         // Add Logical (32)
+  ALRK = 0xB9FA,      // Add Logical (32)
+  ALSI = 0xEB6E,      // Add Logical With Signed Immediate (32<-8)
+  ALSIH = 0xCCA,      // Add Logical With Signed Immediate High (32)
+  ALSIHN = 0xCCB,     // Add Logical With Signed Immediate High (32)
+  ALY = 0xE35E,       // Add Logical (32)
+  AP = 0xFA,          // Add Decimal
+  AR = 0x1A,          // Add (32)
+  ARK = 0xB9F8,       // Add (32)
+  ASI = 0xEB6A,       // Add Immediate (32<-8)
+  AXBR = 0xB34A,      // Add (extended BFP)
+  AXTR = 0xB3DA,      // Add (extended DFP)
+  AXTRA = 0xB3DA,     // Add (extended DFP)
+  AY = 0xE35A,        // Add (32)
+  BAL = 0x45,         // Branch And Link
+  BALR = 0x05,        // Branch And Link
+  BAS = 0x4D,         // Branch And Save
+  BASR = 0x0D,        // Branch And Save
+  BASSM = 0x0C,       // Branch And Save And Set Mode
+  BC = 0x47,          // Branch On Condition
+  BCR = 0x07,         // Branch On Condition
+  BCT = 0x46,         // Branch On Count (32)
+  BCTG = 0xE346,      // Branch On Count (64)
+  BCTGR = 0xB946,     // Branch On Count (64)
+  BCTR = 0x06,        // Branch On Count (32)
+  BPP = 0xC7,         // Branch Prediction Preload
+  BPRP = 0xC5,        // Branch Prediction Relative Preload
+  BRAS = 0xA75,       // Branch Relative And Save
+  BRASL = 0xC05,      // Branch Relative And Save Long
+  BRC = 0xA74,        // Branch Relative On Condition
+  BRCL = 0xC04,       // Branch Relative On Condition Long
+  BRCT = 0xA76,       // Branch Relative On Count (32)
+  BRCTG = 0xA77,      // Branch Relative On Count (64)
+  BRCTH = 0xCC6,      // Branch Relative On Count High (32)
+  BRXH = 0x84,        // Branch Relative On Index High (32)
+  BRXHG = 0xEC44,     // Branch Relative On Index High (64)
+  BRXLE = 0x85,       // Branch Relative On Index Low Or Eq. (32)
+  BRXLG = 0xEC45,     // Branch Relative On Index Low Or Eq. (64)
+  BSM = 0x0B,         // Branch And Set Mode
+  BXH = 0x86,         // Branch On Index High (32)
+  BXHG = 0xEB44,      // Branch On Index High (64)
+  BXLE = 0x87,        // Branch On Index Low Or Equal (32)
+  BXLEG = 0xEB45,     // Branch On Index Low Or Equal (64)
+  C = 0x59,           // Compare (32)
+  CDB = 0xED19,       // Compare (long BFP)
+  CDBR = 0xB319,      // Compare (long BFP)
+  CDFBR = 0xB395,     // Convert From Fixed (32 to long BFP)
+  CDFBRA = 0xB395,    // Convert From Fixed (32 to long BFP)
+  CDFTR = 0xB951,     // Convert From Fixed (32 to long DFP)
+  CDGBR = 0xB3A5,     // Convert From Fixed (64 to long BFP)
+  CDGBRA = 0xB3A5,    // Convert From Fixed (64 to long BFP)
+  CDGTR = 0xB3F1,     // Convert From Fixed (64 to long DFP)
+  CDGTRA = 0xB3F1,    // Convert From Fixed (64 to long DFP)
+  CDLFBR = 0xB391,    // Convert From Logical (32 to long BFP)
+  CDLFTR = 0xB953,    // Convert From Logical (32 to long DFP)
+  CDLGBR = 0xB3A1,    // Convert From Logical (64 to long BFP)
+  CDLGTR = 0xB952,    // Convert From Logical (64 to long DFP)
+  CDS = 0xBB,         // Compare Double And Swap (32)
+  CDSG = 0xEB3E,      // Compare Double And Swap (64)
+  CDSTR = 0xB3F3,     // Convert From Signed Packed (64 to long DFP)
+  CDSY = 0xEB31,      // Compare Double And Swap (32)
+  CDTR = 0xB3E4,      // Compare (long DFP)
+  CDUTR = 0xB3F2,     // Convert From Unsigned Packed (64 to long DFP)
+  CDZT = 0xEDAA,      // Convert From Zoned (to long DFP)
+  CEB = 0xED09,       // Compare (short BFP)
+  CEBR = 0xB309,      // Compare (short BFP)
+  CEDTR = 0xB3F4,     // Compare Biased Exponent (long DFP)
+  CEFBR = 0xB394,     // Convert From Fixed (32 to short BFP)
+  CEFBRA = 0xB394,    // Convert From Fixed (32 to short BFP)
+  CEGBR = 0xB3A4,     // Convert From Fixed (64 to short BFP)
+  CEGBRA = 0xB3A4,    // Convert From Fixed (64 to short BFP)
+  CELFBR = 0xB390,    // Convert From Logical (32 to short BFP)
+  CELGBR = 0xB3A0,    // Convert From Logical (64 to short BFP)
+  CEXTR = 0xB3FC,     // Compare Biased Exponent (extended DFP)
+  CFC = 0xB21A,       // Compare And Form Codeword
+  CFDBR = 0xB399,     // Convert To Fixed (long BFP to 32)
+  CFDBRA = 0xB399,    // Convert To Fixed (long BFP to 32)
+  CFDR = 0xB3B9,      // Convert To Fixed (long HFP to 32)
+  CFDTR = 0xB941,     // Convert To Fixed (long DFP to 32)
+  CFEBR = 0xB398,     // Convert To Fixed (short BFP to 32)
+  CFEBRA = 0xB398,    // Convert To Fixed (short BFP to 32)
+  CFER = 0xB3B8,      // Convert To Fixed (short HFP to 32)
+  CFI = 0xC2D,        // Compare Immediate (32)
+  CFXBR = 0xB39A,     // Convert To Fixed (extended BFP to 32)
+  CFXBRA = 0xB39A,    // Convert To Fixed (extended BFP to 32)
+  CFXR = 0xB3BA,      // Convert To Fixed (extended HFP to 32)
+  CFXTR = 0xB949,     // Convert To Fixed (extended DFP to 32)
+  CG = 0xE320,        // Compare (64)
+  CGDBR = 0xB3A9,     // Convert To Fixed (long BFP to 64)
+  CGDBRA = 0xB3A9,    // Convert To Fixed (long BFP to 64)
+  CGDR = 0xB3C9,      // Convert To Fixed (long HFP to 64)
+  CGDTR = 0xB3E1,     // Convert To Fixed (long DFP to 64)
+  CGDTRA = 0xB3E1,    // Convert To Fixed (long DFP to 64)
+  CGEBR = 0xB3A8,     // Convert To Fixed (short BFP to 64)
+  CGEBRA = 0xB3A8,    // Convert To Fixed (short BFP to 64)
+  CGER = 0xB3C8,      // Convert To Fixed (short HFP to 64)
+  CGF = 0xE330,       // Compare (64<-32)
+  CGFI = 0xC2C,       // Compare Immediate (64<-32)
+  CGFR = 0xB930,      // Compare (64<-32)
+  CGFRL = 0xC6C,      // Compare Relative Long (64<-32)
+  CGH = 0xE334,       // Compare Halfword (64<-16)
+  CGHI = 0xA7F,       // Compare Halfword Immediate (64<-16)
+  CGHRL = 0xC64,      // Compare Halfword Relative Long (64<-16)
+  CGHSI = 0xE558,     // Compare Halfword Immediate (64<-16)
+  CGIB = 0xECFC,      // Compare Immediate And Branch (64<-8)
+  CGIJ = 0xEC7C,      // Compare Immediate And Branch Relative (64<-8)
+  CGIT = 0xEC70,      // Compare Immediate And Trap (64<-16)
+  CGR = 0xB920,       // Compare (64)
+  CGRB = 0xECE4,      // Compare And Branch (64)
+  CGRJ = 0xEC64,      // Compare And Branch Relative (64)
+  CGRL = 0xC68,       // Compare Relative Long (64)
+  CGRT = 0xB960,      // Compare And Trap (64)
+  CGXBR = 0xB3AA,     // Convert To Fixed (extended BFP to 64)
+  CGXBRA = 0xB3AA,    // Convert To Fixed (extended BFP to 64)
+  CGXR = 0xB3CA,      // Convert To Fixed (extended HFP to 64)
+  CGXTR = 0xB3E9,     // Convert To Fixed (extended DFP to 64)
+  CGXTRA = 0xB3E9,    // Convert To Fixed (extended DFP to 64)
+  CH = 0x49,          // Compare Halfword (32<-16)
+  CHF = 0xE3CD,       // Compare High (32)
+  CHHR = 0xB9CD,      // Compare High (32)
+  CHHSI = 0xE554,     // Compare Halfword Immediate (16)
+  CHI = 0xA7E,        // Compare Halfword Immediate (32<-16)
+  CHLR = 0xB9DD,      // Compare High (32)
+  CHRL = 0xC65,       // Compare Halfword Relative Long (32<-16)
+  CHSI = 0xE55C,      // Compare Halfword Immediate (32<-16)
+  CHY = 0xE379,       // Compare Halfword (32<-16)
+  CIB = 0xECFE,       // Compare Immediate And Branch (32<-8)
+  CIH = 0xCCD,        // Compare Immediate High (32)
+  CIJ = 0xEC7E,       // Compare Immediate And Branch Relative (32<-8)
+  CIT = 0xEC72,       // Compare Immediate And Trap (32<-16)
+  CKSM = 0xB241,      // Checksum
+  CL = 0x55,          // Compare Logical (32)
+  CLC = 0xD5,         // Compare Logical (character)
+  CLCL = 0x0F,        // Compare Logical Long
+  CLCLE = 0xA9,       // Compare Logical Long Extended
+  CLCLU = 0xEB8F,     // Compare Logical Long Unicode
+  CLFDBR = 0xB39D,    // Convert To Logical (long BFP to 32)
+  CLFDTR = 0xB943,    // Convert To Logical (long DFP to 32)
+  CLFEBR = 0xB39C,    // Convert To Logical (short BFP to 32)
+  CLFHSI = 0xE55D,    // Compare Logical Immediate (32<-16)
+  CLFI = 0xC2F,       // Compare Logical Immediate (32)
+  CLFIT = 0xEC73,     // Compare Logical Immediate And Trap (32<-16)
+  CLFXBR = 0xB39E,    // Convert To Logical (extended BFP to 32)
+  CLFXTR = 0xB94B,    // Convert To Logical (extended DFP to 32)
+  CLG = 0xE321,       // Compare Logical (64)
+  CLGDBR = 0xB3AD,    // Convert To Logical (long BFP to 64)
+  CLGDTR = 0xB942,    // Convert To Logical (long DFP to 64)
+  CLGEBR = 0xB3AC,    // Convert To Logical (short BFP to 64)
+  CLGF = 0xE331,      // Compare Logical (64<-32)
+  CLGFI = 0xC2E,      // Compare Logical Immediate (64<-32)
+  CLGR = 0xB921,      // Compare Logical (64)
+  CLI = 0x95,         // Compare Logical Immediate (8)
+  CLIY = 0xEB55,      // Compare Logical Immediate (8)
+  CLR = 0x15,         // Compare Logical (32)
+  CLY = 0xE355,       // Compare Logical (32)
+  CD = 0x69,          // Compare (LH)
+  CDR = 0x29,         // Compare (LH)
+  CR = 0x19,          // Compare (32)
+  CSST = 0xC82,       // Compare And Swap And Store
+  CSXTR = 0xB3EB,     // Convert To Signed Packed (extended DFP to 128)
+  CSY = 0xEB14,       // Compare And Swap (32)
+  CU12 = 0xB2A7,      // Convert Utf-8 To Utf-16
+  CU14 = 0xB9B0,      // Convert Utf-8 To Utf-32
+  CU21 = 0xB2A6,      // Convert Utf-16 To Utf-8
+  CU24 = 0xB9B1,      // Convert Utf-16 To Utf-32
+  CU41 = 0xB9B2,      // Convert Utf-32 To Utf-8
+  CU42 = 0xB9B3,      // Convert Utf-32 To Utf-16
+  CUDTR = 0xB3E2,     // Convert To Unsigned Packed (long DFP to 64)
+  CUSE = 0xB257,      // Compare Until Substring Equal
+  CUTFU = 0xB2A7,     // Convert Utf-8 To Unicode
+  CUUTF = 0xB2A6,     // Convert Unicode To Utf-8
+  CUXTR = 0xB3EA,     // Convert To Unsigned Packed (extended DFP to 128)
+  CVB = 0x4F,         // Convert To Binary (32)
+  CVBG = 0xE30E,      // Convert To Binary (64)
+  CVBY = 0xE306,      // Convert To Binary (32)
+  CVD = 0x4E,         // Convert To Decimal (32)
+  CVDG = 0xE32E,      // Convert To Decimal (64)
+  CVDY = 0xE326,      // Convert To Decimal (32)
+  CXBR = 0xB349,      // Compare (extended BFP)
+  CXFBR = 0xB396,     // Convert From Fixed (32 to extended BFP)
+  CXFBRA = 0xB396,    // Convert From Fixed (32 to extended BFP)
+  CXFTR = 0xB959,     // Convert From Fixed (32 to extended DFP)
+  CXGBR = 0xB3A6,     // Convert From Fixed (64 to extended BFP)
+  CXGBRA = 0xB3A6,    // Convert From Fixed (64 to extended BFP)
+  CXGTR = 0xB3F9,     // Convert From Fixed (64 to extended DFP)
+  CXGTRA = 0xB3F9,    // Convert From Fixed (64 to extended DFP)
+  CXLFBR = 0xB392,    // Convert From Logical (32 to extended BFP)
+  CXLFTR = 0xB95B,    // Convert From Logical (32 to extended DFP)
+  CXLGBR = 0xB3A2,    // Convert From Logical (64 to extended BFP)
+  CXLGTR = 0xB95A,    // Convert From Logical (64 to extended DFP)
+  CXSTR = 0xB3FB,     // Convert From Signed Packed (128 to extended DFP)
+  CXTR = 0xB3EC,      // Compare (extended DFP)
+  CXUTR = 0xB3FA,     // Convert From Unsigned Packed (128 to ext. DFP)
+  CXZT = 0xEDAB,      // Convert From Zoned (to extended DFP)
+  CY = 0xE359,        // Compare (32)
+  CZDT = 0xEDA8,      // Convert To Zoned (from long DFP)
+  CZXT = 0xEDA9,      // Convert To Zoned (from extended DFP)
+  D = 0x5D,           // Divide (32<-64)
+  DDB = 0xED1D,       // Divide (long BFP)
+  DDBR = 0xB31D,      // Divide (long BFP)
+  DDTR = 0xB3D1,      // Divide (long DFP)
+  DDTRA = 0xB3D1,     // Divide (long DFP)
+  DEB = 0xED0D,       // Divide (short BFP)
+  DEBR = 0xB30D,      // Divide (short BFP)
+  DIDBR = 0xB35B,     // Divide To Integer (long BFP)
+  DIEBR = 0xB353,     // Divide To Integer (short BFP)
+  DL = 0xE397,        // Divide Logical (32<-64)
+  DLG = 0xE387,       // Divide Logical (64<-128)
+  DLGR = 0xB987,      // Divide Logical (64<-128)
+  DLR = 0xB997,       // Divide Logical (32<-64)
+  DP = 0xFD,          // Divide Decimal
+  DR = 0x1D,          // Divide (32<-64)
+  DSG = 0xE30D,       // Divide Single (64)
+  DSGF = 0xE31D,      // Divide Single (64<-32)
+  DSGFR = 0xB91D,     // Divide Single (64<-32)
+  DSGR = 0xB90D,      // Divide Single (64)
+  DXBR = 0xB34D,      // Divide (extended BFP)
+  DXTR = 0xB3D9,      // Divide (extended DFP)
+  DXTRA = 0xB3D9,     // Divide (extended DFP)
+  EAR = 0xB24F,       // Extract Access
+  ECAG = 0xEB4C,      // Extract Cache Attribute
+  ECTG = 0xC81,       // Extract Cpu Time
+  ED = 0xDE,          // Edit
+  EDMK = 0xDF,        // Edit And Mark
+  EEDTR = 0xB3E5,     // Extract Biased Exponent (long DFP to 64)
+  EEXTR = 0xB3ED,     // Extract Biased Exponent (extended DFP to 64)
+  EFPC = 0xB38C,      // Extract Fpc
+  EPSW = 0xB98D,      // Extract Psw
+  ESDTR = 0xB3E7,     // Extract Significance (long DFP)
+  ESXTR = 0xB3EF,     // Extract Significance (extended DFP)
+  ETND = 0xB2EC,      // Extract Transaction Nesting Depth
+  EX = 0x44,          // Execute
+  EXRL = 0xC60,       // Execute Relative Long
+  FIDBR = 0xB35F,     // Load Fp Integer (long BFP)
+  FIDBRA = 0xB35F,    // Load Fp Integer (long BFP)
+  FIDTR = 0xB3D7,     // Load Fp Integer (long DFP)
+  FIEBR = 0xB357,     // Load Fp Integer (short BFP)
+  FIEBRA = 0xB357,    // Load Fp Integer (short BFP)
+  FIXBR = 0xB347,     // Load Fp Integer (extended BFP)
+  FIXBRA = 0xB347,    // Load Fp Integer (extended BFP)
+  FIXTR = 0xB3DF,     // Load Fp Integer (extended DFP)
+  FLOGR = 0xB983,     // Find Leftmost One
+  HSCH = 0xB231,      // Halt Subchannel
+  IC_z = 0x43,        // Insert Character
+  ICM = 0xBF,         // Insert Characters Under Mask (low)
+  ICMH = 0xEB80,      // Insert Characters Under Mask (high)
+  ICMY = 0xEB81,      // Insert Characters Under Mask (low)
+  ICY = 0xE373,       // Insert Character
+  IEDTR = 0xB3F6,     // Insert Biased Exponent (64 to long DFP)
+  IEXTR = 0xB3FE,     // Insert Biased Exponent (64 to extended DFP)
+  IIHF = 0xC08,       // Insert Immediate (high)
+  IIHH = 0xA50,       // Insert Immediate (high high)
+  IIHL = 0xA51,       // Insert Immediate (high low)
+  IILF = 0xC09,       // Insert Immediate (low)
+  IILH = 0xA52,       // Insert Immediate (low high)
+  IILL = 0xA53,       // Insert Immediate (low low)
+  IPM = 0xB222,       // Insert Program Mask
+  KDB = 0xED18,       // Compare And Signal (long BFP)
+  KDBR = 0xB318,      // Compare And Signal (long BFP)
+  KDTR = 0xB3E0,      // Compare And Signal (long DFP)
+  KEB = 0xED08,       // Compare And Signal (short BFP)
+  KEBR = 0xB308,      // Compare And Signal (short BFP)
+  KIMD = 0xB93E,      // Compute Intermediate Message Digest
+  KLMD = 0xB93F,      // Compute Last Message Digest
+  KM = 0xB92E,        // Cipher Message
+  KMAC = 0xB91E,      // Compute Message Authentication Code
+  KMC = 0xB92F,       // Cipher Message With Chaining
+  KMCTR = 0xB92D,     // Cipher Message With Counter
+  KMF = 0xB92A,       // Cipher Message With Cfb
+  KMO = 0xB92B,       // Cipher Message With Ofb
+  KXBR = 0xB348,      // Compare And Signal (extended BFP)
+  KXTR = 0xB3E8,      // Compare And Signal (extended DFP)
+  L = 0x58,           // Load (32)
+  LA = 0x41,          // Load Address
+  LAA = 0xEBF8,       // Load And Add (32)
+  LAAG = 0xEBE8,      // Load And Add (64)
+  LAAL = 0xEBFA,      // Load And Add Logical (32)
+  LAALG = 0xEBEA,     // Load And Add Logical (64)
+  LAE = 0x51,         // Load Address Extended
+  LAEY = 0xE375,      // Load Address Extended
+  LAN = 0xEBF4,       // Load And And (32)
+  LANG = 0xEBE4,      // Load And And (64)
+  LAO = 0xEBF6,       // Load And Or (32)
+  LAOG = 0xEBE6,      // Load And Or (64)
+  LARL = 0xC00,       // Load Address Relative Long
+  LAT = 0xE39F,       // Load And Trap (32L<-32)
+  LAX = 0xEBF7,       // Load And Exclusive Or (32)
+  LAXG = 0xEBE7,      // Load And Exclusive Or (64)
+  LAY = 0xE371,       // Load Address
+  LB = 0xE376,        // Load Byte (32)
+  LBH = 0xE3C0,       // Load Byte High (32<-8)
+  LBR = 0xB926,       // Load Byte (32)
+  LCDBR = 0xB313,     // Load Complement (long BFP)
+  LCDFR = 0xB373,     // Load Complement (long)
+  LCEBR = 0xB303,     // Load Complement (short BFP)
+  LCGFR = 0xB913,     // Load Complement (64<-32)
+  LCGR = 0xB903,      // Load Complement (64)
+  LCR = 0x13,         // Load Complement (32)
+  LCXBR = 0xB343,     // Load Complement (extended BFP)
+  LD = 0x68,          // Load (long)
+  LDEB = 0xED04,      // Load Lengthened (short to long BFP)
+  LDEBR = 0xB304,     // Load Lengthened (short to long BFP)
+  LDETR = 0xB3D4,     // Load Lengthened (short to long DFP)
+  LDGR = 0xB3C1,      // Load Fpr From Gr (64 to long)
+  LDR = 0x28,         // Load (long)
+  LDXBR = 0xB345,     // Load Rounded (extended to long BFP)
+  LDXBRA = 0xB345,    // Load Rounded (extended to long BFP)
+  LDXTR = 0xB3DD,     // Load Rounded (extended to long DFP)
+  LDY = 0xED65,       // Load (long)
+  LE = 0x78,          // Load (short)
+  LEDBR = 0xB344,     // Load Rounded (long to short BFP)
+  LEDBRA = 0xB344,    // Load Rounded (long to short BFP)
+  LEDTR = 0xB3D5,     // Load Rounded (long to short DFP)
+  LER = 0x38,         // Load (short)
+  LEXBR = 0xB346,     // Load Rounded (extended to short BFP)
+  LEXBRA = 0xB346,    // Load Rounded (extended to short BFP)
+  LEY = 0xED64,       // Load (short)
+  LFAS = 0xB2BD,      // Load Fpc And Signal
+  LFH = 0xE3CA,       // Load High (32)
+  LFHAT = 0xE3C8,     // Load High And Trap (32H<-32)
+  LFPC = 0xB29D,      // Load Fpc
+  LG = 0xE304,        // Load (64)
+  LGAT = 0xE385,      // Load And Trap (64)
+  LGB = 0xE377,       // Load Byte (64)
+  LGBR = 0xB906,      // Load Byte (64)
+  LGDR = 0xB3CD,      // Load Gr From Fpr (long to 64)
+  LGF = 0xE314,       // Load (64<-32)
+  LGFI = 0xC01,       // Load Immediate (64<-32)
+  LGFR = 0xB914,      // Load (64<-32)
+  LGFRL = 0xC4C,      // Load Relative Long (64<-32)
+  LGH = 0xE315,       // Load Halfword (64)
+  LGHI = 0xA79,       // Load Halfword Immediate (64)
+  LGHR = 0xB907,      // Load Halfword (64)
+  LGHRL = 0xC44,      // Load Halfword Relative Long (64<-16)
+  LGR = 0xB904,       // Load (64)
+  LGRL = 0xC48,       // Load Relative Long (64)
+  LH = 0x48,          // Load Halfword (32)
+  LHH = 0xE3C4,       // Load Halfword High (32<-16)
+  LHI = 0xA78,        // Load Halfword Immediate (32)
+  LHR = 0xB927,       // Load Halfword (32)
+  LHRL = 0xC45,       // Load Halfword Relative Long (32<-16)
+  LHY = 0xE378,       // Load Halfword (32)
+  LLC = 0xE394,       // Load Logical Character (32)
+  LLCH = 0xE3C2,      // Load Logical Character High (32<-8)
+  LLCR = 0xB994,      // Load Logical Character (32)
+  LLGC = 0xE390,      // Load Logical Character (64)
+  LLGCR = 0xB984,     // Load Logical Character (64)
+  LLGF = 0xE316,      // Load Logical (64<-32)
+  LLGFAT = 0xE39D,    // Load Logical And Trap (64<-32)
+  LLGFR = 0xB916,     // Load Logical (64<-32)
+  LLGFRL = 0xC4E,     // Load Logical Relative Long (64<-32)
+  LLGH = 0xE391,      // Load Logical Halfword (64)
+  LLGHR = 0xB985,     // Load Logical Halfword (64)
+  LLGHRL = 0xC46,     // Load Logical Halfword Relative Long (64<-16)
+  LLGT = 0xE317,      // Load Logical Thirty One Bits
+  LLGTAT = 0xE39C,    // Load Logical Thirty One Bits And Trap (64<-31)
+  LLGTR = 0xB917,     // Load Logical Thirty One Bits
+  LLH = 0xE395,       // Load Logical Halfword (32)
+  LLHH = 0xE3C6,      // Load Logical Halfword High (32<-16)
+  LLHR = 0xB995,      // Load Logical Halfword (32)
+  LLHRL = 0xC42,      // Load Logical Halfword Relative Long (32<-16)
+  LLIHF = 0xC0E,      // Load Logical Immediate (high)
+  LLIHH = 0xA5C,      // Load Logical Immediate (high high)
+  LLIHL = 0xA5D,      // Load Logical Immediate (high low)
+  LLILF = 0xC0F,      // Load Logical Immediate (low)
+  LLILH = 0xA5E,      // Load Logical Immediate (low high)
+  LLILL = 0xA5F,      // Load Logical Immediate (low low)
+  LM = 0x98,          // Load Multiple (32)
+  LMD = 0xEF,         // Load Multiple Disjoint
+  LMG = 0xEB04,       // Load Multiple (64)
+  LMH = 0xEB96,       // Load Multiple High
+  LMY = 0xEB98,       // Load Multiple (32)
+  LNDBR = 0xB311,     // Load Negative (long BFP)
+  LNDFR = 0xB371,     // Load Negative (long)
+  LNEBR = 0xB301,     // Load Negative (short BFP)
+  LNGFR = 0xB911,     // Load Negative (64<-32)
+  LNGR = 0xB901,      // Load Negative (64)
+  LNR = 0x11,         // Load Negative (32)
+  LNXBR = 0xB341,     // Load Negative (extended BFP)
+  LOC = 0xEBF2,       // Load On Condition (32)
+  LOCG = 0xEBE2,      // Load On Condition (64)
+  LOCGR = 0xB9E2,     // Load On Condition (64)
+  LOCR = 0xB9F2,      // Load On Condition (32)
+  LPD = 0xC84,        // Load Pair Disjoint (32)
+  LPDBR = 0xB310,     // Load Positive (long BFP)
+  LPDFR = 0xB370,     // Load Positive (long)
+  LPDG = 0xC85,       // Load Pair Disjoint (64)
+  LPEBR = 0xB300,     // Load Positive (short BFP)
+  LPGFR = 0xB910,     // Load Positive (64<-32)
+  LPGR = 0xB900,      // Load Positive (64)
+  LPQ = 0xE38F,       // Load Pair From Quadword
+  LPR = 0x10,         // Load Positive (32)
+  LPXBR = 0xB340,     // Load Positive (extended BFP)
+  LR = 0x18,          // Load (32)
+  LRL = 0xC4D,        // Load Relative Long (32)
+  LRV = 0xE31E,       // Load Reversed (32)
+  LRVG = 0xE30F,      // Load Reversed (64)
+  LRVGR = 0xB90F,     // Load Reversed (64)
+  LRVH = 0xE31F,      // Load Reversed (16)
+  LRVR = 0xB91F,      // Load Reversed (32)
+  LT = 0xE312,        // Load And Test (32)
+  LTDBR = 0xB312,     // Load And Test (long BFP)
+  LTDTR = 0xB3D6,     // Load And Test (long DFP)
+  LTEBR = 0xB302,     // Load And Test (short BFP)
+  LTG = 0xE302,       // Load And Test (64)
+  LTGF = 0xE332,      // Load And Test (64<-32)
+  LTGFR = 0xB912,     // Load And Test (64<-32)
+  LTGR = 0xB902,      // Load And Test (64)
+  LTR = 0x12,         // Load And Test (32)
+  LTXBR = 0xB342,     // Load And Test (extended BFP)
+  LTXTR = 0xB3DE,     // Load And Test (extended DFP)
+  LXDB = 0xED05,      // Load Lengthened (long to extended BFP)
+  LXDBR = 0xB305,     // Load Lengthened (long to extended BFP)
+  LXDTR = 0xB3DC,     // Load Lengthened (long to extended DFP)
+  LXEB = 0xED06,      // Load Lengthened (short to extended BFP)
+  LXEBR = 0xB306,     // Load Lengthened (short to extended BFP)
+  LXR = 0xB365,       // Load (extended)
+  LY = 0xE358,        // Load (32)
+  LZDR = 0xB375,      // Load Zero (long)
+  LZER = 0xB374,      // Load Zero (short)
+  LZXR = 0xB376,      // Load Zero (extended)
+  M = 0x5C,           // Multiply (64<-32)
+  MADB = 0xED1E,      // Multiply And Add (long BFP)
+  MADBR = 0xB31E,     // Multiply And Add (long BFP)
+  MAEB = 0xED0E,      // Multiply And Add (short BFP)
+  MAEBR = 0xB30E,     // Multiply And Add (short BFP)
+  MC = 0xAF,          // Monitor Call
+  MDB = 0xED1C,       // Multiply (long BFP)
+  MDBR = 0xB31C,      // Multiply (long BFP)
+  MDEB = 0xED0C,      // Multiply (short to long BFP)
+  MDEBR = 0xB30C,     // Multiply (short to long BFP)
+  MDTR = 0xB3D0,      // Multiply (long DFP)
+  MDTRA = 0xB3D0,     // Multiply (long DFP)
+  MEEB = 0xED17,      // Multiply (short BFP)
+  MEEBR = 0xB317,     // Multiply (short BFP)
+  MFY = 0xE35C,       // Multiply (64<-32)
+  MGHI = 0xA7D,       // Multiply Halfword Immediate (64)
+  MH = 0x4C,          // Multiply Halfword (32)
+  MHI = 0xA7C,        // Multiply Halfword Immediate (32)
+  MHY = 0xE37C,       // Multiply Halfword (32)
+  ML = 0xE396,        // Multiply Logical (64<-32)
+  MLG = 0xE386,       // Multiply Logical (128<-64)
+  MLGR = 0xB986,      // Multiply Logical (128<-64)
+  MLR = 0xB996,       // Multiply Logical (64<-32)
+  MP = 0xFC,          // Multiply Decimal
+  MR = 0x1C,          // Multiply (64<-32)
+  MS = 0x71,          // Multiply Single (32)
+  MSCH = 0xB232,      // Modify Subchannel
+  MSDB = 0xED1F,      // Multiply And Subtract (long BFP)
+  MSDBR = 0xB31F,     // Multiply And Subtract (long BFP)
+  MSEB = 0xED0F,      // Multiply And Subtract (short BFP)
+  MSEBR = 0xB30F,     // Multiply And Subtract (short BFP)
+  MSFI = 0xC21,       // Multiply Single Immediate (32)
+  MSG = 0xE30C,       // Multiply Single (64)
+  MSGF = 0xE31C,      // Multiply Single (64<-32)
+  MSGFI = 0xC20,      // Multiply Single Immediate (64<-32)
+  MSGFR = 0xB91C,     // Multiply Single (64<-32)
+  MSGR = 0xB90C,      // Multiply Single (64)
+  MSR = 0xB252,       // Multiply Single (32)
+  MSY = 0xE351,       // Multiply Single (32)
+  MVC = 0xD2,         // Move (character)
+  MVCP = 0xDA,        // Move To Primary
+  MVCDK = 0xE50F,     // Move To Primary
+  MVCIN = 0xE8,       // Move Inverse
+  MVCL = 0x0E,        // Move Long
+  MVCLE = 0xA8,       // Move Long Extended
+  MVCLU = 0xEB8E,     // Move Long Unicode
+  MVGHI = 0xE548,     // Move (64<-16)
+  MVHHI = 0xE544,     // Move (16<-16)
+  MVHI = 0xE54C,      // Move (32<-16)
+  MVI = 0x92,         // Move (immediate)
+  MVIY = 0xEB52,      // Move (immediate)
+  MVN = 0xD1,         // Move Numerics
+  MVO = 0xF1,         // Move With Offset
+  MVST = 0xB255,      // Move String
+  MVZ = 0xD3,         // Move Zones
+  MXBR = 0xB34C,      // Multiply (extended BFP)
+  MXDB = 0xED07,      // Multiply (long to extended BFP)
+  MXDBR = 0xB307,     // Multiply (long to extended BFP)
+  MXTR = 0xB3D8,      // Multiply (extended DFP)
+  MXTRA = 0xB3D8,     // Multiply (extended DFP)
+  N = 0x54,           // And (32)
+  NC = 0xD4,          // And (character)
+  NG = 0xE380,        // And (64)
+  NGR = 0xB980,       // And (64)
+  NGRK = 0xB9E4,      // And (64)
+  NI = 0x94,          // And (immediate)
+  NIAI = 0xB2FA,      // Next Instruction Access Intent Ie Eh
+  NIHF = 0xC0A,       // And Immediate (high)
+  NIHH = 0xA54,       // And Immediate (high high)
+  NIHL = 0xA55,       // And Immediate (high low)
+  NILF = 0xC0B,       // And Immediate (low)
+  NILH = 0xA56,       // And Immediate (low high)
+  NILL = 0xA57,       // And Immediate (low low)
+  NIY = 0xEB54,       // And (immediate)
+  NR = 0x14,          // And (32)
+  NRK = 0xB9F4,       // And (32)
+  NTSTG = 0xE325,     // Nontransactional Store
+  NY = 0xE354,        // And (32)
+  O = 0x56,           // Or (32)
+  OC = 0xD6,          // Or (character)
+  OG = 0xE381,        // Or (64)
+  OGR = 0xB981,       // Or (64)
+  OGRK = 0xB9E6,      // Or (64)
+  OI = 0x96,          // Or (immediate)
+  OIHF = 0xC0C,       // Or Immediate (high)
+  OIHH = 0xA58,       // Or Immediate (high high)
+  OIHL = 0xA59,       // Or Immediate (high low)
+  OILF = 0xC0D,       // Or Immediate (low)
+  OILH = 0xA5A,       // Or Immediate (low high)
+  OILL = 0xA5B,       // Or Immediate (low low)
+  OIY = 0xEB56,       // Or (immediate)
+  OR = 0x16,          // Or (32)
+  ORK = 0xB9F6,       // Or (32)
+  OY = 0xE356,        // Or (32)
+  PACK = 0xF2,        // Pack
+  PCC = 0xB92C,       // Perform Cryptographic Computation
+  PFD = 0xE336,       // Prefetch Data
+  PFDRL = 0xC62,      // Prefetch Data Relative Long
+  PFPO = 0x010A,      // Perform Floating-Point Operation
+  PKA = 0xE9,         // Pack Ascii
+  PKU = 0xE1,         // Pack Unicode
+  PLO = 0xEE,         // Perform Locked Operation
+  POPCNT_Z = 0xB9E1,  // Population Count
+  PPA = 0xB2E8,       // Perform Processor Assist
+  QADTR = 0xB3F5,     // Quantize (long DFP)
+  QAXTR = 0xB3FD,     // Quantize (extended DFP)
+  RCHP = 0xB23B,      // Reset Channel Path
+  RISBG = 0xEC55,     // Rotate Then Insert Selected Bits
+  RISBGN = 0xEC59,    // Rotate Then Insert Selected Bits
+  RISBHG = 0xEC5D,    // Rotate Then Insert Selected Bits High
+  RISBLG = 0xEC51,    // Rotate Then Insert Selected Bits Low
+  RLL = 0xEB1D,       // Rotate Left Single Logical (32)
+  RLLG = 0xEB1C,      // Rotate Left Single Logical (64)
+  RNSBG = 0xEC54,     // Rotate Then And Selected Bits
+  ROSBG = 0xEC56,     // Rotate Then Or Selected Bits
+  RRDTR = 0xB3F7,     // Reround (long DFP)
+  RRXTR = 0xB3FF,     // Reround (extended DFP)
+  RSCH = 0xB238,      // Resume Subchannel
+  RXSBG = 0xEC57,     // Rotate Then Exclusive Or Selected Bits
+  S = 0x5B,           // Subtract (32)
+  SAL = 0xB237,       // Set Address Limit
+  SAR = 0xB24E,       // Set Access
+  SCHM = 0xB23C,      // Set Channel Monitor
+  SDB = 0xED1B,       // Subtract (long BFP)
+  SDBR = 0xB31B,      // Subtract (long BFP)
+  SDTR = 0xB3D3,      // Subtract (long DFP)
+  SDTRA = 0xB3D3,     // Subtract (long DFP)
+  SEB = 0xED0B,       // Subtract (short BFP)
+  SEBR = 0xB30B,      // Subtract (short BFP)
+  SFASR = 0xB385,     // Set Fpc And Signal
+  SFPC = 0xB384,      // Set Fpc
+  SG = 0xE309,        // Subtract (64)
+  SGF = 0xE319,       // Subtract (64<-32)
+  SGFR = 0xB919,      // Subtract (64<-32)
+  SGR = 0xB909,       // Subtract (64)
+  SGRK = 0xB9E9,      // Subtract (64)
+  SH = 0x4B,          // Subtract Halfword
+  SHHHR = 0xB9C9,     // Subtract High (32)
+  SHHLR = 0xB9D9,     // Subtract High (32)
+  SHY = 0xE37B,       // Subtract Halfword
+  SL = 0x5F,          // Subtract Logical (32)
+  SLA = 0x8B,         // Shift Left Single (32)
+  SLAG = 0xEB0B,      // Shift Left Single (64)
+  SLAK = 0xEBDD,      // Shift Left Single (32)
+  SLB = 0xE399,       // Subtract Logical With Borrow (32)
+  SLBG = 0xE389,      // Subtract Logical With Borrow (64)
+  SLBGR = 0xB989,     // Subtract Logical With Borrow (64)
+  SLBR = 0xB999,      // Subtract Logical With Borrow (32)
+  SLDA = 0x8F,        // Shift Left Double
+  SLDL = 0x8D,        // Shift Left Double Logical
+  SLDT = 0xED40,      // Shift Significand Left (long DFP)
+  SLFI = 0xC25,       // Subtract Logical Immediate (32)
+  SLG = 0xE30B,       // Subtract Logical (64)
+  SLGF = 0xE31B,      // Subtract Logical (64<-32)
+  SLGFI = 0xC24,      // Subtract Logical Immediate (64<-32)
+  SLGFR = 0xB91B,     // Subtract Logical (64<-32)
+  SLGR = 0xB90B,      // Subtract Logical (64)
+  SLGRK = 0xB9EB,     // Subtract Logical (64)
+  SLHHHR = 0xB9CB,    // Subtract Logical High (32)
+  SLHHLR = 0xB9DB,    // Subtract Logical High (32)
+  SLL = 0x89,         // Shift Left Single Logical (32)
+  SLLG = 0xEB0D,      // Shift Left Single Logical (64)
+  SLLK = 0xEBDF,      // Shift Left Single Logical (32)
+  SLR = 0x1F,         // Subtract Logical (32)
+  SLRK = 0xB9FB,      // Subtract Logical (32)
+  SLXT = 0xED48,      // Shift Significand Left (extended DFP)
+  SLY = 0xE35F,       // Subtract Logical (32)
+  SP = 0xFB,          // Subtract Decimal
+  SPM = 0x04,         // Set Program Mask
+  SQDB = 0xED15,      // Square Root (long BFP)
+  SQDBR = 0xB315,     // Square Root (long BFP)
+  SQEB = 0xED14,      // Square Root (short BFP)
+  SQEBR = 0xB314,     // Square Root (short BFP)
+  SQXBR = 0xB316,     // Square Root (extended BFP)
+  SR = 0x1B,          // Subtract (32)
+  SRA = 0x8A,         // Shift Right Single (32)
+  SRAG = 0xEB0A,      // Shift Right Single (64)
+  SRAK = 0xEBDC,      // Shift Right Single (32)
+  SRDA = 0x8E,        // Shift Right Double
+  SRDL = 0x8C,        // Shift Right Double Logical
+  SRDT = 0xED41,      // Shift Significand Right (long DFP)
+  SRK = 0xB9F9,       // Subtract (32)
+  SRL = 0x88,         // Shift Right Single Logical (32)
+  SRLG = 0xEB0C,      // Shift Right Single Logical (64)
+  SRLK = 0xEBDE,      // Shift Right Single Logical (32)
+  SRNM = 0xB299,      // Set BFP Rounding Mode (2 bit)
+  SRNMB = 0xB2B8,     // Set BFP Rounding Mode (3 bit)
+  SRNMT = 0xB2B9,     // Set DFP Rounding Mode
+  SRP = 0xF0,         // Shift And Round Decimal
+  SRST = 0xB25E,      // Search String
+  SRSTU = 0xB9BE,     // Search String Unicode
+  SRXT = 0xED49,      // Shift Significand Right (extended DFP)
+  SSCH = 0xB233,      // Start Subchannel
+  ST = 0x50,          // Store (32)
+  STC = 0x42,         // Store Character
+  STCH = 0xE3C3,      // Store Character High (8)
+  STCK = 0xB205,      // Store Clock
+  STCKE = 0xB278,     // Store Clock Extended
+  STCKF = 0xB27C,     // Store Clock Fast
+  STCM = 0xBE,        // Store Characters Under Mask (low)
+  STCMH = 0xEB2C,     // Store Characters Under Mask (high)
+  STCMY = 0xEB2D,     // Store Characters Under Mask (low)
+  STCPS = 0xB23A,     // Store Channel Path Status
+  STCRW = 0xB239,     // Store Channel Report Word
+  STCY = 0xE372,      // Store Character
+  STD = 0x60,         // Store (long)
+  STDY = 0xED67,      // Store (long)
+  STE = 0x70,         // Store (short)
+  STEY = 0xED66,      // Store (short)
+  STFH = 0xE3CB,      // Store High (32)
+  STFLE = 0xB2B0,     // Store Facility List Extended
+  STFPC = 0xB29C,     // Store Fpc
+  STG = 0xE324,       // Store (64)
+  STGRL = 0xC4B,      // Store Relative Long (64)
+  STH = 0x40,         // Store Halfword
+  STHH = 0xE3C7,      // Store Halfword High (16)
+  STHRL = 0xC47,      // Store Halfword Relative Long
+  STHY = 0xE370,      // Store Halfword
+  STM = 0x90,         // Store Multiple (32)
+  STMG = 0xEB24,      // Store Multiple (64)
+  STMH = 0xEB26,      // Store Multiple High
+  STMY = 0xEB90,      // Store Multiple (32)
+  STOC = 0xEBF3,      // Store On Condition (32)
+  STOCG = 0xEBE3,     // Store On Condition (64)
+  STPQ = 0xE38E,      // Store Pair To Quadword
+  STRL = 0xC4F,       // Store Relative Long (32)
+  STRV = 0xE33E,      // Store Reversed (32)
+  STRVG = 0xE32F,     // Store Reversed (64)
+  STRVH = 0xE33F,     // Store Reversed (16)
+  STSCH = 0xB234,     // Store Subchannel
+  STY = 0xE350,       // Store (32)
+  SVC = 0x0A,         // Supervisor Call
+  SXBR = 0xB34B,      // Subtract (extended BFP)
+  SXTR = 0xB3DB,      // Subtract (extended DFP)
+  SXTRA = 0xB3DB,     // Subtract (extended DFP)
+  SY = 0xE35B,        // Subtract (32)
+  TABORT = 0xB2FC,    // Transaction Abort
+  TBDR = 0xB351,      // Convert HFP To BFP (long)
+  TBEDR = 0xB350,     // Convert HFP To BFP (long to short)
+  TBEGIN = 0xE560,    // Transaction Begin
+  TBEGINC = 0xE561,   // Transaction Begin
+  TCDB = 0xED11,      // Test Data Class (long BFP)
+  TCEB = 0xED10,      // Test Data Class (short BFP)
+  TCXB = 0xED12,      // Test Data Class (extended BFP)
+  TDCDT = 0xED54,     // Test Data Class (long DFP)
+  TDCET = 0xED50,     // Test Data Class (short DFP)
+  TDCXT = 0xED58,     // Test Data Class (extended DFP)
+  TDGDT = 0xED55,     // Test Data Group (long DFP)
+  TDGET = 0xED51,     // Test Data Group (short DFP)
+  TDGXT = 0xED59,     // Test Data Group (extended DFP)
+  TEND = 0xB2F8,      // Transaction End
+  THDER = 0xB358,     // Convert BFP To HFP (short to long)
+  THDR = 0xB359,      // Convert BFP To HFP (long)
+  TM = 0x91,          // Test Under Mask
+  TMH = 0xA70,        // Test Under Mask High
+  TMHH = 0xA72,       // Test Under Mask (high high)
+  TMHL = 0xA73,       // Test Under Mask (high low)
+  TML = 0xA71,        // Test Under Mask Low
+  TMLH = 0xA70,       // Test Under Mask (low high)
+  TMLL = 0xA71,       // Test Under Mask (low low)
+  TMY = 0xEB51,       // Test Under Mask
+  TP = 0xEBC0,        // Test Decimal
+  TPI = 0xB236,       // Test Pending Interruption
+  TR = 0xDC,          // Translate
+  TRAP4 = 0xB2FF,     // Trap (4)
+  TRE = 0xB2A5,       // Translate Extended
+  TROO = 0xB993,      // Translate One To One
+  TROT = 0xB992,      // Translate One To Two
+  TRT = 0xDD,         // Translate And Test
+  TRTE = 0xB9BF,      // Translate And Test Extended
+  TRTO = 0xB991,      // Translate Two To One
+  TRTR = 0xD0,        // Translate And Test Reverse
+  TRTRE = 0xB9BD,     // Translate And Test Reverse Extended
+  TRTT = 0xB990,      // Translate Two To Two
+  TS = 0x93,          // Test And Set
+  TSCH = 0xB235,      // Test Subchannel
+  UNPK = 0xF3,        // Unpack
+  UNPKA = 0xEA,       // Unpack Ascii
+  UNPKU = 0xE2,       // Unpack Unicode
+  UPT = 0x0102,       // Update Tree
+  X = 0x57,           // Exclusive Or (32)
+  XC = 0xD7,          // Exclusive Or (character)
+  XG = 0xE382,        // Exclusive Or (64)
+  XGR = 0xB982,       // Exclusive Or (64)
+  XGRK = 0xB9E7,      // Exclusive Or (64)
+  XI = 0x97,          // Exclusive Or (immediate)
+  XIHF = 0xC06,       // Exclusive Or Immediate (high)
+  XILF = 0xC07,       // Exclusive Or Immediate (low)
+  XIY = 0xEB57,       // Exclusive Or (immediate)
+  XR = 0x17,          // Exclusive Or (32)
+  XRK = 0xB9F7,       // Exclusive Or (32)
+  XSCH = 0xB276,      // Cancel Subchannel
+  XY = 0xE357,        // Exclusive Or (32)
+  ZAP = 0xF8,         // Zero And Add
+  BKPT = 0x0001       // GDB Software Breakpoint
+};
+
+// Instruction encoding bits and masks.
+enum {
+  // Instruction encoding bit
+  B1 = 1 << 1,
+  B4 = 1 << 4,
+  B5 = 1 << 5,
+  B7 = 1 << 7,
+  B8 = 1 << 8,
+  B9 = 1 << 9,
+  B12 = 1 << 12,
+  B18 = 1 << 18,
+  B19 = 1 << 19,
+  B20 = 1 << 20,
+  B22 = 1 << 22,
+  B23 = 1 << 23,
+  B24 = 1 << 24,
+  B25 = 1 << 25,
+  B26 = 1 << 26,
+  B27 = 1 << 27,
+  B28 = 1 << 28,
+
+  B6 = 1 << 6,
+  B10 = 1 << 10,
+  B11 = 1 << 11,
+  B16 = 1 << 16,
+  B17 = 1 << 17,
+  B21 = 1 << 21,
+
+  // Instruction bit masks
+  kCondMask = 0x1F << 21,
+  kOff12Mask = (1 << 12) - 1,
+  kImm24Mask = (1 << 24) - 1,
+  kOff16Mask = (1 << 16) - 1,
+  kImm16Mask = (1 << 16) - 1,
+  kImm26Mask = (1 << 26) - 1,
+  kBOfieldMask = 0x1f << 21,
+  kOpcodeMask = 0x3f << 26,
+  kExt2OpcodeMask = 0x1f << 1,
+  kExt5OpcodeMask = 0x3 << 2,
+  kBIMask = 0x1F << 16,
+  kBDMask = 0x14 << 2,
+  kAAMask = 0x01 << 1,
+  kLKMask = 0x01,
+  kRCMask = 0x01,
+  kTOMask = 0x1f << 21
+};
+
+// S390 instructions require bigger shifts, so these are defined as macros
+// rather than enum values to avoid integer typing issues.
+#define B32 ((uint64_t)1 << 32)
+#define B36 ((uint64_t)1 << 36)
+#define B40 ((uint64_t)1 << 40)
+const FourByteInstr kFourByteBrCondMask = 0xF << 20;
+const SixByteInstr kSixByteBrCondMask = static_cast<SixByteInstr>(0xF) << 36;
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Overflow Exception
+enum OEBit {
+  SetOE = 1 << 10,   // Set overflow exception
+  LeaveOE = 0 << 10  // No overflow exception
+};
+
+// Record bit
+enum RCBit {   // Bit 0
+  SetRC = 1,   // LT,GT,EQ,SO
+  LeaveRC = 0  // None
+};
+
+// Link bit
+enum LKBit {   // Bit 0
+  SetLK = 1,   // Load effective address of next instruction
+  LeaveLK = 0  // No action
+};
+
+enum BOfield {        // Bits 25-21
+  DCBNZF = 0 << 21,   // Decrement CTR; branch if CTR != 0 and condition false
+  DCBEZF = 2 << 21,   // Decrement CTR; branch if CTR == 0 and condition false
+  BF = 4 << 21,       // Branch if condition false
+  DCBNZT = 8 << 21,   // Decrement CTR; branch if CTR != 0 and condition true
+  DCBEZT = 10 << 21,  // Decrement CTR; branch if CTR == 0 and condition true
+  BT = 12 << 21,      // Branch if condition true
+  DCBNZ = 16 << 21,   // Decrement CTR; branch if CTR != 0
+  DCBEZ = 18 << 21,   // Decrement CTR; branch if CTR == 0
+  BA = 20 << 21       // Branch always
+};
+
+#ifdef _AIX
+#undef CR_LT
+#undef CR_GT
+#undef CR_EQ
+#undef CR_SO
+#endif
+
+enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
+
+#define CRWIDTH 4
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the S390
+// simulator.
+// SVC provides a 24-bit immediate value. Use bits 22:0 for the standard
+// SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+  // Transition to C code
+  kCallRtRedirected = 0x0010,
+  // Breakpoint
+  kBreakpoint = 0x0000,
+  // Stop
+  kStopCode = 1 << 23
+};
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
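+
+// Sketch of how the 24-bit SVC immediate splits under the comment above
+// (bit 23 = stop flag, bits 22:0 = code). EncodeStopCode / IsStopCode are
+// hypothetical helpers shown for illustration only, not used elsewhere:
+//   uint32_t EncodeStopCode(uint32_t code) { return kStopCode | (code & kStopCodeMask); }
+//   bool IsStopCode(uint32_t svc_imm) { return (svc_imm & kStopCode) != 0; }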
+
+// FP rounding modes.
+enum FPRoundingMode {
+  RN = 0,  // Round to Nearest.
+  RZ = 1,  // Round towards zero.
+  RP = 2,  // Round towards Plus Infinity.
+  RM = 3,  // Round towards Minus Infinity.
+
+  // Aliases.
+  kRoundToNearest = RN,
+  kRoundToZero = RZ,
+  kRoundToPlusInf = RP,
+  kRoundToMinusInf = RM
+};
+
+const uint32_t kFPRoundingModeMask = 3;
+
+enum CheckForInexactConversion {
+  kCheckForInexactConversion,
+  kDontCheckForInexactConversion
+};
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+// Use TRAP4 to indicate a redirected call in simulation mode.
+const Instr rtCallRedirInstr = TRAP4;
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the
+// z/Architecture instruction set encoding.
+class Instruction {
+ public:
+  // S390 Opcode Format Types
+  //   Based on the first byte of the opcode, we can determine how to extract
+  //   the entire opcode of the instruction.  The various flavours include:
+  enum OpcodeFormatType {
+    ONE_BYTE_OPCODE,           // One Byte - Bits 0 to 7
+    TWO_BYTE_OPCODE,           // Two Bytes - Bits 0 to 15
+    TWO_BYTE_DISJOINT_OPCODE,  // Two Bytes - Bits 0 to 7, 40 to 47
+    THREE_NIBBLE_OPCODE        // Three Nibbles - Bits 0 to 7, 12 to 15
+  };
+
+// Helper macro to define static accessors.
+// We use the cast to char* trick to bypass the strict aliasing rules.
+#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+  static inline return_type Name(Instr instr) {          \
+    char* temp = reinterpret_cast<char*>(&instr);        \
+    return reinterpret_cast<Instruction*>(temp)->Name(); \
+  }
+
+#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+
+  // Get the raw instruction bits.
+  template <typename T>
+  inline T InstructionBits() const {
+    return Instruction::InstructionBits<T>(reinterpret_cast<const byte*>(this));
+  }
+  inline Instr InstructionBits() const {
+    return *reinterpret_cast<const Instr*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  template <typename T>
+  inline void SetInstructionBits(T value) const {
+    Instruction::SetInstructionBits<T>(reinterpret_cast<const byte*>(this),
+                                       value);
+  }
+  inline void SetInstructionBits(Instr value) {
+    *reinterpret_cast<Instr*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+  // Read a bit field's value out of the instruction bits.
+  inline int Bits(int hi, int lo) const {
+    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+  // Read bits according to instruction type
+  template <typename T, typename U>
+  inline U Bits(int hi, int lo) const {
+    return (InstructionBits<T>() >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+  // Read a bit field out of the instruction bits.
+  inline int BitField(int hi, int lo) const {
+    return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+  }
+
+  // Determine the instruction length
+  inline int InstructionLength() {
+    return Instruction::InstructionLength(reinterpret_cast<const byte*>(this));
+  }
+  // Extract the Instruction Opcode
+  inline Opcode S390OpcodeValue() {
+    return Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(this));
+  }
+
+  // Static support.
+
+  // Read one particular bit out of the instruction bits.
+  static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
+
+  // Read the value of a bit field out of the instruction bits.
+  static inline int Bits(Instr instr, int hi, int lo) {
+    return (instr >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+  // Read a bit field out of the instruction bits.
+  static inline int BitField(Instr instr, int hi, int lo) {
+    return instr & (((2 << (hi - lo)) - 1) << lo);
+  }
+
+  // Determine the instruction length of the given instruction
+  static inline int InstructionLength(const byte* instr) {
+    // Length can be determined by the first nibble.
+    // 0x0 to 0x3 => 2-bytes
+    // 0x4 to 0xB => 4-bytes
+    // 0xC to 0xF => 6-bytes
+    byte topNibble = (*instr >> 4) & 0xF;
+    if (topNibble <= 3)
+      return 2;
+    else if (topNibble <= 0xB)
+      return 4;
+    return 6;
+  }
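+
+  // Worked example (illustrative only): 0x18 (LR) has top nibble 0x1, so the
+  // instruction is 2 bytes long; 0xB9 (e.g. LGR) has top nibble 0xB, so 4
+  // bytes; 0xE3 (e.g. LG) has top nibble 0xE, so 6 bytes.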
+
+  // Returns the instruction bits of the given instruction
+  static inline uint64_t InstructionBits(const byte* instr) {
+    int length = InstructionLength(instr);
+    if (2 == length)
+      return static_cast<uint64_t>(InstructionBits<TwoByteInstr>(instr));
+    else if (4 == length)
+      return static_cast<uint64_t>(InstructionBits<FourByteInstr>(instr));
+    else
+      return InstructionBits<SixByteInstr>(instr);
+  }
+
+  // Extract the raw instruction bits
+  template <typename T>
+  static inline T InstructionBits(const byte* instr) {
+#if !V8_TARGET_LITTLE_ENDIAN
+    if (sizeof(T) <= 4) {
+      return *reinterpret_cast<const T*>(instr);
+    } else {
+      // We cannot read 8 bytes at the instruction address directly, because
+      // for a six-byte instruction the trailing 2 bytes might not be
+      // allocated.
+      uint64_t fourBytes = *reinterpret_cast<const uint32_t*>(instr);
+      uint16_t twoBytes = *reinterpret_cast<const uint16_t*>(instr + 4);
+      return (fourBytes << 16 | twoBytes);
+    }
+#else
+    // Even on little endian hosts (simulation), the instructions
+    // are stored as big-endian in order to decode the opcode and
+    // instruction length.
+    T instr_bits = 0;
+
+    // 6-byte instrs are represented by uint64_t
+    uint32_t size = (sizeof(T) == 8) ? 6 : sizeof(T);
+
+    for (T i = 0; i < size; i++) {
+      instr_bits <<= 8;
+      instr_bits |= *(instr + i);
+    }
+    return instr_bits;
+#endif
+  }
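+
+  // Worked example (illustrative only): the four-byte instruction
+  // "lgr r1, r2" is stored as the byte sequence B9 04 00 12 on both big- and
+  // little-endian hosts; on a little-endian host the byte-by-byte loop above
+  // reassembles the value 0xB9040012, whereas a single native 4-byte load
+  // would yield the byte-swapped value 0x120004B9.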
+
+  // Set the Instruction Bits to value
+  template <typename T>
+  static inline void SetInstructionBits(byte* instr, T value) {
+#if V8_TARGET_LITTLE_ENDIAN
+    // The instruction bits are stored in big endian format even on little
+    // endian hosts, in order to decode instruction length and opcode.
+    // The following code will reverse the bytes so that the stores later
+    // (which are in native endianness) will effectively save the instruction
+    // in big endian.
+    if (sizeof(T) == 2) {
+      // Two Byte Instruction
+      value = ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
+    } else if (sizeof(T) == 4) {
+      // Four Byte Instruction
+      value = ((value & 0x000000FF) << 24) | ((value & 0x0000FF00) << 8) |
+              ((value & 0x00FF0000) >> 8) | ((value & 0xFF000000) >> 24);
+    } else if (sizeof(T) == 8) {
+      // Six Byte Instruction
+      uint64_t orig_value = static_cast<uint64_t>(value);
+      value = (static_cast<uint64_t>(orig_value & 0xFF) << 40) |
+              (static_cast<uint64_t>((orig_value >> 8) & 0xFF) << 32) |
+              (static_cast<uint64_t>((orig_value >> 16) & 0xFF) << 24) |
+              (static_cast<uint64_t>((orig_value >> 24) & 0xFF) << 16) |
+              (static_cast<uint64_t>((orig_value >> 32) & 0xFF) << 8) |
+              (static_cast<uint64_t>((orig_value >> 40) & 0xFF));
+    }
+#endif
+    if (sizeof(T) <= 4) {
+      *reinterpret_cast<T*>(instr) = value;
+    } else {
+#if V8_TARGET_LITTLE_ENDIAN
+      uint64_t orig_value = static_cast<uint64_t>(value);
+      *reinterpret_cast<uint32_t*>(instr) = static_cast<uint32_t>(value);
+      *reinterpret_cast<uint16_t*>(instr + 4) =
+          static_cast<uint16_t>((orig_value >> 32) & 0xFFFF);
+#else
+      *reinterpret_cast<uint32_t*>(instr) = static_cast<uint32_t>(value >> 16);
+      *reinterpret_cast<uint16_t*>(instr + 4) =
+          static_cast<uint16_t>(value & 0xFFFF);
+#endif
+    }
+  }
+
+  // Get Instruction Format Type
+  static OpcodeFormatType getOpcodeFormatType(const byte* instr) {
+    const byte firstByte = *instr;
+    // Based on Figure B-3 in z/Architecture Principles of
+    // Operation.
+
+    // 1-byte opcodes
+    //   I, RR, RS, RSI, RX, SS Formats
+    if ((0x04 <= firstByte && 0x9B >= firstByte) ||
+        (0xA8 <= firstByte && 0xB1 >= firstByte) ||
+        (0xBA <= firstByte && 0xBF >= firstByte) || (0xC5 == firstByte) ||
+        (0xC7 == firstByte) || (0xD0 <= firstByte && 0xE2 >= firstByte) ||
+        (0xE8 <= firstByte && 0xEA >= firstByte) ||
+        (0xEE <= firstByte && 0xFD >= firstByte)) {
+      return ONE_BYTE_OPCODE;
+    }
+
+    // 2-byte opcodes
+    //   E, IE, RRD, RRE, RRF, SIL, S, SSE Formats
+    if ((0x00 == firstByte) ||  // Software breakpoint 0x0001
+        (0x01 == firstByte) || (0xB2 == firstByte) || (0xB3 == firstByte) ||
+        (0xB9 == firstByte) || (0xE5 == firstByte)) {
+      return TWO_BYTE_OPCODE;
+    }
+
+    // 3-nibble opcodes
+    //   RI, RIL, SSF Formats
+    if ((0xA5 == firstByte) || (0xA7 == firstByte) ||
+        (0xC0 <= firstByte && 0xCC >= firstByte)) {  // C5,C7 handled above
+      return THREE_NIBBLE_OPCODE;
+    }
+    // Remaining ones are all TWO_BYTE_DISJOINT OPCODES.
+    DCHECK(InstructionLength(instr) == 6);
+    return TWO_BYTE_DISJOINT_OPCODE;
+  }
+
+  // Extract the full opcode from the instruction.
+  static inline Opcode S390OpcodeValue(const byte* instr) {
+    OpcodeFormatType opcodeType = getOpcodeFormatType(instr);
+
+    // The native instructions are encoded in big-endian format
+    // even if running on little-endian host.  Hence, we need
+    // to ensure we use byte* based bit-wise logic.
+    switch (opcodeType) {
+      case ONE_BYTE_OPCODE:
+        // One Byte - Bits 0 to 7
+        return static_cast<Opcode>(*instr);
+      case TWO_BYTE_OPCODE:
+        // Two Bytes - Bits 0 to 15
+        return static_cast<Opcode>((*instr << 8) | (*(instr + 1)));
+      case TWO_BYTE_DISJOINT_OPCODE:
+        // Two Bytes - Bits 0 to 7, 40 to 47
+        return static_cast<Opcode>((*instr << 8) | (*(instr + 5) & 0xFF));
+      case THREE_NIBBLE_OPCODE:
+        // Three Nibbles - Bits 0 to 7, 12 to 15
+        return static_cast<Opcode>((*instr << 4) | (*(instr + 1) & 0xF));
+      default:
+        break;
+    }
+
+    UNREACHABLE();
+    return static_cast<Opcode>(-1);
+  }
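+
+  // Worked examples (illustrative only): "lg r1, 0(r2)" is encoded as
+  // E3 10 20 00 00 04, a TWO_BYTE_DISJOINT_OPCODE, so the opcode is
+  // (0xE3 << 8) | 0x04 = 0xE304.  "ahi r1, 1" is encoded as A7 1A 00 01, a
+  // THREE_NIBBLE_OPCODE, so the opcode is (0xA7 << 4) | 0xA = 0xA7A.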
+
+  // Fields used in Software interrupt instructions
+  inline SoftwareInterruptCodes SvcValue() const {
+    return static_cast<SoftwareInterruptCodes>(Bits<FourByteInstr, int>(15, 0));
+  }
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instruction.
+  // Use the At(pc) function to create references to Instruction.
+  static Instruction* At(byte* pc) {
+    return reinterpret_cast<Instruction*>(pc);
+  }
+
+ private:
+  // We need to prevent the creation of instances of class Instruction.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+// I Instruction -- we suspect this will not be used,
+// but it is implemented for completeness.
+class IInstruction : Instruction {
+ public:
+  inline int IValue() const { return Bits<TwoByteInstr, int>(7, 0); }
+
+  inline int size() const { return 2; }
+};
+
+// RR Instruction
+class RRInstruction : Instruction {
+ public:
+  inline int R1Value() const {
+    // The hi and lo parameters of Bits are bit positions counted from the
+    // rightmost (least significant) bit.
+    return Bits<TwoByteInstr, int>(7, 4);
+  }
+  inline int R2Value() const { return Bits<TwoByteInstr, int>(3, 0); }
+  inline Condition M1Value() const {
+    return static_cast<Condition>(Bits<TwoByteInstr, int>(7, 4));
+  }
+
+  inline int size() const { return 2; }
+};
+
+// RRE Instruction
+class RREInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<FourByteInstr, int>(7, 4); }
+  inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
+  inline int M3Value() const { return Bits<FourByteInstr, int>(15, 12); }
+  inline int M4Value() const { return Bits<FourByteInstr, int>(19, 16); }
+  inline int size() const { return 4; }
+};
+
+// RRF Instruction
+class RRFInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<FourByteInstr, int>(7, 4); }
+  inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
+  inline int R3Value() const { return Bits<FourByteInstr, int>(15, 12); }
+  inline int M3Value() const { return Bits<FourByteInstr, int>(15, 12); }
+  inline int M4Value() const { return Bits<FourByteInstr, int>(11, 8); }
+  inline int size() const { return 4; }
+};
+
+// RRD Instruction
+class RRDInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<FourByteInstr, int>(15, 12); }
+  inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
+  inline int R3Value() const { return Bits<FourByteInstr, int>(7, 4); }
+  inline int size() const { return 4; }
+};
+
+// RI Instruction
+class RIInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
+  inline int16_t I2Value() const { return Bits<FourByteInstr, int16_t>(15, 0); }
+  inline uint16_t I2UnsignedValue() const {
+    return Bits<FourByteInstr, uint16_t>(15, 0);
+  }
+  inline Condition M1Value() const {
+    return static_cast<Condition>(Bits<FourByteInstr, int>(23, 20));
+  }
+  inline int size() const { return 4; }
+};
+
+// RS Instruction
+class RSInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
+  inline int R3Value() const { return Bits<FourByteInstr, int>(19, 16); }
+  inline int B2Value() const { return Bits<FourByteInstr, int>(15, 12); }
+  inline unsigned int D2Value() const {
+    return Bits<FourByteInstr, unsigned int>(11, 0);
+  }
+  inline int size() const { return 4; }
+};
+
+// RSY Instruction
+class RSYInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+  inline int R3Value() const { return Bits<SixByteInstr, int>(35, 32); }
+  inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
+  inline int32_t D2Value() const {
+    int32_t value = Bits<SixByteInstr, int32_t>(27, 16);
+    value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
+    return value;
+  }
+  inline int size() const { return 6; }
+};
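+
+// Note on the D2 computation above (and the RXY/SIY variants below): the
+// 20-bit signed displacement is split into a low 12-bit field and a high
+// 8-bit field; reading the high field as int8_t and shifting it left by 12
+// reassembles the signed value. For example, low = 0xFFF and high = 0xFF
+// give 0xFFF + (-1 << 12) = -1, i.e. the 20-bit displacement 0xFFFFF.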
+
+// RX Instruction
+class RXInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
+  inline int X2Value() const { return Bits<FourByteInstr, int>(19, 16); }
+  inline int B2Value() const { return Bits<FourByteInstr, int>(15, 12); }
+  inline uint32_t D2Value() const {
+    return Bits<FourByteInstr, uint32_t>(11, 0);
+  }
+  inline int size() const { return 4; }
+};
+
+// RXY Instruction
+class RXYInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+  inline int X2Value() const { return Bits<SixByteInstr, int>(35, 32); }
+  inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
+  inline int32_t D2Value() const {
+    int32_t value = Bits<SixByteInstr, uint32_t>(27, 16);
+    value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
+    return value;
+  }
+  inline int size() const { return 6; }
+};
+
+// RIL Instruction
+class RILInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+  inline int32_t I2Value() const { return Bits<SixByteInstr, int32_t>(31, 0); }
+  inline uint32_t I2UnsignedValue() const {
+    return Bits<SixByteInstr, uint32_t>(31, 0);
+  }
+  inline int size() const { return 6; }
+};
+
+// SI Instruction
+class SIInstruction : Instruction {
+ public:
+  inline int B1Value() const { return Bits<FourByteInstr, int>(15, 12); }
+  inline uint32_t D1Value() const {
+    return Bits<FourByteInstr, uint32_t>(11, 0);
+  }
+  inline uint8_t I2Value() const {
+    return Bits<FourByteInstr, uint8_t>(23, 16);
+  }
+  inline int size() const { return 4; }
+};
+
+// SIY Instruction
+class SIYInstruction : Instruction {
+ public:
+  inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
+  inline int32_t D1Value() const {
+    int32_t value = Bits<SixByteInstr, uint32_t>(27, 16);
+    value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
+    return value;
+  }
+  inline uint8_t I2Value() const { return Bits<SixByteInstr, uint8_t>(39, 32); }
+  inline int size() const { return 6; }
+};
+
+// SIL Instruction
+class SILInstruction : Instruction {
+ public:
+  inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
+  inline int D1Value() const { return Bits<SixByteInstr, int>(27, 16); }
+  inline int I2Value() const { return Bits<SixByteInstr, int>(15, 0); }
+  inline int size() const { return 6; }
+};
+
+// SS Instruction
+class SSInstruction : Instruction {
+ public:
+  inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
+  inline int B2Value() const { return Bits<SixByteInstr, int>(15, 12); }
+  inline int D1Value() const { return Bits<SixByteInstr, int>(27, 16); }
+  inline int D2Value() const { return Bits<SixByteInstr, int>(11, 0); }
+  inline int Length() const { return Bits<SixByteInstr, int>(39, 32); }
+  inline int size() const { return 6; }
+};
+
+// RXE Instruction
+class RXEInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+  inline int X2Value() const { return Bits<SixByteInstr, int>(35, 32); }
+  inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
+  inline int D2Value() const { return Bits<SixByteInstr, int>(27, 16); }
+  inline int size() const { return 6; }
+};
+
+// RIE Instruction
+class RIEInstruction : Instruction {
+ public:
+  inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
+  inline int R2Value() const { return Bits<SixByteInstr, int>(35, 32); }
+  inline int I3Value() const { return Bits<SixByteInstr, uint32_t>(31, 24); }
+  inline int I4Value() const { return Bits<SixByteInstr, uint32_t>(23, 16); }
+  inline int I5Value() const { return Bits<SixByteInstr, uint32_t>(15, 8); }
+  inline int I6Value() const {
+    return static_cast<int32_t>(Bits<SixByteInstr, int16_t>(31, 16));
+  }
+  inline int size() const { return 6; }
+};
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+ private:
+  static const char* names_[kNumRegisters];
+};
+
+// Helper functions for converting between FP register numbers and names.
+class DoubleRegisters {
+ public:
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+ private:
+  static const char* names_[kNumDoubleRegisters];
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_CONSTANTS_S390_H_
diff --git a/src/s390/cpu-s390.cc b/src/s390/cpu-s390.cc
new file mode 100644
index 0000000..d0d54a8
--- /dev/null
+++ b/src/s390/cpu-s390.cc
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for s390 independent of OS goes here.
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_S390
+#include "src/assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* buffer, size_t size) {
+  // Given the strong memory model on z/Architecture and the single-threaded
+  // nature of V8 and JavaScript, instruction cache flushing is not
+  // necessary.  The architecture guarantees that if a core patches its own
+  // code, the updated instructions will be reflected automatically.
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/deoptimizer-s390.cc b/src/s390/deoptimizer-s390.cc
new file mode 100644
index 0000000..44062d6
--- /dev/null
+++ b/src/s390/deoptimizer-s390.cc
@@ -0,0 +1,338 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer.h"
+#include "src/codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// LAY + LGHI/LHI + BRCL
+const int Deoptimizer::table_entry_size_ = 16;
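+// (For reference: LAY is 6 bytes, LGHI/LHI is 4 bytes and BRCL is 6 bytes,
+// which accounts for the 16 bytes per table entry.)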
+
+int Deoptimizer::patch_size() {
+#if V8_TARGET_ARCH_S390X
+  const int kCallInstructionSize = 16;
+#else
+  const int kCallInstructionSize = 10;
+#endif
+  return kCallInstructionSize;
+}
+
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+  // Empty because there is no need for relocation information for the code
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+  Address code_start_address = code->instruction_start();
+
+  // Invalidate the relocation information, as it will become invalid by the
+  // code patching below, and is not needed any more.
+  code->InvalidateRelocation();
+
+  if (FLAG_zap_code_space) {
+    // Fail hard and early if we enter this code object again.
+    byte* pointer = code->FindCodeAgeSequence();
+    if (pointer != NULL) {
+      pointer += kNoCodeAgeSequenceLength;
+    } else {
+      pointer = code->instruction_start();
+    }
+    CodePatcher patcher(isolate, pointer, 2);
+    patcher.masm()->bkpt(0);
+
+    DeoptimizationInputData* data =
+        DeoptimizationInputData::cast(code->deoptimization_data());
+    int osr_offset = data->OsrPcOffset()->value();
+    if (osr_offset > 0) {
+      CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+                              2);
+      osr_patcher.masm()->bkpt(0);
+    }
+  }
+
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+  Address prev_call_address = NULL;
+#endif
+  // For each LLazyBailout instruction insert a call to the corresponding
+  // deoptimization entry.
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+    // We need calls to have a predictable size in the unoptimized code, but
+    // this is optimized code, so we don't have to have a predictable size.
+    int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
+        deopt_entry, kRelocInfo_NONEPTR);
+    DCHECK(call_size_in_bytes <= patch_size());
+    CodePatcher patcher(isolate, call_address, call_size_in_bytes);
+    patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
+    DCHECK(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
+  }
+}
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+  ApiFunction function(descriptor->deoptimization_handler());
+  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+  int params = descriptor->GetHandlerParameterCount();
+  output_frame->SetRegister(r2.code(), params);
+  output_frame->SetRegister(r3.code(), handler);
+}
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+  for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
+}
+
+#define __ masm()->
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::TableEntryGenerator::Generate() {
+  GeneratePrologue();
+
+  // Save all the registers onto the stack
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+
+  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+  // Save all double registers before messing with them.
+  __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    const DoubleRegister dreg = DoubleRegister::from_code(code);
+    int offset = code * kDoubleSize;
+    __ StoreDouble(dreg, MemOperand(sp, offset));
+  }
+
+  // Push all GPRs onto the stack
+  __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
+  __ StoreMultipleP(r0, sp, MemOperand(sp));  // Save all 16 registers
+
+  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  __ StoreP(fp, MemOperand(ip));
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Cleanse the return address for 31-bit mode.
+  __ CleanseP(r14);
+
+  // Get the address of the location in the code object (r5)(return
+  // address for lazy deoptimization) and compute the fp-to-sp delta in
+  // register r6.
+  __ LoadRR(r5, r14);
+  __ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
+  __ SubP(r6, fp, r6);
+
+  // Allocate a new deoptimizer object.
+  // Pass six arguments in r2 to r7.
+  __ PrepareCallCFunction(6, r7);
+  __ LoadImmP(r2, Operand::Zero());
+  Label context_check;
+  __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(r3, &context_check);
+  __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
+  __ LoadImmP(r3, Operand(type()));  // Bailout type.
+  // r4: bailout id already loaded.
+  // r5: code address or 0 already loaded.
+  // r6: Fp-to-sp delta.
+  // Param 6: isolate is passed on the stack.
+  __ mov(r7, Operand(ExternalReference::isolate_address(isolate())));
+  __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+
+  // Call Deoptimizer::New().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+  }
+
+  // Preserve the "deoptimizer" object in register r2 and get the input
+  // frame descriptor pointer into r3 (deoptimizer->input_).
+  __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  // TODO(john.yan): optimize the following code by using the mvc instruction:
+  //   __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
+  //          MemOperand(sp), kNumberOfRegisters * kPointerSize);
+  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    __ LoadP(r4, MemOperand(sp, i * kPointerSize));
+    __ StoreP(r4, MemOperand(r3, offset));
+  }
+
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  // Copy double registers to
+  // double_registers_[DoubleRegister::kNumRegisters]
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    int dst_offset = code * kDoubleSize + double_regs_offset;
+    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    // TODO(joransiu): MVC opportunity
+    __ LoadDouble(d0, MemOperand(sp, src_offset));
+    __ StoreDouble(d0, MemOperand(r3, dst_offset));
+  }
+
+  // Remove the bailout id and the saved registers from the stack.
+  __ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+  // Compute a pointer to the unwinding limit in register r4; that is
+  // the first stack slot not part of the input frame.
+  __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
+  __ AddP(r4, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  Label pop_loop_header;
+  __ b(&pop_loop_header, Label::kNear);
+  __ bind(&pop_loop);
+  __ pop(r6);
+  __ StoreP(r6, MemOperand(r5, 0));
+  __ la(r5, MemOperand(r5, kPointerSize));
+  __ bind(&pop_loop_header);
+  __ CmpP(r4, sp);
+  __ bne(&pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ push(r2);  // Preserve deoptimizer object across call.
+  // r2: deoptimizer object; r3: scratch.
+  __ PrepareCallCFunction(1, r3);
+  // Call Deoptimizer::ComputeOutputFrames().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate()), 1);
+  }
+  __ pop(r2);  // Restore deoptimizer object (class Deoptimizer).
+
+  __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+  // Outer loop state: r6 = current "FrameDescription** output_",
+  // r3 = one past the last FrameDescription**.
+  __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
+  __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset()));  // r6 is output_.
+  __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
+  __ AddP(r3, r6, r3);
+  __ b(&outer_loop_header, Label::kNear);
+
+  __ bind(&outer_push_loop);
+  // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
+  __ LoadP(r4, MemOperand(r6, 0));  // output_[ix]
+  __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+  __ b(&inner_loop_header, Label::kNear);
+
+  __ bind(&inner_push_loop);
+  __ AddP(r5, Operand(-sizeof(intptr_t)));
+  __ AddP(r8, r4, r5);
+  __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
+  __ push(r8);
+
+  __ bind(&inner_loop_header);
+  __ CmpP(r5, Operand::Zero());
+  __ bne(&inner_push_loop);  // test for gt?
+
+  __ AddP(r6, r6, Operand(kPointerSize));
+  __ bind(&outer_loop_header);
+  __ CmpP(r6, r3);
+  __ blt(&outer_push_loop);
+
+  __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    const DoubleRegister dreg = DoubleRegister::from_code(code);
+    int src_offset = code * kDoubleSize + double_regs_offset;
+    __ ld(dreg, MemOperand(r3, src_offset));
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  __ LoadP(r8, MemOperand(r4, FrameDescription::state_offset()));
+  __ push(r8);
+  __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
+  __ push(r8);
+  __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
+  __ push(r8);
+
+  // Restore the registers from the last output frame.
+  __ LoadRR(r1, r4);
+  for (int i = kNumberOfRegisters - 1; i > 0; i--) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((restored_regs & (1 << i)) != 0) {
+      __ LoadP(ToRegister(i), MemOperand(r1, offset));
+    }
+  }
+
+  __ InitializeRootRegister();
+
+  __ pop(ip);  // get continuation, leave pc on stack
+  __ pop(r14);
+  __ Jump(ip);
+  __ stop("Unreachable.");
+}
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  // Create a sequence of deoptimization entries. Note that any
+  // registers may be still live.
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    __ lay(sp, MemOperand(sp, -kPointerSize));
+    __ LoadImmP(ip, Operand(i));
+    __ b(&done);
+    int end = masm()->pc_offset();
+    USE(end);
+    DCHECK(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
+  __ StoreP(ip, MemOperand(sp));
+}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+  // No out-of-line constant pool support.
+  UNREACHABLE();
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/s390/disasm-s390.cc b/src/s390/disasm-s390.cc
new file mode 100644
index 0000000..5bab604
--- /dev/null
+++ b/src/s390/disasm-s390.cc
@@ -0,0 +1,1421 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+//   NameConverter converter;
+//   Disassembler d(converter);
+//   for (byte* pc = begin; pc < end;) {
+//     v8::internal::EmbeddedVector<char, 256> buffer;
+//     byte* prev_pc = pc;
+//     pc += d.InstructionDecode(buffer, pc);
+//     printf("%p    %08x      %s\n",
+//            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+//   }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/s390/constants-s390.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative descriptions.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter, Vector<char> out_buffer)
+      : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(byte* instruction);
+
+ private:
+  // Bottleneck functions to print into the out_buffer.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common values.
+  void PrintRegister(int reg);
+  void PrintDRegister(int reg);
+  void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
+
+  // Handle formatting of instructions and their options.
+  int FormatRegister(Instruction* instr, const char* option);
+  int FormatFloatingRegister(Instruction* instr, const char* option);
+  int FormatMask(Instruction* instr, const char* option);
+  int FormatDisplacement(Instruction* instr, const char* option);
+  int FormatImmediate(Instruction* instr, const char* option);
+  int FormatOption(Instruction* instr, const char* option);
+  void Format(Instruction* instr, const char* format);
+  void Unknown(Instruction* instr);
+  void UnknownFormat(Instruction* instr, const char* opcname);
+
+  bool DecodeTwoByte(Instruction* instr);
+  bool DecodeFourByte(Instruction* instr);
+  bool DecodeSixByte(Instruction* instr);
+
+  const disasm::NameConverter& converter_;
+  Vector<char> out_buffer_;
+  int out_buffer_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+  char cur = *str++;
+  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    PrintChar(cur);
+    cur = *str++;
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  Print(converter_.NameOfCPURegister(reg));
+}
+
+// Print the double FP register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) {
+  Print(DoubleRegister::from_code(reg).ToString());
+}
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
+  switch (svc) {
+    case kCallRtRedirected:
+      Print("call rt redirected");
+      return;
+    case kBreakpoint:
+      Print("breakpoint");
+      return;
+    default:
+      if (svc >= kStopCode) {
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d - 0x%x",
+                                    svc & kStopCodeMask, svc & kStopCodeMask);
+      } else {
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", svc);
+      }
+      return;
+  }
+}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'r');
+
+  if (format[1] == '1') {  // 'r1: register resides in bit 8-11
+    RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+    int reg = rrinstr->R1Value();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == '2') {  // 'r2: register resides in bit 12-15
+    RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+    int reg = rrinstr->R2Value();
+    // 'r2d: when the register is r0, it denotes a zero base register for the
+    // displacement and is therefore not printed.
+    if (format[2] == 'd') {
+      if (reg == 0) return 4;
+      PrintRegister(reg);
+      return 3;
+    } else {
+      PrintRegister(reg);
+      return 2;
+    }
+  } else if (format[1] == '3') {  // 'r3: register resides in bit 16-19
+    RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+    int reg = rsinstr->B2Value();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == '4') {  // 'r4: register resides in bit 20-23
+    RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+    int reg = rsinstr->B2Value();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == '5') {  // 'r5: register resides in bit 24-28
+    RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+    int reg = rreinstr->R1Value();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == '6') {  // 'r6: register resides in bit 29-32
+    RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+    int reg = rreinstr->R2Value();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == '7') {  // 'r7: register resides in bit 32-35
+    SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
+    int reg = ssinstr->B2Value();
+    PrintRegister(reg);
+    return 2;
+  }
+
+  UNREACHABLE();
+  return -1;
+}
+
+int Decoder::FormatFloatingRegister(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'f');
+
+  // Reuse 1, 5 and 6 because they correspond to the GPR cases above.
+  if (format[1] == '1') {  // 'f1: register resides in bit 8-11
+    RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+    int reg = rrinstr->R1Value();
+    PrintDRegister(reg);
+    return 2;
+  } else if (format[1] == '2') {  // 'f2: register resides in bit 12-15
+    RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
+    int reg = rrinstr->R2Value();
+    PrintDRegister(reg);
+    return 2;
+  } else if (format[1] == '3') {  // 'f3: register resides in bit 16-19
+    RRDInstruction* rrdinstr = reinterpret_cast<RRDInstruction*>(instr);
+    int reg = rrdinstr->R1Value();
+    PrintDRegister(reg);
+    return 2;
+  } else if (format[1] == '5') {  // 'f5: register resides in bit 24-28
+    RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+    int reg = rreinstr->R1Value();
+    PrintDRegister(reg);
+    return 2;
+  } else if (format[1] == '6') {  // 'f6: register resides in bit 29-32
+    RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
+    int reg = rreinstr->R2Value();
+    PrintDRegister(reg);
+    return 2;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instruction. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller).  FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+  switch (format[0]) {
+    case 'o': {
+      if (instr->Bit(10) == 1) {
+        Print("o");
+      }
+      return 1;
+    }
+    case '.': {
+      if (instr->Bit(0) == 1) {
+        Print(".");
+      } else {
+        Print(" ");  // ensure consistent spacing
+      }
+      return 1;
+    }
+    case 'r': {
+      return FormatRegister(instr, format);
+    }
+    case 'f': {
+      return FormatFloatingRegister(instr, format);
+    }
+    case 'i': {  // int16
+      return FormatImmediate(instr, format);
+    }
+    case 'u': {  // uint16
+      int32_t value = instr->Bits(15, 0);
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+      return 6;
+    }
+    case 'l': {
+      // Link (LK) Bit 0
+      if (instr->Bit(0) == 1) {
+        Print("l");
+      }
+      return 1;
+    }
+    case 'a': {
+      // Absolute Address Bit 1
+      if (instr->Bit(1) == 1) {
+        Print("a");
+      }
+      return 1;
+    }
+    case 't': {  // 'target: target of branch instructions
+      // target26 or target16
+      DCHECK(STRING_STARTS_WITH(format, "target"));
+      if ((format[6] == '2') && (format[7] == '6')) {
+        int off = ((instr->Bits(25, 2)) << 8) >> 6;
+        out_buffer_pos_ += SNPrintF(
+            out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+            converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+        return 8;
+      } else if ((format[6] == '1') && (format[7] == '6')) {
+        int off = ((instr->Bits(15, 2)) << 18) >> 16;
+        out_buffer_pos_ += SNPrintF(
+            out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+            converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+        return 8;
+      }
+    }
+    case 'm': {
+      return FormatMask(instr, format);
+    }
+    case 'd': {  // ds value for offset
+      return FormatDisplacement(instr, format);
+    }
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+
+  UNREACHABLE();
+  return -1;
+}
+
+int Decoder::FormatMask(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'm');
+  int32_t value = 0;
+  if ((format[1] == '1')) {  // prints the mask format in bits 8-12
+    value = reinterpret_cast<RRInstruction*>(instr)->R1Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+    return 2;
+  } else if (format[1] == '2') {  // mask format in bits 16-19
+    value = reinterpret_cast<RXInstruction*>(instr)->B2Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+    return 2;
+  } else if (format[1] == '3') {  // mask format in bits 20-23
+    value = reinterpret_cast<RRFInstruction*>(instr)->M4Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+    return 2;
+  }
+
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+  return 2;
+}
+
+int Decoder::FormatDisplacement(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'd');
+
+  if (format[1] == '1') {  // displacement in 20-31
+    RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+    uint16_t value = rsinstr->D2Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+
+    return 2;
+  } else if (format[1] == '2') {  // displacement in 20-39
+    RXYInstruction* rxyinstr = reinterpret_cast<RXYInstruction*>(instr);
+    int32_t value = rxyinstr->D2Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '4') {  // SS displacement 2: bits 36-47
+    SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+    uint16_t value = ssInstr->D2Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '3') {  // SS displacement 1: bits 20-31
+    SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+    uint16_t value = ssInstr->D1Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else {  // s390 specific
+    int32_t value = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 1;
+  }
+}
+
+int Decoder::FormatImmediate(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'i');
+
+  if (format[1] == '1') {  // immediate in 16-31
+    RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
+    int16_t value = riinstr->I2Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '2') {  // immediate in 16-48
+    RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+    int32_t value = rilinstr->I2Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '3') {  // immediate in I format
+    IInstruction* iinstr = reinterpret_cast<IInstruction*>(instr);
+    int8_t value = iinstr->IValue();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '4') {  // immediate in 16-31, but outputs as offset
+    RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
+    int16_t value = riinstr->I2Value() * 2;
+    if (value >= 0)
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
+    else
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
+
+    out_buffer_pos_ += SNPrintF(
+        out_buffer_ + out_buffer_pos_, "%d -> %s", value,
+        converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
+    return 2;
+  } else if (format[1] == '5') {  // immediate in 16-31, but outputs as offset
+    RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+    int32_t value = rilinstr->I2Value() * 2;
+    if (value >= 0)
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
+    else
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
+
+    out_buffer_pos_ += SNPrintF(
+        out_buffer_ + out_buffer_pos_, "%d -> %s", value,
+        converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
+    return 2;
+  } else if (format[1] == '6') {  // unsigned immediate in 16-31
+    RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
+    uint16_t value = riinstr->I2UnsignedValue();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '7') {  // unsigned immediate in 16-47
+    RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+    uint32_t value = rilinstr->I2UnsignedValue();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '8') {  // unsigned immediate in 8-15
+    SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
+    uint8_t value = ssinstr->Length();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == '9') {  // unsigned immediate in 16-23
+    RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
+    uint8_t value = rie_instr->I3Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == 'a') {  // unsigned immediate in 24-31
+    RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
+    uint8_t value = rie_instr->I4Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == 'b') {  // unsigned immediate in 32-39
+    RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
+    uint8_t value = rie_instr->I5Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == 'c') {  // signed immediate in 8-15
+    SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
+    int8_t value = ssinstr->Length();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == 'd') {  // signed immediate in 32-47
+    SILInstruction* silinstr = reinterpret_cast<SILInstruction*>(instr);
+    int16_t value = silinstr->I2Value();
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+    return 2;
+  } else if (format[1] == 'e') {  // immediate in 16-47, but outputs as offset
+    RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
+    int32_t value = rilinstr->I2Value() * 2;
+    if (value >= 0)
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
+    else
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
+
+    out_buffer_pos_ += SNPrintF(
+        out_buffer_ + out_buffer_pos_, "%d -> %s", value,
+        converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
+    return 2;
+  }
+
+  UNREACHABLE();
+  return -1;
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+  char cur = *format++;
+  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    if (cur == '\'') {  // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = cur;
+    }
+    cur = *format++;
+  }
+  out_buffer_[out_buffer_pos_] = '\0';
+}
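+
+// Worked example (illustrative only): Format(instr, "ahi\t'r1,'i1") copies
+// "ahi" and the tab verbatim, then the 'r1 escape makes FormatOption print
+// the R1 register name and the 'i1 escape prints the signed 16-bit
+// immediate, yielding e.g. "ahi\tr3,1".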
+
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+  if (!(condition)) {     \
+    Unknown(instr);       \
+    return;               \
+  }
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" for the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+// For currently unimplemented decodings the disassembler calls
+// UnknownFormat(instr) which will just print the opcode name of the
+// instruction.
+void Decoder::UnknownFormat(Instruction* instr, const char* name) {
+  char buffer[100];
+  snprintf(buffer, sizeof(buffer), "%s (unknown-format)", name);
+  Format(instr, buffer);
+}
+
+// Disassembles Two Byte S390 Instructions
+// @return true if successfully decoded
+bool Decoder::DecodeTwoByte(Instruction* instr) {
+  // Print the Instruction bits.
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%04x           ",
+                              instr->InstructionBits<TwoByteInstr>());
+
+  Opcode opcode = instr->S390OpcodeValue();
+  switch (opcode) {
+    case AR:
+      Format(instr, "ar\t'r1,'r2");
+      break;
+    case SR:
+      Format(instr, "sr\t'r1,'r2");
+      break;
+    case MR:
+      Format(instr, "mr\t'r1,'r2");
+      break;
+    case DR:
+      Format(instr, "dr\t'r1,'r2");
+      break;
+    case OR:
+      Format(instr, "or\t'r1,'r2");
+      break;
+    case NR:
+      Format(instr, "nr\t'r1,'r2");
+      break;
+    case XR:
+      Format(instr, "xr\t'r1,'r2");
+      break;
+    case LR:
+      Format(instr, "lr\t'r1,'r2");
+      break;
+    case CR:
+      Format(instr, "cr\t'r1,'r2");
+      break;
+    case CLR:
+      Format(instr, "clr\t'r1,'r2");
+      break;
+    case BCR:
+      Format(instr, "bcr\t'm1,'r2");
+      break;
+    case LTR:
+      Format(instr, "ltr\t'r1,'r2");
+      break;
+    case ALR:
+      Format(instr, "alr\t'r1,'r2");
+      break;
+    case SLR:
+      Format(instr, "slr\t'r1,'r2");
+      break;
+    case LNR:
+      Format(instr, "lnr\t'r1,'r2");
+      break;
+    case LCR:
+      Format(instr, "lcr\t'r1,'r2");
+      break;
+    case BASR:
+      Format(instr, "basr\t'r1,'r2");
+      break;
+    case LDR:
+      Format(instr, "ldr\t'f1,'f2");
+      break;
+    case BKPT:
+      Format(instr, "bkpt");
+      break;
+    default:
+      return false;
+  }
+  return true;
+}
+
+// Disassembles Four Byte S390 Instructions
+// @return true if successfully decoded
+bool Decoder::DecodeFourByte(Instruction* instr) {
+  // Print the Instruction bits.
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x       ",
+                              instr->InstructionBits<FourByteInstr>());
+
+  Opcode opcode = instr->S390OpcodeValue();
+  switch (opcode) {
+    case AHI:
+      Format(instr, "ahi\t'r1,'i1");
+      break;
+    case AGHI:
+      Format(instr, "aghi\t'r1,'i1");
+      break;
+    case LHI:
+      Format(instr, "lhi\t'r1,'i1");
+      break;
+    case LGHI:
+      Format(instr, "lghi\t'r1,'i1");
+      break;
+    case MHI:
+      Format(instr, "mhi\t'r1,'i1");
+      break;
+    case MGHI:
+      Format(instr, "mghi\t'r1,'i1");
+      break;
+    case CHI:
+      Format(instr, "chi\t'r1,'i1");
+      break;
+    case CGHI:
+      Format(instr, "cghi\t'r1,'i1");
+      break;
+    case BRAS:
+      Format(instr, "bras\t'r1,'i1");
+      break;
+    case BRC:
+      Format(instr, "brc\t'm1,'i4");
+      break;
+    case BRCT:
+      Format(instr, "brct\t'r1,'i4");
+      break;
+    case BRCTG:
+      Format(instr, "brctg\t'r1,'i4");
+      break;
+    case IIHH:
+      Format(instr, "iihh\t'r1,'i1");
+      break;
+    case IIHL:
+      Format(instr, "iihl\t'r1,'i1");
+      break;
+    case IILH:
+      Format(instr, "iilh\t'r1,'i1");
+      break;
+    case IILL:
+      Format(instr, "iill\t'r1,'i1");
+      break;
+    case OILL:
+      Format(instr, "oill\t'r1,'i1");
+      break;
+    case TMLL:
+      Format(instr, "tmll\t'r1,'i1");
+      break;
+    case STM:
+      Format(instr, "stm\t'r1,'r2,'d1('r3)");
+      break;
+    case LM:
+      Format(instr, "lm\t'r1,'r2,'d1('r3)");
+      break;
+    case SLL:
+      Format(instr, "sll\t'r1,'d1('r3)");
+      break;
+    case SRL:
+      Format(instr, "srl\t'r1,'d1('r3)");
+      break;
+    case SLA:
+      Format(instr, "sla\t'r1,'d1('r3)");
+      break;
+    case SRA:
+      Format(instr, "sra\t'r1,'d1('r3)");
+      break;
+    case SLDL:
+      Format(instr, "sldl\t'r1,'d1('r3)");
+      break;
+    case AGR:
+      Format(instr, "agr\t'r5,'r6");
+      break;
+    case AGFR:
+      Format(instr, "agfr\t'r5,'r6");
+      break;
+    case ARK:
+      Format(instr, "ark\t'r5,'r6,'r3");
+      break;
+    case AGRK:
+      Format(instr, "agrk\t'r5,'r6,'r3");
+      break;
+    case SGR:
+      Format(instr, "sgr\t'r5,'r6");
+      break;
+    case SGFR:
+      Format(instr, "sgfr\t'r5,'r6");
+      break;
+    case SRK:
+      Format(instr, "srk\t'r5,'r6,'r3");
+      break;
+    case SGRK:
+      Format(instr, "sgrk\t'r5,'r6,'r3");
+      break;
+    case NGR:
+      Format(instr, "ngr\t'r5,'r6");
+      break;
+    case NRK:
+      Format(instr, "nrk\t'r5,'r6,'r3");
+      break;
+    case NGRK:
+      Format(instr, "ngrk\t'r5,'r6,'r3");
+      break;
+    case NILL:
+      Format(instr, "nill\t'r1,'i1");
+      break;
+    case NILH:
+      Format(instr, "nilh\t'r1,'i1");
+      break;
+    case OGR:
+      Format(instr, "ogr\t'r5,'r6");
+      break;
+    case ORK:
+      Format(instr, "ork\t'r5,'r6,'r3");
+      break;
+    case OGRK:
+      Format(instr, "ogrk\t'r5,'r6,'r3");
+      break;
+    case XGR:
+      Format(instr, "xgr\t'r5,'r6");
+      break;
+    case XRK:
+      Format(instr, "xrk\t'r5,'r6,'r3");
+      break;
+    case XGRK:
+      Format(instr, "xgrk\t'r5,'r6,'r3");
+      break;
+    case CGR:
+      Format(instr, "cgr\t'r5,'r6");
+      break;
+    case CLGR:
+      Format(instr, "clgr\t'r5,'r6");
+      break;
+    case LLGFR:
+      Format(instr, "llgfr\t'r5,'r6");
+      break;
+    case LBR:
+      Format(instr, "lbr\t'r5,'r6");
+      break;
+    case LEDBR:
+      Format(instr, "ledbr\t'f5,'f6");
+      break;
+    case LDEBR:
+      Format(instr, "ldebr\t'f5,'f6");
+      break;
+    case LTGR:
+      Format(instr, "ltgr\t'r5,'r6");
+      break;
+    case LTDBR:
+      Format(instr, "ltdbr\t'f5,'f6");
+      break;
+    case LTEBR:
+      Format(instr, "ltebr\t'f5,'f6");
+      break;
+    case LGR:
+      Format(instr, "lgr\t'r5,'r6");
+      break;
+    case LGDR:
+      Format(instr, "lgdr\t'r5,'f6");
+      break;
+    case LGFR:
+      Format(instr, "lgfr\t'r5,'r6");
+      break;
+    case LTGFR:
+      Format(instr, "ltgfr\t'r5,'r6");
+      break;
+    case LCGR:
+      Format(instr, "lcgr\t'r5,'r6");
+      break;
+    case MSR:
+      Format(instr, "msr\t'r5,'r6");
+      break;
+    case LGBR:
+      Format(instr, "lgbr\t'r5,'r6");
+      break;
+    case LGHR:
+      Format(instr, "lghr\t'r5,'r6");
+      break;
+    case MSGR:
+      Format(instr, "msgr\t'r5,'r6");
+      break;
+    case DSGR:
+      Format(instr, "dsgr\t'r5,'r6");
+      break;
+    case LZDR:
+      Format(instr, "lzdr\t'f5");
+      break;
+    case MLR:
+      Format(instr, "mlr\t'r5,'r6");
+      break;
+    case MLGR:
+      Format(instr, "mlgr\t'r5,'r6");
+      break;
+    case ALCR:
+      Format(instr, "alcr\t'r5,'r6");
+      break;
+    case ALGR:
+      Format(instr, "algr\t'r5,'r6");
+      break;
+    case ALRK:
+      Format(instr, "alrk\t'r5,'r6,'r3");
+      break;
+    case ALGRK:
+      Format(instr, "algrk\t'r5,'r6,'r3");
+      break;
+    case SLGR:
+      Format(instr, "slgr\t'r5,'r6");
+      break;
+    case SLBR:
+      Format(instr, "slbr\t'r5,'r6");
+      break;
+    case DLR:
+      Format(instr, "dlr\t'r5,'r6");
+      break;
+    case DLGR:
+      Format(instr, "dlgr\t'r5,'r6");
+      break;
+    case SLRK:
+      Format(instr, "slrk\t'r5,'r6,'r3");
+      break;
+    case SLGRK:
+      Format(instr, "slgrk\t'r5,'r6,'r3");
+      break;
+    case LHR:
+      Format(instr, "lhr\t'r5,'r6");
+      break;
+    case LLHR:
+      Format(instr, "llhr\t'r5,'r6");
+      break;
+    case LLGHR:
+      Format(instr, "llghr\t'r5,'r6");
+      break;
+    case LNGR:
+      Format(instr, "lngr\t'r5,'r6");
+      break;
+    case A:
+      Format(instr, "a\t'r1,'d1('r2d,'r3)");
+      break;
+    case S:
+      Format(instr, "s\t'r1,'d1('r2d,'r3)");
+      break;
+    case M:
+      Format(instr, "m\t'r1,'d1('r2d,'r3)");
+      break;
+    case D:
+      Format(instr, "d\t'r1,'d1('r2d,'r3)");
+      break;
+    case O:
+      Format(instr, "o\t'r1,'d1('r2d,'r3)");
+      break;
+    case N:
+      Format(instr, "n\t'r1,'d1('r2d,'r3)");
+      break;
+    case L:
+      Format(instr, "l\t'r1,'d1('r2d,'r3)");
+      break;
+    case C:
+      Format(instr, "c\t'r1,'d1('r2d,'r3)");
+      break;
+    case AH:
+      Format(instr, "ah\t'r1,'d1('r2d,'r3)");
+      break;
+    case SH:
+      Format(instr, "sh\t'r1,'d1('r2d,'r3)");
+      break;
+    case MH:
+      Format(instr, "mh\t'r1,'d1('r2d,'r3)");
+      break;
+    case AL:
+      Format(instr, "al\t'r1,'d1('r2d,'r3)");
+      break;
+    case SL:
+      Format(instr, "sl\t'r1,'d1('r2d,'r3)");
+      break;
+    case LA:
+      Format(instr, "la\t'r1,'d1('r2d,'r3)");
+      break;
+    case CH:
+      Format(instr, "ch\t'r1,'d1('r2d,'r3)");
+      break;
+    case CL:
+      Format(instr, "cl\t'r1,'d1('r2d,'r3)");
+      break;
+    case CLI:
+      Format(instr, "cli\t'd1('r3),'i8");
+      break;
+    case TM:
+      Format(instr, "tm\t'd1('r3),'i8");
+      break;
+    case BC:
+      Format(instr, "bc\t'm1,'d1('r2d,'r3)");
+      break;
+    case BCT:
+      Format(instr, "bct\t'r1,'d1('r2d,'r3)");
+      break;
+    case ST:
+      Format(instr, "st\t'r1,'d1('r2d,'r3)");
+      break;
+    case STC:
+      Format(instr, "stc\t'r1,'d1('r2d,'r3)");
+      break;
+    case IC_z:
+      Format(instr, "ic\t'r1,'d1('r2d,'r3)");
+      break;
+    case LD:
+      Format(instr, "ld\t'f1,'d1('r2d,'r3)");
+      break;
+    case LE:
+      Format(instr, "le\t'f1,'d1('r2d,'r3)");
+      break;
+    case LDGR:
+      Format(instr, "ldgr\t'f5,'r6");
+      break;
+    case STE:
+      Format(instr, "ste\t'f1,'d1('r2d,'r3)");
+      break;
+    case STD:
+      Format(instr, "std\t'f1,'d1('r2d,'r3)");
+      break;
+    case CFDBR:
+      Format(instr, "cfdbr\t'r5,'m2,'f6");
+      break;
+    case CDFBR:
+      Format(instr, "cdfbr\t'f5,'m2,'r6");
+      break;
+    case CFEBR:
+      Format(instr, "cfebr\t'r5,'m2,'f6");
+      break;
+    case CEFBR:
+      Format(instr, "cefbr\t'f5,'m2,'r6");
+      break;
+    case CGEBR:
+      Format(instr, "cgebr\t'r5,'m2,'f6");
+      break;
+    case CGDBR:
+      Format(instr, "cgdbr\t'r5,'m2,'f6");
+      break;
+    case CEGBR:
+      Format(instr, "cegbr\t'f5,'m2,'r6");
+      break;
+    case CDGBR:
+      Format(instr, "cdgbr\t'f5,'m2,'r6");
+      break;
+    case CDLFBR:
+      Format(instr, "cdlfbr\t'f5,'m2,'r6");
+      break;
+    case CDLGBR:
+      Format(instr, "cdlgbr\t'f5,'m2,'r6");
+      break;
+    case CELGBR:
+      Format(instr, "celgbr\t'f5,'m2,'r6");
+      break;
+    case CLFDBR:
+      Format(instr, "clfdbr\t'r5,'m2,'f6");
+      break;
+    case CLGDBR:
+      Format(instr, "clgdbr\t'r5,'m2,'f6");
+      break;
+    case AEBR:
+      Format(instr, "aebr\t'f5,'f6");
+      break;
+    case SEBR:
+      Format(instr, "sebr\t'f5,'f6");
+      break;
+    case MEEBR:
+      Format(instr, "meebr\t'f5,'f6");
+      break;
+    case DEBR:
+      Format(instr, "debr\t'f5,'f6");
+      break;
+    case ADBR:
+      Format(instr, "adbr\t'f5,'f6");
+      break;
+    case SDBR:
+      Format(instr, "sdbr\t'f5,'f6");
+      break;
+    case MDBR:
+      Format(instr, "mdbr\t'f5,'f6");
+      break;
+    case DDBR:
+      Format(instr, "ddbr\t'f5,'f6");
+      break;
+    case CDBR:
+      Format(instr, "cdbr\t'f5,'f6");
+      break;
+    case CEBR:
+      Format(instr, "cebr\t'f5,'f6");
+      break;
+    case SQDBR:
+      Format(instr, "sqdbr\t'f5,'f6");
+      break;
+    case SQEBR:
+      Format(instr, "sqebr\t'f5,'f6");
+      break;
+    case LCDBR:
+      Format(instr, "lcdbr\t'f5,'f6");
+      break;
+    case STH:
+      Format(instr, "sth\t'r1,'d1('r2d,'r3)");
+      break;
+    case SRDA:
+      Format(instr, "srda\t'r1,'d1('r3)");
+      break;
+    case SRDL:
+      Format(instr, "srdl\t'r1,'d1('r3)");
+      break;
+    case MADBR:
+      Format(instr, "madbr\t'f3,'f5,'f6");
+      break;
+    case MSDBR:
+      Format(instr, "msdbr\t'f3,'f5,'f6");
+      break;
+    case FLOGR:
+      Format(instr, "flogr\t'r5,'r6");
+      break;
+    case FIEBRA:
+      Format(instr, "fiebra\t'f5,'m2,'f6,'m3");
+      break;
+    case FIDBRA:
+      Format(instr, "fidbra\t'f5,'m2,'f6,'m3");
+      break;
+    // TRAP4 is used when calling native functions. It will not be generated
+    // in native code.
+    case TRAP4: {
+      Format(instr, "trap4");
+      break;
+    }
+    default:
+      return false;
+  }
+  return true;
+}
+
+// Disassembles Six Byte S390 Instructions
+// @return true if successfully decoded
+bool Decoder::DecodeSixByte(Instruction* instr) {
+  // Print the Instruction bits.
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%012" PRIx64 "   ",
+               instr->InstructionBits<SixByteInstr>());
+
+  Opcode opcode = instr->S390OpcodeValue();
+  switch (opcode) {
+    case LLILF:
+      Format(instr, "llilf\t'r1,'i7");
+      break;
+    case LLIHF:
+      Format(instr, "llihf\t'r1,'i7");
+      break;
+    case AFI:
+      Format(instr, "afi\t'r1,'i7");
+      break;
+    case ASI:
+      Format(instr, "asi\t'd2('r3),'ic");
+      break;
+    case AGSI:
+      Format(instr, "agsi\t'd2('r3),'ic");
+      break;
+    case ALFI:
+      Format(instr, "alfi\t'r1,'i7");
+      break;
+    case AHIK:
+      Format(instr, "ahik\t'r1,'r2,'i1");
+      break;
+    case AGHIK:
+      Format(instr, "aghik\t'r1,'r2,'i1");
+      break;
+    case CLGFI:
+      Format(instr, "clgfi\t'r1,'i7");
+      break;
+    case CLFI:
+      Format(instr, "clfi\t'r1,'i7");
+      break;
+    case CFI:
+      Format(instr, "cfi\t'r1,'i2");
+      break;
+    case CGFI:
+      Format(instr, "cgfi\t'r1,'i2");
+      break;
+    case BRASL:
+      Format(instr, "brasl\t'r1,'ie");
+      break;
+    case BRCL:
+      Format(instr, "brcl\t'm1,'i5");
+      break;
+    case IIHF:
+      Format(instr, "iihf\t'r1,'i7");
+      break;
+    case IILF:
+      Format(instr, "iilf\t'r1,'i7");
+      break;
+    case XIHF:
+      Format(instr, "xihf\t'r1,'i7");
+      break;
+    case XILF:
+      Format(instr, "xilf\t'r1,'i7");
+      break;
+    case SLLK:
+      Format(instr, "sllk\t'r1,'r2,'d2('r3)");
+      break;
+    case SLLG:
+      Format(instr, "sllg\t'r1,'r2,'d2('r3)");
+      break;
+    case RLL:
+      Format(instr, "rll\t'r1,'r2,'d2('r3)");
+      break;
+    case RLLG:
+      Format(instr, "rllg\t'r1,'r2,'d2('r3)");
+      break;
+    case SRLK:
+      Format(instr, "srlk\t'r1,'r2,'d2('r3)");
+      break;
+    case SRLG:
+      Format(instr, "srlg\t'r1,'r2,'d2('r3)");
+      break;
+    case SLAK:
+      Format(instr, "slak\t'r1,'r2,'d2('r3)");
+      break;
+    case SLAG:
+      Format(instr, "slag\t'r1,'r2,'d2('r3)");
+      break;
+    case SRAK:
+      Format(instr, "srak\t'r1,'r2,'d2('r3)");
+      break;
+    case SRAG:
+      Format(instr, "srag\t'r1,'r2,'d2('r3)");
+      break;
+    case RISBG:
+      Format(instr, "risbg\t'r1,'r2,'i9,'ia,'ib");
+      break;
+    case RISBGN:
+      Format(instr, "risbgn\t'r1,'r2,'i9,'ia,'ib");
+      break;
+    case LMY:
+      Format(instr, "lmy\t'r1,'r2,'d2('r3)");
+      break;
+    case LMG:
+      Format(instr, "lmg\t'r1,'r2,'d2('r3)");
+      break;
+    case STMY:
+      Format(instr, "stmy\t'r1,'r2,'d2('r3)");
+      break;
+    case STMG:
+      Format(instr, "stmg\t'r1,'r2,'d2('r3)");
+      break;
+    case LT:
+      Format(instr, "lt\t'r1,'d2('r2d,'r3)");
+      break;
+    case LTG:
+      Format(instr, "ltg\t'r1,'d2('r2d,'r3)");
+      break;
+    case ML:
+      Format(instr, "ml\t'r1,'d2('r2d,'r3)");
+      break;
+    case AY:
+      Format(instr, "ay\t'r1,'d2('r2d,'r3)");
+      break;
+    case SY:
+      Format(instr, "sy\t'r1,'d2('r2d,'r3)");
+      break;
+    case NY:
+      Format(instr, "ny\t'r1,'d2('r2d,'r3)");
+      break;
+    case OY:
+      Format(instr, "oy\t'r1,'d2('r2d,'r3)");
+      break;
+    case XY:
+      Format(instr, "xy\t'r1,'d2('r2d,'r3)");
+      break;
+    case CY:
+      Format(instr, "cy\t'r1,'d2('r2d,'r3)");
+      break;
+    case AHY:
+      Format(instr, "ahy\t'r1,'d2('r2d,'r3)");
+      break;
+    case SHY:
+      Format(instr, "shy\t'r1,'d2('r2d,'r3)");
+      break;
+    case LGH:
+      Format(instr, "lgh\t'r1,'d2('r2d,'r3)");
+      break;
+    case AG:
+      Format(instr, "ag\t'r1,'d2('r2d,'r3)");
+      break;
+    case AGF:
+      Format(instr, "agf\t'r1,'d2('r2d,'r3)");
+      break;
+    case SG:
+      Format(instr, "sg\t'r1,'d2('r2d,'r3)");
+      break;
+    case NG:
+      Format(instr, "ng\t'r1,'d2('r2d,'r3)");
+      break;
+    case OG:
+      Format(instr, "og\t'r1,'d2('r2d,'r3)");
+      break;
+    case XG:
+      Format(instr, "xg\t'r1,'d2('r2d,'r3)");
+      break;
+    case CG:
+      Format(instr, "cg\t'r1,'d2('r2d,'r3)");
+      break;
+    case LB:
+      Format(instr, "lb\t'r1,'d2('r2d,'r3)");
+      break;
+    case LG:
+      Format(instr, "lg\t'r1,'d2('r2d,'r3)");
+      break;
+    case LGF:
+      Format(instr, "lgf\t'r1,'d2('r2d,'r3)");
+      break;
+    case LLGF:
+      Format(instr, "llgf\t'r1,'d2('r2d,'r3)");
+      break;
+    case LY:
+      Format(instr, "ly\t'r1,'d2('r2d,'r3)");
+      break;
+    case ALY:
+      Format(instr, "aly\t'r1,'d2('r2d,'r3)");
+      break;
+    case ALG:
+      Format(instr, "alg\t'r1,'d2('r2d,'r3)");
+      break;
+    case SLG:
+      Format(instr, "slg\t'r1,'d2('r2d,'r3)");
+      break;
+    case SGF:
+      Format(instr, "sgf\t'r1,'d2('r2d,'r3)");
+      break;
+    case SLY:
+      Format(instr, "sly\t'r1,'d2('r2d,'r3)");
+      break;
+    case LLH:
+      Format(instr, "llh\t'r1,'d2('r2d,'r3)");
+      break;
+    case LLGH:
+      Format(instr, "llgh\t'r1,'d2('r2d,'r3)");
+      break;
+    case LLC:
+      Format(instr, "llc\t'r1,'d2('r2d,'r3)");
+      break;
+    case LLGC:
+      Format(instr, "llgc\t'r1,'d2('r2d,'r3)");
+      break;
+    case LDEB:
+      Format(instr, "ldeb\t'f1,'d2('r2d,'r3)");
+      break;
+    case LAY:
+      Format(instr, "lay\t'r1,'d2('r2d,'r3)");
+      break;
+    case LARL:
+      Format(instr, "larl\t'r1,'i5");
+      break;
+    case LGB:
+      Format(instr, "lgb\t'r1,'d2('r2d,'r3)");
+      break;
+    case CHY:
+      Format(instr, "chy\t'r1,'d2('r2d,'r3)");
+      break;
+    case CLY:
+      Format(instr, "cly\t'r1,'d2('r2d,'r3)");
+      break;
+    case CLIY:
+      Format(instr, "cliy\t'd2('r3),'i8");
+      break;
+    case TMY:
+      Format(instr, "tmy\t'd2('r3),'i8");
+      break;
+    case CLG:
+      Format(instr, "clg\t'r1,'d2('r2d,'r3)");
+      break;
+    case BCTG:
+      Format(instr, "bctg\t'r1,'d2('r2d,'r3)");
+      break;
+    case STY:
+      Format(instr, "sty\t'r1,'d2('r2d,'r3)");
+      break;
+    case STG:
+      Format(instr, "stg\t'r1,'d2('r2d,'r3)");
+      break;
+    case ICY:
+      Format(instr, "icy\t'r1,'d2('r2d,'r3)");
+      break;
+    case MVC:
+      Format(instr, "mvc\t'd3('i8,'r3),'d4('r7)");
+      break;
+    case MVHI:
+      Format(instr, "mvhi\t'd3('r3),'id");
+      break;
+    case MVGHI:
+      Format(instr, "mvghi\t'd3('r3),'id");
+      break;
+    case ALGFI:
+      Format(instr, "algfi\t'r1,'i7");
+      break;
+    case SLGFI:
+      Format(instr, "slgfi\t'r1,'i7");
+      break;
+    case SLFI:
+      Format(instr, "slfi\t'r1,'i7");
+      break;
+    case NIHF:
+      Format(instr, "nihf\t'r1,'i7");
+      break;
+    case NILF:
+      Format(instr, "nilf\t'r1,'i7");
+      break;
+    case OIHF:
+      Format(instr, "oihf\t'r1,'i7");
+      break;
+    case OILF:
+      Format(instr, "oilf\t'r1,'i7");
+      break;
+    case MSFI:
+      Format(instr, "msfi\t'r1,'i7");
+      break;
+    case MSGFI:
+      Format(instr, "msgfi\t'r1,'i7");
+      break;
+    case LDY:
+      Format(instr, "ldy\t'f1,'d2('r2d,'r3)");
+      break;
+    case LEY:
+      Format(instr, "ley\t'f1,'d2('r2d,'r3)");
+      break;
+    case STEY:
+      Format(instr, "stey\t'f1,'d2('r2d,'r3)");
+      break;
+    case STDY:
+      Format(instr, "stdy\t'f1,'d2('r2d,'r3)");
+      break;
+    case ADB:
+      Format(instr, "adb\t'r1,'d1('r2d, 'r3)");
+      break;
+    case SDB:
+      Format(instr, "sdb\t'r1,'d1('r2d, 'r3)");
+      break;
+    case MDB:
+      Format(instr, "mdb\t'r1,'d1('r2d, 'r3)");
+      break;
+    case DDB:
+      Format(instr, "ddb\t'r1,'d1('r2d, 'r3)");
+      break;
+    case SQDB:
+      Format(instr, "sqdb\t'r1,'d1('r2d, 'r3)");
+      break;
+    default:
+      return false;
+  }
+  return true;
+}
+
+#undef VERIFIY
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+  Instruction* instr = Instruction::At(instr_ptr);
+  int instrLength = instr->InstructionLength();
+
+  if (2 == instrLength)
+    DecodeTwoByte(instr);
+  else if (4 == instrLength)
+    DecodeFourByte(instr);
+  else
+    DecodeSixByte(instr);
+
+  return instrLength;
+}
+
+}  // namespace internal
+}  // namespace v8
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  return tmp_buffer_.start();
+}
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  return v8::internal::Register::from_code(reg).ToString();
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // S390 does not have the concept of a byte register
+  return "nobytereg";
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  // S390 does not have XMM registers.
+  // TODO(joransiu): Consider updating this for Vector Regs.
+  UNREACHABLE();
+  return "noxmmreg";
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code. So we will not try
+  // to access any memory.
+  return "";
+}
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+Disassembler::~Disassembler() {}
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instruction) {
+  v8::internal::Decoder d(converter_, buffer);
+  return d.InstructionDecode(instruction);
+}
+
+// The S390 assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    v8::internal::PrintF(f, "%p    %08x      %s\n", prev_pc,
+                         *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+  }
+}
+
+}  // namespace disasm
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/frames-s390.cc b/src/s390/frames-s390.cc
new file mode 100644
index 0000000..20506ec
--- /dev/null
+++ b/src/s390/frames-s390.cc
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/frames.h"
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+#include "src/s390/assembler-s390-inl.h"
+#include "src/s390/assembler-s390.h"
+#include "src/s390/frames-s390.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+  UNREACHABLE();
+  return no_reg;
+}
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+  UNREACHABLE();
+  return no_reg;
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/frames-s390.h b/src/s390/frames-s390.h
new file mode 100644
index 0000000..cca060a
--- /dev/null
+++ b/src/s390/frames-s390.h
@@ -0,0 +1,189 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_FRAMES_S390_H_
+#define V8_S390_FRAMES_S390_H_
+
+namespace v8 {
+namespace internal {
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 16;
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 1 | 1 << 2 |  // r2  a1
+                               1 << 3 |           // r3  a2
+                               1 << 4 |           // r4  a3
+                               1 << 5;            // r5  a4
+
+const int kNumJSCallerSaved = 5;
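+
+// (A RegList is a plain bitmask over register codes, so rN belongs to a list
+// exactly when bit N is set, e.g. (kJSCallerSaved & (1 << 2)) != 0 for r2.)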
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns the code of the first such register.
+int JSCallerSavedCode(int n);
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved =
+    1 << 6 |   // r6 (argument passing in CEntryStub)
+               //    (HandleScope logic in MacroAssembler)
+    1 << 7 |   // r7 (argument passing in CEntryStub)
+               //    (HandleScope logic in MacroAssembler)
+    1 << 8 |   // r8 (argument passing in CEntryStub)
+               //    (HandleScope logic in MacroAssembler)
+    1 << 9 |   // r9 (HandleScope logic in MacroAssembler)
+    1 << 10 |  // r10 (Roots register in Javascript)
+    1 << 11 |  // r11 (fp in Javascript)
+    1 << 12 |  // r12 (ip in Javascript)
+    1 << 13;   // r13 (cp in Javascript)
+// 1 << 15;   // r15 (sp in Javascript)
+
+const int kNumCalleeSaved = 8;
+
+#ifdef V8_TARGET_ARCH_S390X
+
+const RegList kCallerSavedDoubles = 1 << 0 |  // d0
+                                    1 << 1 |  // d1
+                                    1 << 2 |  // d2
+                                    1 << 3 |  // d3
+                                    1 << 4 |  // d4
+                                    1 << 5 |  // d5
+                                    1 << 6 |  // d6
+                                    1 << 7;   // d7
+
+const int kNumCallerSavedDoubles = 8;
+
+const RegList kCalleeSavedDoubles = 1 << 8 |   // d8
+                                    1 << 9 |   // d9
+                                    1 << 10 |  // d10
+                                    1 << 11 |  // d11
+                                    1 << 12 |  // d12
+                                    1 << 13 |  // d13
+                                    1 << 14 |  // d14
+                                    1 << 15;   // d15
+
+const int kNumCalleeSavedDoubles = 8;
+
+#else
+
+const RegList kCallerSavedDoubles = 1 << 14 |  // d14
+                                    1 << 15 |  // d15
+                                    1 << 0 |   // d0
+                                    1 << 1 |   // d1
+                                    1 << 2 |   // d2
+                                    1 << 3 |   // d3
+                                    1 << 5 |   // d5
+                                    1 << 7 |   // d7
+                                    1 << 8 |   // d8
+                                    1 << 9 |   // d9
+                                    1 << 10 |  // d10
+                                    1 << 11 |  // d11
+                                    1 << 12 |  // d12
+                                    1 << 13;   // d13
+
+const int kNumCallerSavedDoubles = 14;
+
+const RegList kCalleeSavedDoubles = 1 << 4 |  // d4
+                                    1 << 6;   // d6
+
+const int kNumCalleeSavedDoubles = 2;
+
+#endif
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+// const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+// const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI.
+
+#if V8_TARGET_ARCH_S390X
+// [0] Back Chain
+// [1] Reserved for compiler use
+// [2] GPR 2
+// [3] GPR 3
+// ...
+// [15] GPR 15
+// [16] FPR 0
+// [17] FPR 2
+// [18] FPR 4
+// [19] FPR 6
+const int kNumRequiredStackFrameSlots = 20;
+const int kStackFrameRASlot = 14;
+const int kStackFrameSPSlot = 15;
+const int kStackFrameExtraParamSlot = 20;
+#else
+// [0] Back Chain
+// [1] Reserved for compiler use
+// [2] GPR 2
+// [3] GPR 3
+// ...
+// [15] GPR 15
+// [16..17] FPR 0
+// [18..19] FPR 2
+// [20..21] FPR 4
+// [22..23] FPR 6
+const int kNumRequiredStackFrameSlots = 24;
+const int kStackFrameRASlot = 14;
+const int kStackFrameSPSlot = 15;
+const int kStackFrameExtraParamSlot = 24;
+#endif
+
+// The zLinux ABI requires caller frames to include sufficient space for the
+// callee-preserved register save area.
+#if V8_TARGET_ARCH_S390X
+const int kCalleeRegisterSaveAreaSize = 160;
+#elif V8_TARGET_ARCH_S390
+const int kCalleeRegisterSaveAreaSize = 96;
+#else
+const int kCalleeRegisterSaveAreaSize = 0;
+#endif
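+
+// (Numerically, the required stack frame slots above cover the same space:
+// 20 slots * 8 bytes = 160 bytes on 64-bit and 24 slots * 4 bytes = 96 bytes
+// on 31-bit.)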
+
+// ----------------------------------------------------
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset =
+      -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+class ExitFrameConstants : public TypedFrameConstants {
+ public:
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
+
+  // The caller fields are below the frame pointer on the stack.
+  static const int kCallerFPOffset = 0 * kPointerSize;
+  // The calling JS function is below FP.
+  static const int kCallerPCOffset = 1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = 2 * kPointerSize;
+};
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kLastParameterOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_FRAMES_S390_H_
diff --git a/src/s390/interface-descriptors-s390.cc b/src/s390/interface-descriptors-s390.cc
new file mode 100644
index 0000000..63afca8
--- /dev/null
+++ b/src/s390/interface-descriptors-s390.cc
@@ -0,0 +1,373 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+const Register LoadDescriptor::ReceiverRegister() { return r3; }
+const Register LoadDescriptor::NameRegister() { return r4; }
+const Register LoadDescriptor::SlotRegister() { return r2; }
+
+const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
+
+const Register StoreDescriptor::ReceiverRegister() { return r3; }
+const Register StoreDescriptor::NameRegister() { return r4; }
+const Register StoreDescriptor::ValueRegister() { return r2; }
+
+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r6; }
+
+const Register VectorStoreICDescriptor::VectorRegister() { return r5; }
+
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return r6; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return r5; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return r7; }
+
+const Register StoreTransitionDescriptor::MapRegister() { return r5; }
+
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r4; }
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
+
+const Register InstanceOfDescriptor::LeftRegister() { return r3; }
+const Register InstanceOfDescriptor::RightRegister() { return r2; }
+
+const Register StringCompareDescriptor::LeftRegister() { return r3; }
+const Register StringCompareDescriptor::RightRegister() { return r2; }
+
+const Register ApiGetterDescriptor::function_address() { return r4; }
+
+const Register MathPowTaggedDescriptor::exponent() { return r4; }
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
+
+const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
+const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
+
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewContextDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r5};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+// static
+const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
+
+void TypeofDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r5};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r5, r4, r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r5, r4, r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r5, r4, r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4, r5};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4, r5, r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallFunctionDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r5};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r5, r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallConstructDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // r2 : number of arguments
+  // r3 : the function to call
+  // r4 : feedback vector
+  // r5 : slot in feedback vector (Smi, for RecordCallTarget)
+  // r6 : new target (for IsSuperConstructorCall)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {r2, r3, r6, r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // r2 : number of arguments
+  // r3 : the target to call
+  Register registers[] = {r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructStubDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // r2 : number of arguments
+  // r3 : the target to call
+  // r5 : the new target
+  // r4 : allocation site or undefined
+  Register registers[] = {r3, r5, r2, r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // r2 : number of arguments
+  // r3 : the target to call
+  // r5 : the new target
+  Register registers[] = {r3, r5, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4, r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r2, r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  data->InitializePlatformSpecific(0, nullptr, nullptr);
+}
+
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
+
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // r2 -- number of arguments
+  // r3 -- function
+  // r4 -- allocation site with elements kind
+  Register registers[] = {r3, r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (constructor pointer, and single argument)
+  Register registers[] = {r3, r4, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InternalArrayConstructorConstantArgCountDescriptor::
+    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+  // register state
+  // r2 -- number of arguments
+  // r3 -- constructor function
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (constructor pointer, and single argument)
+  Register registers[] = {r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (arg count)
+  Register registers[] = {r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CompareDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void BinaryOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4, r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void StringAddDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void KeyedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r4,  // key
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void NamedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r4,  // name
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallHandlerDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r2,  // receiver
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r3,  // JSFunction
+      r5,  // the new target
+      r2,  // actual number of arguments
+      r4,  // expected number of arguments
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r2,  // callee
+      r6,  // call_data
+      r4,  // holder
+      r3,  // api_function_address
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+      kInterpreterDispatchTableRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r2,  // argument count (not including receiver)
+      r4,  // address of first argument
+      r3   // the target callable to be called
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r2,  // argument count (not including receiver)
+      r5,  // new target
+      r3,  // constructor to call
+      r4   // address of the first argument
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r2,  // argument count (argc)
+      r4,  // address of first argument (argv)
+      r3   // the runtime function to call
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/macro-assembler-s390.cc b/src/s390/macro-assembler-s390.cc
new file mode 100644
index 0000000..e6abf68
--- /dev/null
+++ b/src/s390/macro-assembler-s390.cc
@@ -0,0 +1,5431 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>  // For assert
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/register-configuration.h"
+#include "src/runtime/runtime.h"
+
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+                               CodeObjectRequired create_code_object)
+    : Assembler(arg_isolate, buffer, size),
+      generating_stub_(false),
+      has_frame_(false) {
+  if (create_code_object == CodeObjectRequired::kYes) {
+    code_object_ =
+        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
+  }
+}
+
+void MacroAssembler::Jump(Register target) { b(target); }
+
+void MacroAssembler::JumpToJSEntry(Register target) {
+  Move(ip, target);
+  Jump(ip);
+}
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond, CRegister) {
+  Label skip;
+
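+  // The indirect branch below is unconditional, so a conditional jump is
+  // implemented by branching over it when the condition does not hold.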
+  if (cond != al) b(NegateCondition(cond), &skip);
+
+  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
+
+  mov(ip, Operand(target, rmode));
+  b(ip);
+
+  bind(&skip);
+}
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+                          CRegister cr) {
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
+}
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond) {
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
+  jump(code, rmode, cond);
+}
+
+int MacroAssembler::CallSize(Register target) { return 2; }  // BASR
+
+void MacroAssembler::Call(Register target) {
+  Label start;
+  bind(&start);
+
+  // Statement positions are expected to be recorded when the target
+  // address is loaded.
+  positions_recorder()->WriteRecordedPositions();
+
+  // Branch to target via indirect branch
+  basr(r14, target);
+
+  DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
+}
+
+void MacroAssembler::CallJSEntry(Register target) {
+  DCHECK(target.is(ip));
+  Call(target);
+}
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+                             Condition cond) {
+  // S390 Assembler::move sequence is IILF / IIHF
+  int size;
+#if V8_TARGET_ARCH_S390X
+  size = 14;  // IILF + IIHF + BASR
+#else
+  size = 8;  // IILF + BASR
+#endif
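+  // (IILF and IIHF are 6 bytes each and BASR is 2 bytes, hence 14 and 8.)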
+  return size;
+}
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
+                                                   RelocInfo::Mode rmode,
+                                                   Condition cond) {
+  // S390 Assembler::move sequence is IILF / IIHF
+  int size;
+#if V8_TARGET_ARCH_S390X
+  size = 14;  // IILF + IIHF + BASR
+#else
+  size = 8;  // IILF + BASR
+#endif
+  return size;
+}
+
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
+                          Condition cond) {
+  DCHECK(cond == al);
+
+#ifdef DEBUG
+  // Check the expected size before generating code to ensure we assume the
+  // same constant pool availability (e.g., whether the constant pool is full
+  // or not).
+  int expected_size = CallSize(target, rmode, cond);
+  Label start;
+  bind(&start);
+#endif
+
+  // Statement positions are expected to be recorded when the target
+  // address is loaded.
+  positions_recorder()->WriteRecordedPositions();
+
+  mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
+  basr(r14, ip);
+
+  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+                             TypeFeedbackId ast_id, Condition cond) {
+  return 6;  // BRASL
+}
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+                          TypeFeedbackId ast_id, Condition cond) {
+  DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
+
+#ifdef DEBUG
+  // Check the expected size before generating code to ensure we assume the
+  // same constant pool availability (e.g., whether the constant pool is full
+  // or not).
+  int expected_size = CallSize(code, rmode, ast_id, cond);
+  Label start;
+  bind(&start);
+#endif
+  call(code, rmode, ast_id);
+  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+void MacroAssembler::Drop(int count) {
+  if (count > 0) {
+    int total = count * kPointerSize;
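+    // la accepts an unsigned 12-bit displacement and lay a signed 20-bit one;
+    // larger adjustments fall back to an explicit add of the stack pointer.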
+    if (is_uint12(total)) {
+      la(sp, MemOperand(sp, total));
+    } else if (is_int20(total)) {
+      lay(sp, MemOperand(sp, total));
+    } else {
+      AddP(sp, Operand(total));
+    }
+  }
+}
+
+void MacroAssembler::Drop(Register count, Register scratch) {
+  ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
+  AddP(sp, sp, scratch);
+}
+
+void MacroAssembler::Call(Label* target) { b(r14, target); }
+
+void MacroAssembler::Push(Handle<Object> handle) {
+  mov(r0, Operand(handle));
+  push(r0);
+}
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+  AllowDeferredHandleDereference smi_check;
+  if (value->IsSmi()) {
+    LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
+  } else {
+    DCHECK(value->IsHeapObject());
+    if (isolate()->heap()->InNewSpace(*value)) {
+      Handle<Cell> cell = isolate()->factory()->NewCell(value);
+      mov(dst, Operand(cell));
+      LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
+    } else {
+      mov(dst, Operand(value));
+    }
+  }
+}
+
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+  if (!dst.is(src)) {
+    LoadRR(dst, src);
+  }
+}
+
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+  if (!dst.is(src)) {
+    ldr(dst, src);
+  }
+}
+
+void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src) {
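+  // Spill the double just below sp, overwrite the 32-bit half holding the low
+  // word (the lower address on little-endian, the higher one on big-endian),
+  // then reload the result.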
+  StoreDouble(dst, MemOperand(sp, -kDoubleSize));
+#if V8_TARGET_LITTLE_ENDIAN
+  StoreW(src, MemOperand(sp, -kDoubleSize));
+#else
+  StoreW(src, MemOperand(sp, -kDoubleSize / 2));
+#endif
+  ldy(dst, MemOperand(sp, -kDoubleSize));
+}
+
+void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src) {
+  StoreDouble(dst, MemOperand(sp, -kDoubleSize));
+#if V8_TARGET_LITTLE_ENDIAN
+  StoreW(src, MemOperand(sp, -kDoubleSize / 2));
+#else
+  StoreW(src, MemOperand(sp, -kDoubleSize));
+#endif
+  ldy(dst, MemOperand(sp, -kDoubleSize));
+}
+
+void MacroAssembler::MultiPush(RegList regs, Register location) {
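+  // Registers are stored in order of descending code, so the lowest-coded
+  // register in the list ends up at the lowest address.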
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kPointerSize;
+
+  SubP(location, location, Operand(stack_offset));
+  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kPointerSize;
+      StoreP(ToRegister(i), MemOperand(location, stack_offset));
+    }
+  }
+}
+
+void MacroAssembler::MultiPop(RegList regs, Register location) {
+  int16_t stack_offset = 0;
+
+  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      LoadP(ToRegister(i), MemOperand(location, stack_offset));
+      stack_offset += kPointerSize;
+    }
+  }
+  AddP(location, location, Operand(stack_offset));
+}
+
+void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
+  int16_t num_to_push = NumberOfBitsSet(dregs);
+  int16_t stack_offset = num_to_push * kDoubleSize;
+
+  SubP(location, location, Operand(stack_offset));
+  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
+    if ((dregs & (1 << i)) != 0) {
+      DoubleRegister dreg = DoubleRegister::from_code(i);
+      stack_offset -= kDoubleSize;
+      StoreDouble(dreg, MemOperand(location, stack_offset));
+    }
+  }
+}
+
+void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
+  int16_t stack_offset = 0;
+
+  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
+    if ((dregs & (1 << i)) != 0) {
+      DoubleRegister dreg = DoubleRegister::from_code(i);
+      LoadDouble(dreg, MemOperand(location, stack_offset));
+      stack_offset += kDoubleSize;
+    }
+  }
+  AddP(location, location, Operand(stack_offset));
+}
+
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+                              Condition) {
+  LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+}
+
+void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
+                               Condition) {
+  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
+  StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2));
+}
+
+void MacroAssembler::InNewSpace(Register object, Register scratch,
+                                Condition cond, Label* branch) {
+  DCHECK(cond == eq || cond == ne);
+  // TODO(joransiu): check if we can merge mov Operand into AndP.
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, scratch, mask, cond, branch);
+}
+
+void MacroAssembler::RecordWriteField(
+    Register object, int offset, Register value, Register dst,
+    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
+    PointersToHereCheck pointers_to_here_check_for_value) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  DCHECK(IsAligned(offset, kPointerSize));
+
+  lay(dst, MemOperand(object, offset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+    beq(&ok, Label::kNear);
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
+              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
+    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
+  }
+}
+
+// Will clobber 4 registers: object, map, dst, ip.  The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object, Register map,
+                                       Register dst,
+                                       LinkRegisterStatus lr_status,
+                                       SaveFPRegsMode fp_mode) {
+  if (emit_debug_code()) {
+    LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+    CmpP(dst, Operand(isolate()->factory()->meta_map()));
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  if (!FLAG_incremental_marking) {
+    return;
+  }
+
+  if (emit_debug_code()) {
+    CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  Label done;
+
+  // A single check of the map's page's interesting flag suffices, since the
+  // flag is only set during incremental collection, and then it is also
+  // guaranteed that the from-object's page's interesting flag is set.  This
+  // optimization relies on the fact that maps can never be in new space.
+  CheckPageFlag(map,
+                map,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+
+  lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+    beq(&ok, Label::kNear);
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  // Record the actual write.
+  if (lr_status == kLRHasNotBeenSaved) {
+    push(r14);
+  }
+  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+                       fp_mode);
+  CallStub(&stub);
+  if (lr_status == kLRHasNotBeenSaved) {
+    pop(r14);
+  }
+
+  bind(&done);
+
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+
+  // Clobber clobbered registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+    mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+  }
+}
+
+// Will clobber 4 registers: object, address, scratch, ip.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(
+    Register object, Register address, Register value,
+    LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
+    PointersToHereCheck pointers_to_here_check_for_value) {
+  DCHECK(!object.is(value));
+  if (emit_debug_code()) {
+    CmpP(value, MemOperand(address));
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of smis and stores into the young generation.
+  Label done;
+
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+    CheckPageFlag(value,
+                  value,  // Used as scratch.
+                  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+  }
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+  // Record the actual write.
+  if (lr_status == kLRHasNotBeenSaved) {
+    push(r14);
+  }
+  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+                       fp_mode);
+  CallStub(&stub);
+  if (lr_status == kLRHasNotBeenSaved) {
+    pop(r14);
+  }
+
+  bind(&done);
+
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
+                   value);
+
+  // Clobber clobbered registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+  }
+}
+
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(js_function.is(r3));
+  DCHECK(code_entry.is(r6));
+  DCHECK(scratch.is(r7));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    AddP(scratch, js_function, Operand(offset - kHeapObjectTag));
+    LoadP(ip, MemOperand(scratch));
+    CmpP(ip, code_entry);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+  const Register dst = scratch;
+  AddP(dst, js_function, Operand(offset - kHeapObjectTag));
+
+  // Save caller-saved registers.  js_function and code_entry are in the
+  // caller-saved register list.
+  DCHECK(kJSCallerSaved & js_function.bit());
+  DCHECK(kJSCallerSaved & code_entry.bit());
+  MultiPush(kJSCallerSaved | r14.bit());
+
+  int argument_count = 3;
+  PrepareCallCFunction(argument_count, code_entry);
+
+  LoadRR(r2, js_function);
+  LoadRR(r3, dst);
+  mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers (including js_function and code_entry).
+  MultiPop(kJSCallerSaved | r14.bit());
+
+  bind(&done);
+}
+
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address, Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
+  Label done;
+  if (emit_debug_code()) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(ip, Operand(store_buffer));
+  LoadP(scratch, MemOperand(ip));
+  // Store pointer to buffer and increment buffer top.
+  StoreP(address, MemOperand(scratch));
+  AddP(scratch, Operand(kPointerSize));
+  // Write back new top of buffer.
+  StoreP(scratch, MemOperand(ip));
+  // Check for end of buffer; the overflow stub is only called when the end
+  // of the buffer has been reached.
+  AndP(scratch, Operand(StoreBuffer::kStoreBufferMask));
+
+  if (and_then == kFallThroughAtEnd) {
+    bne(&done, Label::kNear);
+  } else {
+    DCHECK(and_then == kReturnAtEnd);
+    bne(&done, Label::kNear);
+  }
+  push(r14);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(r14);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
+  }
+}
+
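+// For reference, an illustrative plain-C++ sketch of what RememberedSetHelper
+// emits (descriptive only, not generated code; names are hypothetical):
+//   *store_buffer_top = slot_address;        // append the slot address
+//   store_buffer_top += kPointerSize;        // bump the buffer top
+//   if ((store_buffer_top & StoreBuffer::kStoreBufferMask) == 0)
+//     CallStoreBufferOverflowStub();         // buffer boundary reached
+// followed by either a fall-through or a return, depending on |and_then|.
+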
+void MacroAssembler::PushCommonFrame(Register marker_reg) {
+  int fp_delta = 0;
+  CleanseP(r14);
+  if (marker_reg.is_valid()) {
+    Push(r14, fp, marker_reg);
+    fp_delta = 1;
+  } else {
+    Push(r14, fp);
+    fp_delta = 0;
+  }
+  la(fp, MemOperand(sp, fp_delta * kPointerSize));
+}
+
+void MacroAssembler::PopCommonFrame(Register marker_reg) {
+  if (marker_reg.is_valid()) {
+    Pop(r14, fp, marker_reg);
+  } else {
+    Pop(r14, fp);
+  }
+}
+
+void MacroAssembler::PushStandardFrame(Register function_reg) {
+  int fp_delta = 0;
+  CleanseP(r14);
+  if (function_reg.is_valid()) {
+    Push(r14, fp, cp, function_reg);
+    fp_delta = 2;
+  } else {
+    Push(r14, fp, cp);
+    fp_delta = 1;
+  }
+  la(fp, MemOperand(sp, fp_delta * kPointerSize));
+}
+
+void MacroAssembler::RestoreFrameStateForTailCall() {
+  // if (FLAG_enable_embedded_constant_pool) {
+  //   LoadP(kConstantPoolRegister,
+  //         MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+  //   set_constant_pool_available(false);
+  // }
+  DCHECK(!FLAG_enable_embedded_constant_pool);
+  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+}
+
+const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
+const int MacroAssembler::kNumSafepointSavedRegisters =
+    Register::kNumAllocatable;
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  DCHECK(num_unsaved >= 0);
+  if (num_unsaved > 0) {
+    lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
+  }
+  MultiPush(kSafepointSavedRegisters);
+}
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  MultiPop(kSafepointSavedRegisters);
+  if (num_unsaved > 0) {
+    la(sp, MemOperand(sp, num_unsaved * kPointerSize));
+  }
+}
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  StoreP(src, SafepointRegisterSlot(dst));
+}
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  LoadP(dst, SafepointRegisterSlot(src));
+}
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that lowest encodings are closest to the stack pointer.
+  RegList regs = kSafepointSavedRegisters;
+  int index = 0;
+
+  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
+
+  for (int16_t i = 0; i < reg_code; i++) {
+    if ((regs & (1 << i)) != 0) {
+      index++;
+    }
+  }
+
+  return index;
+}
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  // General purpose registers are pushed last on the stack.
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
+  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+  return MemOperand(sp, doubles_size + register_offset);
+}
+
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
+                                     const DoubleRegister src) {
+  // Turn potential sNaN into qNaN
+  if (!dst.is(src)) ldr(dst, src);
+  lzdr(kDoubleRegZero);
+  sdbr(dst, kDoubleRegZero);
+}
+
+void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
+  cdfbr(dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
+                                                DoubleRegister dst) {
+  if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
+    cdlfbr(Condition(5), Condition(0), dst, src);
+  } else {
+    // zero-extend src
+    llgfr(src, src);
+    // convert to double
+    cdgbr(dst, src);
+  }
+}
+
+void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
+  cefbr(dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
+                                               DoubleRegister dst) {
+  celfbr(Condition(0), Condition(0), dst, src);
+}
+
+#if V8_TARGET_ARCH_S390X
+void MacroAssembler::ConvertInt64ToDouble(Register src,
+                                          DoubleRegister double_dst) {
+  cdgbr(double_dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
+                                                 DoubleRegister double_dst) {
+  celgbr(Condition(0), Condition(0), double_dst, src);
+}
+
+void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
+                                                  DoubleRegister double_dst) {
+  cdlgbr(Condition(0), Condition(0), double_dst, src);
+}
+
+void MacroAssembler::ConvertInt64ToFloat(Register src,
+                                         DoubleRegister double_dst) {
+  cegbr(double_dst, src);
+}
+#endif
+
+void MacroAssembler::ConvertFloat32ToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+                                           const Register dst_hi,
+#endif
+                                           const Register dst,
+                                           const DoubleRegister double_dst,
+                                           FPRoundingMode rounding_mode) {
+  Condition m = Condition(0);
+  switch (rounding_mode) {
+    case kRoundToZero:
+      m = Condition(5);
+      break;
+    case kRoundToNearest:
+      UNIMPLEMENTED();
+      break;
+    case kRoundToPlusInf:
+      m = Condition(6);
+      break;
+    case kRoundToMinusInf:
+      m = Condition(7);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  cgebr(m, dst, double_input);
+  ldgr(double_dst, dst);
+#if !V8_TARGET_ARCH_S390X
+  srlg(dst_hi, dst, Operand(32));
+#endif
+}
+
+void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+                                          const Register dst_hi,
+#endif
+                                          const Register dst,
+                                          const DoubleRegister double_dst,
+                                          FPRoundingMode rounding_mode) {
+  Condition m = Condition(0);
+  switch (rounding_mode) {
+    case kRoundToZero:
+      m = Condition(5);
+      break;
+    case kRoundToNearest:
+      UNIMPLEMENTED();
+      break;
+    case kRoundToPlusInf:
+      m = Condition(6);
+      break;
+    case kRoundToMinusInf:
+      m = Condition(7);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  cgdbr(m, dst, double_input);
+  ldgr(double_dst, dst);
+#if !V8_TARGET_ARCH_S390X
+  srlg(dst_hi, dst, Operand(32));
+#endif
+}
+
+void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
+                                           const Register dst,
+                                           const DoubleRegister double_dst,
+                                           FPRoundingMode rounding_mode) {
+  Condition m = Condition(0);
+  switch (rounding_mode) {
+    case kRoundToZero:
+      m = Condition(5);
+      break;
+    case kRoundToNearest:
+      UNIMPLEMENTED();
+      break;
+    case kRoundToPlusInf:
+      m = Condition(6);
+      break;
+    case kRoundToMinusInf:
+      m = Condition(7);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  cfebr(m, dst, double_input);
+  ldgr(double_dst, dst);
+}
+
+void MacroAssembler::ConvertFloat32ToUnsignedInt32(
+    const DoubleRegister double_input, const Register dst,
+    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+  Condition m = Condition(0);
+  switch (rounding_mode) {
+    case kRoundToZero:
+      m = Condition(5);
+      break;
+    case kRoundToNearest:
+      UNIMPLEMENTED();
+      break;
+    case kRoundToPlusInf:
+      m = Condition(6);
+      break;
+    case kRoundToMinusInf:
+      m = Condition(7);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  clfebr(m, Condition(0), dst, double_input);
+  ldgr(double_dst, dst);
+}
+
+#if V8_TARGET_ARCH_S390X
+void MacroAssembler::ConvertFloat32ToUnsignedInt64(
+    const DoubleRegister double_input, const Register dst,
+    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+  Condition m = Condition(0);
+  switch (rounding_mode) {
+    case kRoundToZero:
+      m = Condition(5);
+      break;
+    case kRoundToNearest:
+      UNIMPLEMENTED();
+      break;
+    case kRoundToPlusInf:
+      m = Condition(6);
+      break;
+    case kRoundToMinusInf:
+      m = Condition(7);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  clgebr(m, Condition(0), dst, double_input);
+  ldgr(double_dst, dst);
+}
+
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
+    const DoubleRegister double_input, const Register dst,
+    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+  Condition m = Condition(0);
+  switch (rounding_mode) {
+    case kRoundToZero:
+      m = Condition(5);
+      break;
+    case kRoundToNearest:
+      UNIMPLEMENTED();
+      break;
+    case kRoundToPlusInf:
+      m = Condition(6);
+      break;
+    case kRoundToMinusInf:
+      m = Condition(7);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  clgdbr(m, Condition(0), dst, double_input);
+  ldgr(double_dst, dst);
+}
+
+#endif
+
+#if !V8_TARGET_ARCH_S390X
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+                                   Register src_low, Register src_high,
+                                   Register scratch, Register shift) {
+  LoadRR(r0, src_high);
+  LoadRR(r1, src_low);
+  sldl(r0, shift, Operand::Zero());
+  LoadRR(dst_high, r0);
+  LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+                                   Register src_low, Register src_high,
+                                   uint32_t shift) {
+  LoadRR(r0, src_high);
+  LoadRR(r1, src_low);
+  sldl(r0, r0, Operand(shift));
+  LoadRR(dst_high, r0);
+  LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+                                    Register src_low, Register src_high,
+                                    Register scratch, Register shift) {
+  LoadRR(r0, src_high);
+  LoadRR(r1, src_low);
+  srdl(r0, shift, Operand::Zero());
+  LoadRR(dst_high, r0);
+  LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+                                    Register src_low, Register src_high,
+                                    uint32_t shift) {
+  LoadRR(r0, src_high);
+  LoadRR(r1, src_low);
+  srdl(r0, r0, Operand(shift));
+  LoadRR(dst_high, r0);
+  LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
+                                         Register src_low, Register src_high,
+                                         Register scratch, Register shift) {
+  LoadRR(r0, src_high);
+  LoadRR(r1, src_low);
+  srda(r0, shift, Operand::Zero());
+  LoadRR(dst_high, r0);
+  LoadRR(dst_low, r1);
+}
+
+void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
+                                         Register src_low, Register src_high,
+                                         uint32_t shift) {
+  LoadRR(r0, src_high);
+  LoadRR(r1, src_low);
+  srda(r0, r0, Operand(shift));
+  LoadRR(dst_high, r0);
+  LoadRR(dst_low, r1);
+}
+#endif
+
+void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
+  lgdr(dst, src);
+}
+
+void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
+  ldgr(dst, src);
+}
+
+void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
+                                  int prologue_offset) {
+  {
+    ConstantPoolUnavailableScope constant_pool_unavailable(this);
+    LoadSmiLiteral(r1, Smi::FromInt(type));
+    PushCommonFrame(r1);
+  }
+}
+
+void MacroAssembler::Prologue(bool code_pre_aging, Register base,
+                              int prologue_offset) {
+  DCHECK(!base.is(no_reg));
+  {
+    PredictableCodeSizeScope predictable_code_size_scope(
+        this, kNoCodeAgeSequenceLength);
+    // The following instructions must remain together and unmodified
+    // for code aging to work properly.
+    if (code_pre_aging) {
+      // Pre-age the code.
+      // This matches the code found in PatchPlatformCodeAge()
+      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+      intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+      nop();
+      CleanseP(r14);
+      Push(r14);
+      mov(r2, Operand(target));
+      Call(r2);
+      for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
+           i += 2) {
+        // TODO(joransiu): Create a nop function to pad
+        //         (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
+        nop();  // 2-byte nop.
+      }
+    } else {
+      // This matches the code found in GetNoCodeAgeSequence()
+      PushStandardFrame(r3);
+    }
+  }
+}
+
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+  LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+  LoadP(vector,
+        FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+                                bool load_constant_pool_pointer_reg) {
+  // We create a stack frame with:
+  //    Return Addr <-- old sp
+  //    Old FP      <-- new fp
+  //    CP
+  //    type
+  //    CodeObject  <-- new sp
+
+  LoadSmiLiteral(ip, Smi::FromInt(type));
+  PushCommonFrame(ip);
+
+  if (type == StackFrame::INTERNAL) {
+    mov(r0, Operand(CodeObject()));
+    push(r0);
+  }
+}
+
+int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+  // Drop the execution stack down to the frame pointer and restore
+  // the caller frame pointer, return address and constant pool pointer.
+  LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  lay(r1, MemOperand(
+              fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment));
+  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  LoadRR(sp, r1);
+  int frame_ends = pc_offset();
+  return frame_ends;
+}
+
+// ExitFrame layout (approximate; needs updating):
+//
+//  SP -> previousSP
+//        LK reserved
+//        code
+//        sp_on_exit (for debug?)
+// oldSP->prev SP
+//        LK
+//        <parameters on stack>
+
+// Prior to calling EnterExitFrame, we have a number of parameters on the
+// stack that we need to wrap a real frame around, so first we reserve a
+// slot for LK and push the previous SP, which is captured in the fp
+// register (r11). Then we allocate a new frame.
+
+// r14
+// oldFP <- newFP
+// SP
+// Code
+// Floats
+// gaps
+// Args
+// ABIRes <- newSP
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+  // Set up the frame structure on the stack.
+  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+  DCHECK(stack_space > 0);
+
+  // This is an opportunity to build a frame to wrap all of the pushes that
+  // have happened inside of V8 since we were called from C code.
+  CleanseP(r14);
+  LoadSmiLiteral(r1, Smi::FromInt(StackFrame::EXIT));
+  PushCommonFrame(r1);
+  // Reserve room for saved entry sp and code object.
+  lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
+
+  if (emit_debug_code()) {
+    StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
+  }
+  mov(r1, Operand(CodeObject()));
+  StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
+  // Save the frame pointer and the context in top.
+  mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  StoreP(fp, MemOperand(r1));
+  mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+  StoreP(cp, MemOperand(r1));
+
+  // Optionally save all volatile double registers.
+  if (save_doubles) {
+    MultiPushDoubles(kCallerSavedDoubles);
+    // Note that d0 will be accessible at
+    //   fp - ExitFrameConstants::kFrameSize -
+    //   kNumCallerSavedDoubles * kDoubleSize,
+    // since the sp slot and code slot were pushed after the fp.
+  }
+
+  lay(sp, MemOperand(sp, -stack_space * kPointerSize));
+
+  // Allocate and align the frame preparing for calling the runtime
+  // function.
+  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+  if (frame_alignment > 0) {
+    DCHECK(frame_alignment == 8);
+    ClearRightImm(sp, sp, Operand(3));  // equivalent to &= -8
+  }
+
+  StoreP(MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize),
+         Operand::Zero(), r0);
+  lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
+  StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+void MacroAssembler::InitializeNewString(Register string, Register length,
+                                         Heap::RootListIndex map_index,
+                                         Register scratch1, Register scratch2) {
+  SmiTag(scratch1, length);
+  LoadRoot(scratch2, map_index);
+  StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset));
+  StoreP(FieldMemOperand(string, String::kHashFieldSlot),
+         Operand(String::kEmptyHashField), scratch1);
+  StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+}
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if !defined(USE_SIMULATOR)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one S390
+  // platform for another S390 platform with a different alignment.
+  return base::OS::ActivationFrameAlignment();
+#else  // Simulated
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so this is controlled from a
+  // flag.
+  return FLAG_sim_stack_alignment;
+#endif
+}
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+                                    bool restore_context,
+                                    bool argument_count_is_length) {
+  // Optionally restore all double registers.
+  if (save_doubles) {
+    // Calculate the stack location of the saved doubles and restore them.
+    const int kNumRegs = kNumCallerSavedDoubles;
+    lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
+                             kNumRegs * kDoubleSize)));
+    MultiPopDoubles(kCallerSavedDoubles, r5);
+  }
+
+  // Clear top frame.
+  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+
+  // Restore current context from top and clear it in debug mode.
+  if (restore_context) {
+    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+    LoadP(cp, MemOperand(ip));
+  }
+#ifdef DEBUG
+  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+  StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+#endif
+
+  // Tear down the exit frame, pop the arguments, and return.
+  LeaveFrame(StackFrame::EXIT);
+
+  if (argument_count.is_valid()) {
+    if (!argument_count_is_length) {
+      ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
+    }
+    la(sp, MemOperand(sp, argument_count));
+  }
+}
+
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+  Move(dst, d0);
+}
+
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+  Move(dst, d0);
+}
+
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+                                        Register caller_args_count_reg,
+                                        Register scratch0, Register scratch1) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+#endif
+
+  // Calculate the end of destination area where we will put the arguments
+  // after we drop current frame. We AddP kPointerSize to count the receiver
+  // argument which is not included into formal parameters count.
+  Register dst_reg = scratch0;
+  ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
+  AddP(dst_reg, fp, dst_reg);
+  AddP(dst_reg, dst_reg,
+       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = caller_args_count_reg;
+  // Calculate the end of source area. +kPointerSize is for the receiver.
+  if (callee_args_count.is_reg()) {
+    ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
+    AddP(src_reg, sp, src_reg);
+    AddP(src_reg, src_reg, Operand(kPointerSize));
+  } else {
+    mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
+    AddP(src_reg, src_reg, sp);
+  }
+
+  if (FLAG_debug_code) {
+    CmpLogicalP(src_reg, dst_reg);
+    Check(lt, kStackAccessBelowStackPointer);
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  RestoreFrameStateForTailCall();
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch1;
+  Label loop;
+  if (callee_args_count.is_reg()) {
+    AddP(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
+  } else {
+    mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
+  }
+  LoadRR(r1, tmp_reg);
+  bind(&loop);
+  LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
+  StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+  lay(src_reg, MemOperand(src_reg, -kPointerSize));
+  lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
+  BranchOnCount(r1, &loop);
+
+  // Leave current frame.
+  LoadRR(sp, dst_reg);
+}
+
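+// Conceptually (illustrative pseudocode only), PrepareForTailCall does:
+//   dst = fp + kCallerSPOffset + (caller_arg_count + 1) * kPointerSize;
+//   src = sp + (callee_arg_count + 1) * kPointerSize;  // +1 for receiver
+//   for (n = callee_arg_count + 1; n > 0; n--)         // copy backwards
+//     *(--dst) = *(--src);
+//   sp = dst;                                          // drop current frame
+// The caller's frame state (return address and fp) is restored before the
+// copy because the copy loop may overwrite those stack slots.
+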
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual, Label* done,
+                                    bool* definitely_mismatches,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  bool definitely_matches = false;
+  *definitely_mismatches = false;
+  Label regular_invoke;
+
+  // Check whether the expected and actual arguments count match. If not,
+  // setup registers according to contract with ArgumentsAdaptorTrampoline:
+  //  r2: actual arguments count
+  //  r3: function (passed through to callee)
+  //  r4: expected arguments count
+
+  // The code below is made a lot easier because the calling code already sets
+  // up actual and expected registers according to the contract if values are
+  // passed in registers.
+
+  // ARM has some sanity checks as per below; consider adding them for S390:
+  //  DCHECK(actual.is_immediate() || actual.reg().is(r2));
+  //  DCHECK(expected.is_immediate() || expected.reg().is(r4));
+
+  if (expected.is_immediate()) {
+    DCHECK(actual.is_immediate());
+    mov(r2, Operand(actual.immediate()));
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip the adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        *definitely_mismatches = true;
+        mov(r4, Operand(expected.immediate()));
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      mov(r2, Operand(actual.immediate()));
+      CmpPH(expected.reg(), Operand(actual.immediate()));
+      beq(&regular_invoke);
+    } else {
+      CmpP(expected.reg(), actual.reg());
+      beq(&regular_invoke);
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      Call(adaptor);
+      call_wrapper.AfterCall();
+      if (!*definitely_mismatches) {
+        b(done);
+      }
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&regular_invoke);
+  }
+}
+
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+                                             const ParameterCount& expected,
+                                             const ParameterCount& actual) {
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(isolate());
+  mov(r6, Operand(step_in_enabled));
+  LoadlB(r6, MemOperand(r6));
+  CmpP(r6, Operand::Zero());
+  beq(&skip_flooding);
+  {
+    FrameScope frame(this,
+                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+    if (expected.is_reg()) {
+      SmiTag(expected.reg());
+      Push(expected.reg());
+    }
+    if (actual.is_reg()) {
+      SmiTag(actual.reg());
+      Push(actual.reg());
+    }
+    if (new_target.is_valid()) {
+      Push(new_target);
+    }
+    Push(fun, fun);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    Pop(fun);
+    if (new_target.is_valid()) {
+      Pop(new_target);
+    }
+    if (actual.is_reg()) {
+      Pop(actual.reg());
+      SmiUntag(actual.reg());
+    }
+    if (expected.is_reg()) {
+      Pop(expected.reg());
+      SmiUntag(expected.reg());
+    }
+  }
+  bind(&skip_flooding);
+}
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+                                        const ParameterCount& expected,
+                                        const ParameterCount& actual,
+                                        InvokeFlag flag,
+                                        const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  DCHECK(function.is(r3));
+  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
+
+  if (call_wrapper.NeedsDebugStepCheck()) {
+    FloodFunctionIfStepping(function, new_target, expected, actual);
+  }
+
+  // Clear the new.target register if not given.
+  if (!new_target.is_valid()) {
+    LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  }
+
+  Label done;
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+                 call_wrapper);
+  if (!definitely_mismatches) {
+    // We call indirectly through the code field in the function to
+    // allow recompilation to take effect without changing any of the
+    // call sites.
+    Register code = ip;
+    LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      CallJSEntry(code);
+      call_wrapper.AfterCall();
+    } else {
+      DCHECK(flag == JUMP_FUNCTION);
+      JumpToJSEntry(code);
+    }
+
+    // Continue here if InvokePrologue handled the invocation via the
+    // arguments adaptor because of mismatched parameter counts.
+    bind(&done);
+  }
+}
+
+void MacroAssembler::InvokeFunction(Register fun, Register new_target,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in r3.
+  DCHECK(fun.is(r3));
+
+  Register expected_reg = r4;
+  Register temp_reg = r6;
+  LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+  LoadW(expected_reg,
+        FieldMemOperand(temp_reg,
+                        SharedFunctionInfo::kFormalParameterCountOffset));
+#if !defined(V8_TARGET_ARCH_S390X)
+  SmiUntag(expected_reg);
+#endif
+
+  ParameterCount expected(expected_reg);
+  InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
+}
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in r3.
+  DCHECK(function.is(r3));
+
+  // Get the function and setup the context.
+  LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  InvokeFunctionCode(r3, no_reg, expected, actual, flag, call_wrapper);
+}
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  Move(r3, function);
+  InvokeFunction(r3, expected, actual, flag, call_wrapper);
+}
+
+void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
+                                          Label* fail) {
+  DCHECK(kNotStringTag != 0);
+
+  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  mov(r0, Operand(kIsNotStringMask));
+  AndP(r0, scratch);
+  bne(fail);
+}
+
+void MacroAssembler::IsObjectNameType(Register object, Register scratch,
+                                      Label* fail) {
+  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  CmpP(scratch, Operand(LAST_NAME_TYPE));
+  bgt(fail);
+}
+
+void MacroAssembler::DebugBreak() {
+  LoadImmP(r2, Operand::Zero());
+  mov(r3,
+      Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
+  CEntryStub ces(isolate(), 1);
+  DCHECK(AllowThisStubCall(&ces));
+  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+}
+
+void MacroAssembler::PushStackHandler() {
+  // Adjust this code if the stack handler layout (asserted below) changes.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+  // Link the current handler as the next handler.
+  mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+
+  // Reserve stack space for the handler.
+  lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
+
+  // Copy the old handler into the next handler slot.
+  mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
+      kPointerSize);
+  // Set this new handler as the current one.
+  StoreP(sp, MemOperand(r7));
+}
+
+void MacroAssembler::PopStackHandler() {
+  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+
+  // Pop the Next Handler into r3 and store it into Handler Address reference.
+  Pop(r3);
+  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+
+  StoreP(r3, MemOperand(ip));
+}
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch, Label* miss) {
+  Label same_contexts;
+
+  DCHECK(!holder_reg.is(scratch));
+  DCHECK(!holder_reg.is(ip));
+  DCHECK(!scratch.is(ip));
+
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  DCHECK(!ip.is(scratch));
+  LoadRR(ip, fp);
+  bind(&load_context);
+  LoadP(scratch,
+        MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
+  JumpIfNotSmi(scratch, &has_context);
+  LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
+  b(&load_context);
+  bind(&has_context);
+
+// In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+  CmpP(scratch, Operand::Zero());
+  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+  // Load the native context of the current context.
+  LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
+
+  // Check the context is a native context.
+  if (emit_debug_code()) {
+    // Cannot use ip as a temporary in this verification code, because ip is
+    // clobbered as part of cmp with an object Operand.
+    push(holder_reg);  // Temporarily save holder on the stack.
+    // Read the first word and compare to the native_context_map.
+    LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
+    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+    pop(holder_reg);  // Restore holder.
+  }
+
+  // Check if both contexts are the same.
+  LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+  CmpP(scratch, ip);
+  beq(&same_contexts, Label::kNear);
+
+  // Check the context is a native context.
+  if (emit_debug_code()) {
+    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+    // Cannot use ip as a temporary in this verification code, because ip is
+    // clobbered as part of cmp with an object Operand.
+    push(holder_reg);        // Temporarily save holder on the stack.
+    LoadRR(holder_reg, ip);  // Move ip to its holding place.
+    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
+    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
+
+    LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
+    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+    // Restore ip is not needed. ip is reloaded below.
+    pop(holder_reg);  // Restore holder.
+    // Restore ip to holder's context.
+    LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+  }
+
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  int token_offset =
+      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+  LoadP(scratch, FieldMemOperand(scratch, token_offset));
+  LoadP(ip, FieldMemOperand(ip, token_offset));
+  CmpP(scratch, ip);
+  bne(miss);
+
+  bind(&same_contexts);
+}
+
+// Compute the hash code from the untagged key.  This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stubs-hydrogen.cc.
+void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
+  // First of all we assign the hash seed to scratch.
+  LoadRoot(scratch, Heap::kHashSeedRootIndex);
+  SmiUntag(scratch);
+
+  // Xor original key with a seed.
+  XorP(t0, scratch);
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  LoadRR(scratch, t0);
+  NotP(scratch);
+  sll(t0, Operand(15));
+  AddP(t0, scratch, t0);
+  // hash = hash ^ (hash >> 12);
+  ShiftRight(scratch, t0, Operand(12));
+  XorP(t0, scratch);
+  // hash = hash + (hash << 2);
+  ShiftLeft(scratch, t0, Operand(2));
+  AddP(t0, t0, scratch);
+  // hash = hash ^ (hash >> 4);
+  ShiftRight(scratch, t0, Operand(4));
+  XorP(t0, scratch);
+  // hash = hash * 2057;
+  LoadRR(r0, t0);
+  ShiftLeft(scratch, t0, Operand(3));
+  AddP(t0, t0, scratch);
+  ShiftLeft(scratch, r0, Operand(11));
+  AddP(t0, t0, scratch);
+  // hash = hash ^ (hash >> 16);
+  ShiftRight(scratch, t0, Operand(16));
+  XorP(t0, scratch);
+  // hash & 0x3fffffff
+  ExtractBitRange(t0, t0, 29, 0);
+}
+
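+// For reference, a minimal C++ sketch of the same hash sequence as emitted
+// above (illustrative only; the authoritative version is ComputeIntegerHash
+// in utils.h):
+//   uint32_t hash = key ^ seed;
+//   hash = ~hash + (hash << 15);
+//   hash = hash ^ (hash >> 12);
+//   hash = hash + (hash << 2);
+//   hash = hash ^ (hash >> 4);
+//   hash = hash * 2057;  // hash * 9 + hash * 2048, as done above
+//   hash = hash ^ (hash >> 16);
+//   hash = hash & 0x3fffffff;
+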
+void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
+                                              Register key, Register result,
+                                              Register t0, Register t1,
+                                              Register t2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'key' or 'elements'.
+  //            Unchanged on bailout so 'key' or 'elements' can be used
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // t0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // t1 - used to hold the capacity mask of the dictionary
+  //
+  // t2 - used for the index into the dictionary.
+  Label done;
+
+  GetNumberHash(t0, t1);
+
+  // Compute the capacity mask.
+  LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  SmiUntag(t1);
+  SubP(t1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  for (int i = 0; i < kNumberDictionaryProbes; i++) {
+    // Use t2 for index calculations and keep the hash intact in t0.
+    LoadRR(t2, t0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+    }
+    AndP(t2, t1);
+
+    // Scale the index by multiplying by the element size.
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
+    LoadRR(ip, t2);
+    sll(ip, Operand(1));
+    AddP(t2, ip);  // t2 = t2 * 3
+
+    // Check if the key is identical to the name.
+    sll(t2, Operand(kPointerSizeLog2));
+    AddP(t2, elements);
+    LoadP(ip,
+          FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
+    CmpP(key, ip);
+    if (i != kNumberDictionaryProbes - 1) {
+      beq(&done, Label::kNear);
+    } else {
+      bne(miss);
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a field property.
+  // t2: elements + (index * kPointerSize)
+  const int kDetailsOffset =
+      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
+  LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
+  DCHECK_EQ(DATA, 0);
+  AndP(r0, ip, t1);
+  bne(miss);
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+  LoadP(result, FieldMemOperand(t2, kValueOffset));
+}
+
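+// Illustrative sketch of the probing scheme generated above (descriptive
+// pseudocode, not generated code):
+//   mask = capacity - 1;
+//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
+//     index = (hash + SeededNumberDictionary::GetProbeOffset(i)) & mask;
+//     entry = elements + index * kEntrySize;       // kEntrySize == 3
+//     if (entry->key == key) goto done;            // hit
+//   }
+//   goto miss;                                     // all probes failed
+// On a hit, the entry's details are checked to be a DATA property and its
+// value is loaded into |result|.
+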
+void MacroAssembler::Allocate(int object_size, Register result,
+                              Register scratch1, Register scratch2,
+                              Label* gc_required, AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      LoadImmP(result, Operand(0x7091));
+      LoadImmP(scratch1, Operand(0x7191));
+      LoadImmP(scratch2, Operand(0x7291));
+    }
+    b(gc_required);
+    return;
+  }
+
+  DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
+
+  // Check relative positions of allocation top and limit addresses.
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+  DCHECK((limit - top) == kPointerSize);
+
+  // Set up allocation top address register.
+  Register top_address = scratch1;
+  // This code stores a temporary value in ip. This is OK, as the code below
+  // does not need ip for implicit literal generation.
+  Register alloc_limit = ip;
+  Register result_end = scratch2;
+  mov(top_address, Operand(allocation_top));
+
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into ip.
+    LoadP(result, MemOperand(top_address));
+    LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry.
+      LoadP(alloc_limit, MemOperand(top_address));
+      CmpP(result, alloc_limit);
+      Check(eq, kUnexpectedAllocationTop);
+    }
+    // Load allocation limit. Result already contains allocation top.
+    LoadP(alloc_limit, MemOperand(top_address, limit - top));
+  }
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    AndP(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned);
+    if ((flags & PRETENURE) != 0) {
+      CmpLogicalP(result, alloc_limit);
+      bge(gc_required);
+    }
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    StoreW(result_end, MemOperand(result));
+    AddP(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top.
+  SubP(r0, alloc_limit, result);
+  if (is_int16(object_size)) {
+    CmpP(r0, Operand(object_size));
+    blt(gc_required);
+    AddP(result_end, result, Operand(object_size));
+  } else {
+    mov(result_end, Operand(object_size));
+    CmpP(r0, result_end);
+    blt(gc_required);
+    AddP(result_end, result, result_end);
+  }
+  StoreP(result_end, MemOperand(top_address));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    AddP(result, result, Operand(kHeapObjectTag));
+  }
+}
+
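+// Illustrative sketch (pseudocode only) of the inline bump-pointer allocation
+// generated by the Allocate helpers (this fixed-size variant and the
+// register-sized one below):
+//   top   = *allocation_top;
+//   limit = *(allocation_top + kPointerSize);   // limit sits next to top
+//   if (limit - top < size_in_bytes) goto gc_required;
+//   *allocation_top = top + size_in_bytes;      // bump the top pointer
+//   result = top;                               // + kHeapObjectTag if tagged
+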
+void MacroAssembler::Allocate(Register object_size, Register result,
+                              Register result_end, Register scratch,
+                              Label* gc_required, AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      LoadImmP(result, Operand(0x7091));
+      LoadImmP(scratch, Operand(0x7191));
+      LoadImmP(result_end, Operand(0x7291));
+    }
+    b(gc_required);
+    return;
+  }
+
+  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+  // is not specified. Other registers must not overlap.
+  DCHECK(!AreAliased(object_size, result, scratch, ip));
+  DCHECK(!AreAliased(result_end, result, scratch, ip));
+  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+  // Check relative positions of allocation top and limit addresses.
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+  DCHECK((limit - top) == kPointerSize);
+
+  // Set up allocation top address and allocation limit registers.
+  Register top_address = scratch;
+  // This code stores a temporary value in ip. This is OK, as the code below
+  // does not need ip for implicit literal generation.
+  Register alloc_limit = ip;
+  mov(top_address, Operand(allocation_top));
+
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into alloc_limit.
+    LoadP(result, MemOperand(top_address));
+    LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry.
+      LoadP(alloc_limit, MemOperand(top_address));
+      CmpP(result, alloc_limit);
+      Check(eq, kUnexpectedAllocationTop);
+    }
+    // Load allocation limit. Result already contains allocation top.
+    LoadP(alloc_limit, MemOperand(top_address, limit - top));
+  }
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    AndP(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned);
+    if ((flags & PRETENURE) != 0) {
+      CmpLogicalP(result, alloc_limit);
+      bge(gc_required);
+    }
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    StoreW(result_end, MemOperand(result));
+    AddP(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  SubP(r0, alloc_limit, result);
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
+    CmpP(r0, result_end);
+    blt(gc_required);
+    AddP(result_end, result, result_end);
+  } else {
+    CmpP(r0, object_size);
+    blt(gc_required);
+    AddP(result_end, result, object_size);
+  }
+
+  // Update allocation top. result temporarily holds the new top.
+  if (emit_debug_code()) {
+    AndP(r0, result_end, Operand(kObjectAlignmentMask));
+    Check(eq, kUnalignedAllocationInNewSpace, cr0);
+  }
+  StoreP(result_end, MemOperand(top_address));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    AddP(result, result, Operand(kHeapObjectTag));
+  }
+}
+
+void MacroAssembler::AllocateTwoByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+
+  ShiftLeftP(scratch1, length, Operand(1));  // Length in bytes, not chars.
+  AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+
+  AndP(scratch1, Operand(~kObjectAlignmentMask));
+
+  // Allocate two-byte string in new space.
+  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
+                      scratch2);
+}
+
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK(kCharSize == 1);
+  AddP(scratch1, length,
+       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
+  AndP(scratch1, Operand(~kObjectAlignmentMask));
+
+  // Allocate one-byte string in new space.
+  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
+                      scratch2);
+}
+
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
+                      scratch2);
+}
+
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+void MacroAssembler::CompareObjectType(Register object, Register map,
+                                       Register type_reg, InstanceType type) {
+  const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
+
+  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  CompareInstanceType(map, temp, type);
+}
+
+void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
+                                         InstanceType type) {
+  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+  STATIC_ASSERT(LAST_TYPE < 256);
+  LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  CmpP(type_reg, Operand(type));
+}
+
+void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
+  CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
+}
+
+void MacroAssembler::CheckFastElements(Register map, Register scratch,
+                                       Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 2);
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
+  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+                 Operand(Map::kMaximumBitField2FastHoleyElementValue));
+  bgt(fail);
+}
+
+void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
+                                             Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 2);
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+                 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  ble(fail);
+  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+                 Operand(Map::kMaximumBitField2FastHoleyElementValue));
+  bgt(fail);
+}
+
+void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
+                                          Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
+                 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  bgt(fail);
+}
+
+void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
+  SmiUntag(ip, smi);
+  ConvertIntToDouble(ip, value);
+}
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register value_reg, Register key_reg, Register elements_reg,
+    Register scratch1, DoubleRegister double_scratch, Label* fail,
+    int elements_offset) {
+  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+  Label smi_value, store;
+
+  // Handle smi values specially.
+  JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number
+  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
+           DONT_DO_SMI_CHECK);
+
+  LoadDouble(double_scratch,
+             FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+  // Force a canonical NaN.
+  CanonicalizeNaN(double_scratch);
+  b(&store);
+
+  bind(&smi_value);
+  SmiToDouble(double_scratch, value_reg);
+
+  bind(&store);
+  SmiToDoubleArrayOffset(scratch1, key_reg);
+  StoreDouble(double_scratch,
+              FieldMemOperand(elements_reg, scratch1,
+                              FixedDoubleArray::kHeaderSize - elements_offset));
+}
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+                                            Register right,
+                                            Register overflow_dst,
+                                            Register scratch) {
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+
+  // TODO(joransiu): Optimize paths for left == right.
+  bool left_is_right = left.is(right);
+
+  // C = A+B; C overflows if A/B have same sign and C has diff sign than A
+  if (dst.is(left)) {
+    LoadRR(scratch, left);             // Preserve left.
+    AddP(dst, left, right);            // Left is overwritten.
+    XorP(overflow_dst, scratch, dst);  // Original left.
+    if (!left_is_right) XorP(scratch, dst, right);
+  } else if (dst.is(right)) {
+    LoadRR(scratch, right);  // Preserve right.
+    AddP(dst, left, right);  // Right is overwritten.
+    XorP(overflow_dst, dst, left);
+    if (!left_is_right) XorP(scratch, dst, scratch);
+  } else {
+    AddP(dst, left, right);
+    XorP(overflow_dst, dst, left);
+    if (!left_is_right) XorP(scratch, dst, right);
+  }
+  if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst);
+  LoadAndTestRR(overflow_dst, overflow_dst);
+}
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+                                            intptr_t right,
+                                            Register overflow_dst,
+                                            Register scratch) {
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+
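+  // Materialize the immediate in r1 and reuse the register variant above.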
+  mov(r1, Operand(right));
+  AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch);
+}
+
+void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
+                                            Register right,
+                                            Register overflow_dst,
+                                            Register scratch) {
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+
+  // C = A - B; C overflows iff A and B have different signs and C's sign
+  // differs from A's.
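+  // overflow_dst = (dst ^ left) & (left ^ right); its sign bit is set iff the
+  // subtraction overflowed. LoadAndTestRR then sets the condition code.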
+  if (dst.is(left)) {
+    LoadRR(scratch, left);   // Preserve left.
+    SubP(dst, left, right);  // Left is overwritten.
+    XorP(overflow_dst, dst, scratch);
+    XorP(scratch, right);
+    AndP(overflow_dst, scratch /*, SetRC*/);
+    LoadAndTestRR(overflow_dst, overflow_dst);
+    // Should be okay to remove rc
+  } else if (dst.is(right)) {
+    LoadRR(scratch, right);  // Preserve right.
+    SubP(dst, left, right);  // Right is overwritten.
+    XorP(overflow_dst, dst, left);
+    XorP(scratch, left);
+    AndP(overflow_dst, scratch /*, SetRC*/);
+    LoadAndTestRR(overflow_dst, overflow_dst);
+    // Should be okay to remove rc
+  } else {
+    SubP(dst, left, right);
+    XorP(overflow_dst, dst, left);
+    XorP(scratch, left, right);
+    AndP(overflow_dst, scratch /*, SetRC*/);
+    LoadAndTestRR(overflow_dst, overflow_dst);
+    // Should be okay to remove rc
+  }
+}
+
+void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
+                                Label* early_success) {
+  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CompareMap(obj, map, early_success);
+}
+
+void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
+                                Label* early_success) {
+  mov(r0, Operand(map));
+  CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
+                              Label* fail, SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+
+  Label success;
+  CompareMap(obj, scratch, map, &success);
+  bne(fail);
+  bind(&success);
+}
+
+void MacroAssembler::CheckMap(Register obj, Register scratch,
+                              Heap::RootListIndex index, Label* fail,
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CompareRoot(scratch, index);
+  bne(fail);
+}
+
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CmpWeakValue(scratch1, cell, scratch2);
+  Jump(success, RelocInfo::CODE_TARGET, eq);
+  bind(&fail);
+}
+
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+                                  Register scratch, CRegister) {
+  mov(scratch, Operand(cell));
+  CmpP(value, FieldMemOperand(scratch, WeakCell::kValueOffset));
+}
+
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
+  mov(value, Operand(cell));
+  LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  GetWeakValue(value, cell);
+  JumpIfSmi(value, miss);
+}
+
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+                                       Register temp, Register temp2) {
+  Label done, loop;
+  LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
+  bind(&loop);
+  JumpIfSmi(result, &done);
+  CompareObjectType(result, temp, temp2, MAP_TYPE);
+  bne(&done);
+  LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
+  b(&loop);
+  bind(&done);
+}
+
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+                                             Register scratch, Label* miss) {
+  // Get the prototype or initial map from the function.
+  LoadP(result,
+        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  CompareRoot(result, Heap::kTheHoleValueRootIndex);
+  beq(miss);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  CompareObjectType(result, scratch, scratch, MAP_TYPE);
+  bne(&done, Label::kNear);
+
+  // Get the prototype from the initial map.
+  LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  bind(&done);
+}
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
+                              Condition cond) {
+  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+}
+
+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+  // If the hash field contains an array index, pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it do not
+  // conflict.
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
+}
+
+void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
+                                       Register scratch1, Register scratch2,
+                                       DoubleRegister double_scratch) {
+  TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
+}
+
+void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
+                                           Register scratch1,
+                                           Register scratch2) {
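+  // Move the raw 64-bit bit pattern of the double into a GPR; -0.0 is
+  // exactly 0x8000000000000000 (only the sign bit set).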
+  lgdr(scratch1, input);
+#if V8_TARGET_ARCH_S390X
+  llihf(scratch2, Operand(0x80000000));  // scratch2 = 0x80000000_00000000
+  CmpP(scratch1, scratch2);
+#else
+  Label done;
+  CmpP(scratch1, Operand::Zero());
+  bne(&done, Label::kNear);
+
+  srlg(scratch1, scratch1, Operand(32));
+  CmpP(scratch1, Operand(HeapNumber::kSignMask));
+  bind(&done);
+#endif
+}
+
+void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
+  stdy(input, MemOperand(sp, -kDoubleSize));
+  LoadlW(scratch, MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
+  Cmp32(scratch, Operand::Zero());
+}
+
+void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
+  LoadlW(scratch, FieldMemOperand(input, HeapNumber::kValueOffset +
+                                             Register::kExponentOffset));
+  Cmp32(scratch, Operand::Zero());
+}
+
+void MacroAssembler::TryDoubleToInt32Exact(Register result,
+                                           DoubleRegister double_input,
+                                           Register scratch,
+                                           DoubleRegister double_scratch) {
+  Label done;
+  DCHECK(!double_input.is(double_scratch));
+
+  ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_S390X
+                       scratch,
+#endif
+                       result, double_scratch);
+
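+  // Check whether the converted value fits in an int32; if not, exit with the
+  // condition code set to ne.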
+#if V8_TARGET_ARCH_S390X
+  TestIfInt32(result, r0);
+#else
+  TestIfInt32(scratch, result, r0);
+#endif
+  bne(&done);
+
+  // The value fits in an int32: convert it back to double and compare with
+  // the original input; the condition code is eq iff the conversion was exact.
+  lgdr(scratch, double_scratch);
+  cdfbr(double_scratch, scratch);
+  cdbr(double_scratch, double_input);
+  bind(&done);
+}
+
+void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
+                                   Register input_high, Register scratch,
+                                   DoubleRegister double_scratch, Label* done,
+                                   Label* exact) {
+  DCHECK(!result.is(input_high));
+  DCHECK(!double_input.is(double_scratch));
+  Label exception;
+
+  // Move high word into input_high
+  StoreDouble(double_input, MemOperand(sp, -kDoubleSize));
+  lay(sp, MemOperand(sp, -kDoubleSize));
+  LoadlW(input_high, MemOperand(sp, Register::kExponentOffset));
+  la(sp, MemOperand(sp, kDoubleSize));
+
+  // Test for NaN/Inf: an all-ones biased exponent (0x7ff) means NaN or Inf.
+  ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
+  CmpLogicalP(result, Operand(0x7ff));
+  beq(&exception);
+
+  // Convert (rounding to -Inf)
+  ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_S390X
+                       scratch,
+#endif
+                       result, double_scratch, kRoundToMinusInf);
+
+// Test for overflow
+#if V8_TARGET_ARCH_S390X
+  TestIfInt32(result, r0);
+#else
+  TestIfInt32(scratch, result, r0);
+#endif
+  bne(&exception);
+
+  // Test for exactness
+  lgdr(scratch, double_scratch);
+  cdfbr(double_scratch, scratch);
+  cdbr(double_scratch, double_input);
+  beq(exact);
+  b(done);
+
+  bind(&exception);
+}
+
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+                                                DoubleRegister double_input,
+                                                Label* done) {
+  DoubleRegister double_scratch = kScratchDoubleReg;
+#if !V8_TARGET_ARCH_S390X
+  Register scratch = ip;
+#endif
+
+  ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_S390X
+                       scratch,
+#endif
+                       result, double_scratch);
+
+// Test for overflow
+#if V8_TARGET_ARCH_S390X
+  TestIfInt32(result, r0);
+#else
+  TestIfInt32(scratch, result, r0);
+#endif
+  beq(done);
+}
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+                                       DoubleRegister double_input) {
+  Label done;
+
+  TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through, the inline version did not succeed; call the stub.
+  push(r14);
+  // Put input on stack.
+  StoreDouble(double_input, MemOperand(sp, -kDoubleSize));
+  lay(sp, MemOperand(sp, -kDoubleSize));
+
+  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+  CallStub(&stub);
+
+  la(sp, MemOperand(sp, kDoubleSize));
+  pop(r14);
+
+  bind(&done);
+}
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+  Label done;
+  DoubleRegister double_scratch = kScratchDoubleReg;
+  DCHECK(!result.is(object));
+
+  LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+  TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+  // If we fell through, the inline version did not succeed; call the stub.
+  push(r14);
+  DoubleToIStub stub(isolate(), object, result,
+                     HeapNumber::kValueOffset - kHeapObjectTag, true, true);
+  CallStub(&stub);
+  pop(r14);
+
+  bind(&done);
+}
+
+void MacroAssembler::TruncateNumberToI(Register object, Register result,
+                                       Register heap_number_map,
+                                       Register scratch1, Label* not_number) {
+  Label done;
+  DCHECK(!result.is(object));
+
+  UntagAndJumpIfSmi(result, object, &done);
+  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+  TruncateHeapNumberToI(result, object);
+
+  bind(&done);
+}
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
+                                         int num_least_bits) {
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+    // Rotate right by kSmiShift (to untag) and extract the low
+    // num_least_bits bits.
+    risbg(dst, src, Operand(64 - num_least_bits), Operand(63),
+          Operand(64 - kSmiShift), true);
+  } else {
+    SmiUntag(dst, src);
+    AndP(dst, Operand((1 << num_least_bits) - 1));
+  }
+}
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
+                                           int num_least_bits) {
+  AndP(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+                                 SaveFPRegsMode save_doubles) {
+  // All parameters are on the stack.  r2 has the return value after the call.
+
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments match the
+  // expectation.
+  CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  mov(r2, Operand(num_arguments));
+  mov(r3, Operand(ExternalReference(f, isolate())));
+  CEntryStub stub(isolate(),
+#if V8_TARGET_ARCH_S390X
+                  f->result_size,
+#else
+                  1,
+#endif
+                  save_doubles);
+  CallStub(&stub);
+}
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  mov(r2, Operand(num_arguments));
+  mov(r3, Operand(ext));
+
+  CEntryStub stub(isolate(), 1);
+  CallStub(&stub);
+}
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+  const Runtime::Function* function = Runtime::FunctionForId(fid);
+  DCHECK_EQ(1, function->result_size);
+  if (function->nargs >= 0) {
+    mov(r2, Operand(function->nargs));
+  }
+  JumpToExternalReference(ExternalReference(fid, isolate()));
+}
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+  mov(r3, Operand(builtin));
+  CEntryStub stub(isolate(), 1);
+  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+                                Register scratch1, Register scratch2) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch1, Operand(value));
+    mov(scratch2, Operand(ExternalReference(counter)));
+    StoreW(scratch1, MemOperand(scratch2));
+  }
+}
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  DCHECK(value > 0 && is_int8(value));
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch1, Operand(ExternalReference(counter)));
+    // @TODO(john.yan): can be optimized by asi()
+    LoadW(scratch2, MemOperand(scratch1));
+    AddP(scratch2, Operand(value));
+    StoreW(scratch2, MemOperand(scratch1));
+  }
+}
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  DCHECK(value > 0 && is_int8(value));
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch1, Operand(ExternalReference(counter)));
+    // @TODO(john.yan): can be optimized by asi()
+    LoadW(scratch2, MemOperand(scratch1));
+    AddP(scratch2, Operand(-value));
+    StoreW(scratch2, MemOperand(scratch1));
+  }
+}
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason,
+                            CRegister cr) {
+  if (emit_debug_code()) Check(cond, reason, cr);
+}
+
+void MacroAssembler::AssertFastElements(Register elements) {
+  if (emit_debug_code()) {
+    DCHECK(!elements.is(r0));
+    Label ok;
+    push(elements);
+    LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+    CompareRoot(elements, Heap::kFixedArrayMapRootIndex);
+    beq(&ok, Label::kNear);
+    CompareRoot(elements, Heap::kFixedDoubleArrayMapRootIndex);
+    beq(&ok, Label::kNear);
+    CompareRoot(elements, Heap::kFixedCOWArrayMapRootIndex);
+    beq(&ok, Label::kNear);
+    Abort(kJSObjectWithFastElementsMapHasSlowElements);
+    bind(&ok);
+    pop(elements);
+  }
+}
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+  Label L;
+  b(cond, &L);
+  Abort(reason);
+  // will not return here
+  bind(&L);
+}
+
+void MacroAssembler::Abort(BailoutReason reason) {
+  Label abort_start;
+  bind(&abort_start);
+#ifdef DEBUG
+  const char* msg = GetBailoutReason(reason);
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+
+  if (FLAG_trap_on_abort) {
+    stop(msg);
+    return;
+  }
+#endif
+
+  LoadSmiLiteral(r0, Smi::FromInt(reason));
+  push(r0);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort);
+  } else {
+    CallRuntime(Runtime::kAbort);
+  }
+  // will not return here
+}
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+  if (context_chain_length > 0) {
+    // Move up the chain of contexts to the context containing the slot.
+    LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    for (int i = 1; i < context_chain_length; i++) {
+      LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    }
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+    LoadRR(dst, cp);
+  }
+}
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+    ElementsKind expected_kind, ElementsKind transitioned_kind,
+    Register map_in_out, Register scratch, Label* no_map_match) {
+  DCHECK(IsFastElementsKind(expected_kind));
+  DCHECK(IsFastElementsKind(transitioned_kind));
+
+  // Check that the function's map is the same as the expected cached map.
+  LoadP(scratch, NativeContextMemOperand());
+  LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
+  CmpP(map_in_out, ip);
+  bne(no_map_match);
+
+  // Use the transitioned cached map.
+  LoadP(map_in_out,
+        ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
+}
+
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+  LoadP(dst, NativeContextMemOperand());
+  LoadP(dst, ContextMemOperand(dst, index));
+}
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map,
+                                                  Register scratch) {
+  // Load the initial map. The global functions all have initial maps.
+  LoadP(map,
+        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (emit_debug_code()) {
+    Label ok, fail;
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+    b(&ok);
+    bind(&fail);
+    Abort(kGlobalFunctionsMustHaveInitialMap);
+    bind(&ok);
+  }
+}
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+    Register reg, Register scratch, Label* not_power_of_two_or_zero) {
+  SubP(scratch, reg, Operand(1));
+  CmpP(scratch, Operand::Zero());
+  blt(not_power_of_two_or_zero);
+  AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
+  bne(not_power_of_two_or_zero /*, cr0*/);
+}
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
+                                                     Register scratch,
+                                                     Label* zero_and_neg,
+                                                     Label* not_power_of_two) {
+  SubP(scratch, reg, Operand(1));
+  CmpP(scratch, Operand::Zero());
+  blt(zero_and_neg);
+  AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
+  bne(not_power_of_two /*, cr0*/);
+}
+
+#if !V8_TARGET_ARCH_S390X
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+  DCHECK(!reg.is(overflow));
+  LoadRR(overflow, reg);  // Save original value.
+  SmiTag(reg);
+  XorP(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
+  LoadAndTestRR(overflow, overflow);
+}
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
+                                         Register overflow) {
+  if (dst.is(src)) {
+    // Fall back to slower case.
+    SmiTagCheckOverflow(dst, overflow);
+  } else {
+    DCHECK(!dst.is(src));
+    DCHECK(!dst.is(overflow));
+    DCHECK(!src.is(overflow));
+    SmiTag(dst, src);
+    XorP(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
+    LoadAndTestRR(overflow, overflow);
+  }
+}
+#endif
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
+                                      Label* on_not_both_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  OrP(r0, reg1, reg2 /*, LeaveRC*/);  // should be okay to remove LeaveRC
+  JumpIfNotSmi(r0, on_not_both_smi);
+}
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
+                                       Label* smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  // this won't work if src == dst
+  DCHECK(src.code() != dst.code());
+  SmiUntag(dst, src);
+  TestIfSmi(src);
+  beq(smi_case);
+}
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
+                                          Label* non_smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  // We can use TestIfSmi directly when dst != src; otherwise the untag
+  // operation clobbers the condition code before the tag bit can be tested.
+  if (src.code() != dst.code()) {
+    SmiUntag(dst, src);
+    TestIfSmi(src);
+  } else {
+    TestBit(src, 0, r0);
+    SmiUntag(dst, src);
+    LoadAndTestRR(r0, r0);
+  }
+  bne(non_smi_case);
+}
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
+                                     Label* on_either_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpIfSmi(reg1, on_either_smi);
+  JumpIfSmi(reg2, on_either_smi);
+}
+
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsANumber, cr0);
+    push(object);
+    CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
+    pop(object);
+    Check(ne, kOperandIsANumber);
+  }
+}
+
+void MacroAssembler::AssertNotSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsASmi, cr0);
+  }
+}
+
+void MacroAssembler::AssertSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(eq, kOperandIsNotSmi, cr0);
+  }
+}
+
+void MacroAssembler::AssertString(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsASmiAndNotAString, cr0);
+    push(object);
+    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+    pop(object);
+    Check(lt, kOperandIsNotAString);
+  }
+}
+
+void MacroAssembler::AssertName(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsASmiAndNotAName, cr0);
+    push(object);
+    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareInstanceType(object, object, LAST_NAME_TYPE);
+    pop(object);
+    Check(le, kOperandIsNotAName);
+  }
+}
+
+void MacroAssembler::AssertFunction(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+    push(object);
+    CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+    pop(object);
+    Check(eq, kOperandIsNotAFunction);
+  }
+}
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+    push(object);
+    CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+    pop(object);
+    Check(eq, kOperandIsNotABoundFunction);
+  }
+}
+
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
+    push(object);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
+    pop(object);
+    Check(ge, kOperandIsNotAReceiver);
+  }
+}
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+                                                     Register scratch) {
+  if (emit_debug_code()) {
+    Label done_checking;
+    AssertNotSmi(object);
+    CompareRoot(object, Heap::kUndefinedValueRootIndex);
+    beq(&done_checking, Label::kNear);
+    LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+    Assert(eq, kExpectedUndefinedOrCell);
+    bind(&done_checking);
+  }
+}
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+  if (emit_debug_code()) {
+    CompareRoot(reg, index);
+    Check(eq, kHeapNumberMapRegisterClobbered);
+  }
+}
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  CmpP(scratch, heap_number_map);
+  bne(on_not_heap_number);
+}
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+    Register first, Register second, Register scratch1, Register scratch2,
+    Label* failure) {
+  // Test that both first and second are sequential one-byte strings.
+  // Assume that they are non-smis.
+  LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+                                                 scratch2, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+                                                           Register second,
+                                                           Register scratch1,
+                                                           Register scratch2,
+                                                           Label* failure) {
+  // Check that neither is a smi.
+  AndP(scratch1, first, second);
+  JumpIfSmi(scratch1, failure);
+  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+                                               scratch2, failure);
+}
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  Label succeed;
+  AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  beq(&succeed, Label::kNear);
+  CmpP(reg, Operand(SYMBOL_TYPE));
+  bne(not_unique_name);
+
+  bind(&succeed);
+}
+
+// Allocates a heap number, or jumps to the gc_required label if the young
+// space is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
+                                        Register scratch2,
+                                        Register heap_number_map,
+                                        Label* gc_required,
+                                        TaggingMode tagging_mode,
+                                        MutableMode mode) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+  Heap::RootListIndex map_index = mode == MUTABLE
+                                      ? Heap::kMutableHeapNumberMapRootIndex
+                                      : Heap::kHeapNumberMapRootIndex;
+  AssertIsRoot(heap_number_map, map_index);
+
+  // Store heap number map in the allocated object.
+  if (tagging_mode == TAG_RESULT) {
+    StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+  } else {
+    StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+  }
+}
+
+void MacroAssembler::AllocateHeapNumberWithValue(
+    Register result, DoubleRegister value, Register scratch1, Register scratch2,
+    Register heap_number_map, Label* gc_required) {
+  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
+  StoreDouble(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+                                     Register value, Register scratch1,
+                                     Register scratch2, Label* gc_required) {
+  DCHECK(!result.is(constructor));
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!result.is(value));
+
+  // Allocate JSValue in new space.
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+
+  // Initialize the JSValue.
+  LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+  StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
+  LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+  StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
+  StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
+  StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
+                               Register scratch) {
+  Label big_loop, left_bytes, done, fake_call;
+
+  DCHECK(!scratch.is(r0));
+
+  // The big loop moves 256 bytes at a time.
+  bind(&big_loop);
+  CmpP(length, Operand(static_cast<intptr_t>(0x100)));
+  blt(&left_bytes);
+
+  mvc(MemOperand(dst), MemOperand(src), 0x100);
+
+  AddP(src, Operand(static_cast<intptr_t>(0x100)));
+  AddP(dst, Operand(static_cast<intptr_t>(0x100)));
+  SubP(length, Operand(static_cast<intptr_t>(0x100)));
+  b(&big_loop);
+
+  bind(&left_bytes);
+  CmpP(length, Operand::Zero());
+  beq(&done);
+
+  // TODO(john.yan): A more optimal version would use MVC (via EX), but the
+  // sequence below has an undiagnosed issue.
+  /*
+  b(scratch, &fake_call);  // use brasl to Save mvc addr to scratch
+  mvc(MemOperand(dst), MemOperand(src), 1);
+  bind(&fake_call);
+  SubP(length, Operand(static_cast<intptr_t>(-1)));
+  ex(length, MemOperand(scratch));  // execute mvc instr above
+  AddP(src, length);
+  AddP(dst, length);
+  AddP(src, Operand(static_cast<intptr_t>(0x1)));
+  AddP(dst, Operand(static_cast<intptr_t>(0x1)));
+  */
+
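+  // Fall back to copying one byte per iteration with an MVC of length 1.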
+  mvc(MemOperand(dst), MemOperand(src), 1);
+  AddP(src, Operand(static_cast<intptr_t>(0x1)));
+  AddP(dst, Operand(static_cast<intptr_t>(0x1)));
+  SubP(length, Operand(static_cast<intptr_t>(0x1)));
+
+  b(&left_bytes);
+  bind(&done);
+}
+
+void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
+                                                 Register count,
+                                                 Register filler) {
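+  // BranchOnCount decrements and tests r1, so the caller must pass the field
+  // count in r1 (see InitializeFieldsWithFiller below).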
+  Label loop;
+  bind(&loop);
+  StoreP(filler, MemOperand(current_address));
+  AddP(current_address, current_address, Operand(kPointerSize));
+  BranchOnCount(r1, &loop);
+}
+
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+                                                Register end_address,
+                                                Register filler) {
+  Label done;
+  DCHECK(!filler.is(r1));
+  DCHECK(!current_address.is(r1));
+  DCHECK(!end_address.is(r1));
+  SubP(r1, end_address, current_address /*, LeaveOE, SetRC*/);
+  beq(&done, Label::kNear);
+  ShiftRightP(r1, r1, Operand(kPointerSizeLog2));
+  InitializeNFieldsWithFiller(current_address, r1, filler);
+  bind(&done);
+}
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+    Register first, Register second, Register scratch1, Register scratch2,
+    Label* failure) {
+  const int kFlatOneByteStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatOneByteStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+  if (!scratch1.is(first)) LoadRR(scratch1, first);
+  if (!scratch2.is(second)) LoadRR(scratch2, second);
+  nilf(scratch1, Operand(kFlatOneByteStringMask));
+  CmpP(scratch1, Operand(kFlatOneByteStringTag));
+  bne(failure);
+  nilf(scratch2, Operand(kFlatOneByteStringMask));
+  CmpP(scratch2, Operand(kFlatOneByteStringTag));
+  bne(failure);
+}
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+                                                              Register scratch,
+                                                              Label* failure) {
+  const int kFlatOneByteStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatOneByteStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+
+  if (!scratch.is(type)) LoadRR(scratch, type);
+  nilf(scratch, Operand(kFlatOneByteStringMask));
+  CmpP(scratch, Operand(kFlatOneByteStringTag));
+  bne(failure);
+}
+
+static const int kRegisterPassedArguments = 5;
+
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
+  if (num_double_arguments > DoubleRegister::kNumRegisters) {
+    stack_passed_words +=
+        2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+  }
+  // Up to five simple arguments are passed in registers r2..r6
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  return stack_passed_words;
+}
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
+                                               Register value,
+                                               uint32_t encoding_mask) {
+  Label is_object;
+  TestIfSmi(string);
+  Check(ne, kNonObject, cr0);
+
+  LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+  LoadlB(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+  AndP(ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+  CmpP(ip, Operand(encoding_mask));
+  Check(eq, kUnexpectedStringType);
+
+// The index is assumed to come in untagged. Tag it to compare it with the
+// string length without using a temp register; it is restored at the end of
+// this function.
+#if !V8_TARGET_ARCH_S390X
+  Label index_tag_ok, index_tag_bad;
+  JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
+#endif
+  SmiTag(index, index);
+#if !V8_TARGET_ARCH_S390X
+  b(&index_tag_ok);
+  bind(&index_tag_bad);
+  Abort(kIndexIsTooLarge);
+  bind(&index_tag_ok);
+#endif
+
+  LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
+  CmpP(index, ip);
+  Check(lt, kIndexIsTooLarge);
+
+  DCHECK(Smi::FromInt(0) == 0);
+  CmpP(index, Operand::Zero());
+  Check(ge, kIndexIsNegative);
+
+  SmiUntag(index, index);
+}
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+  int stack_passed_arguments =
+      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+  int stack_space = kNumRequiredStackFrameSlots;
+  if (frame_alignment > kPointerSize) {
+    // Align the end of the stack and make room for stack arguments,
+    // preserving the original value of sp.
+    LoadRR(scratch, sp);
+    lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+    StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
+  } else {
+    stack_space += stack_passed_arguments;
+  }
+  lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
+}
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
+
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
+
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+                                          DoubleRegister src2) {
+  if (src2.is(d0)) {
+    DCHECK(!src1.is(d2));
+    Move(d2, src2);
+    Move(d0, src1);
+  } else {
+    Move(d0, src1);
+    Move(d2, src2);
+  }
+}
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  mov(ip, Operand(function));
+  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+}
+
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
+}
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
+}
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
+  DCHECK(has_frame());
+
+  // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the link register
+  // stays correct.
+  Register dest = function;
+  if (ABI_CALL_VIA_IP) {
+    Move(ip, function);
+    dest = ip;
+  }
+
+  Call(dest);
+
+  int stack_passed_arguments =
+      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+  int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
+  if (ActivationFrameAlignment() > kPointerSize) {
+    // Load the original stack pointer (pre-alignment) from the stack
+    LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+  } else {
+    la(sp, MemOperand(sp, stack_space * kPointerSize));
+  }
+}
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,  // scratch may be same register as object
+    int mask, Condition cc, Label* condition_met) {
+  DCHECK(cc == ne || cc == eq);
+  ClearRightImm(scratch, object, Operand(kPageSizeBits));
+
+  if (base::bits::IsPowerOfTwo32(mask)) {
+    // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
+    // which allows testing of a single byte in memory.
+    int32_t byte_offset = 4;
+    uint32_t shifted_mask = mask;
+    // Determine the byte offset to be tested
+    if (mask <= 0x80) {
+      byte_offset = kPointerSize - 1;
+    } else if (mask < 0x8000) {
+      byte_offset = kPointerSize - 2;
+      shifted_mask = mask >> 8;
+    } else if (mask < 0x800000) {
+      byte_offset = kPointerSize - 3;
+      shifted_mask = mask >> 16;
+    } else {
+      byte_offset = kPointerSize - 4;
+      shifted_mask = mask >> 24;
+    }
+#if V8_TARGET_LITTLE_ENDIAN
+    // Reverse the byte_offset when emulating on a little-endian platform.
+    byte_offset = kPointerSize - byte_offset - 1;
+#endif
+    tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
+       Operand(shifted_mask));
+  } else {
+    LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+    AndP(r0, scratch, Operand(mask));
+  }
+  // Should be okay to remove rc
+
+  if (cc == ne) {
+    bne(condition_met);
+  }
+  if (cc == eq) {
+    beq(condition_met);
+  }
+}
+
+void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
+                                 Register scratch1, Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
+  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+}
+
+void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
+                              Register mask_scratch, Label* has_color,
+                              int first_bit, int second_bit) {
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  // Test the first bit
+  AndP(r0, ip, mask_scratch /*, SetRC*/);  // Should be okay to remove rc
+  b(first_bit == 1 ? eq : ne, &other_color, Label::kNear);
+  // Shift left 1
+  // May need to load the next cell
+  sll(mask_scratch, Operand(1) /*, SetRC*/);
+  LoadAndTest32(mask_scratch, mask_scratch);
+  beq(&word_boundary, Label::kNear);
+  // Test the second bit
+  AndP(r0, ip, mask_scratch /*, SetRC*/);  // Should be okay to remove rc
+  b(second_bit == 1 ? ne : eq, has_color);
+  b(&other_color, Label::kNear);
+
+  bind(&word_boundary);
+  LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
+  AndP(r0, ip, Operand(1));
+  b(second_bit == 1 ? ne : eq, has_color);
+  bind(&other_color);
+}
+
+void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
+                                 Register mask_reg) {
+  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
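+  // Compute the page start (bitmap_reg), add the byte offset of the mark
+  // bitmap cell for addr_reg, and build a one-bit mask (mask_reg) selecting
+  // the object's mark bit within that cell. Callers add
+  // MemoryChunk::kHeaderSize when loading the cell.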
+  LoadRR(bitmap_reg, addr_reg);
+  nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask));
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
+  ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
+  ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
+  AddP(bitmap_reg, ip);
+  LoadRR(ip, mask_reg);  // Some register shuffling is needed, as the
+                         // 31-bit shift left clobbers its operand on s390.
+  LoadImmP(mask_reg, Operand(1));
+  ShiftLeftP(mask_reg, mask_reg, ip);
+}
+
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+                                 Register mask_scratch, Register load_scratch,
+                                 Label* value_is_white) {
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  LoadRR(r0, load_scratch);
+  AndP(r0, mask_scratch);
+  beq(value_is_white);
+}
+
+// Saturate a value into an 8-bit unsigned integer:
+//   if input_value < 0, output_value is 0
+//   if input_value > 255, output_value is 255
+//   otherwise output_value is the input_value
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  int satval = (1 << 8) - 1;
+
+  Label done, negative_label, overflow_label;
+  CmpP(input_reg, Operand::Zero());
+  blt(&negative_label);
+
+  CmpP(input_reg, Operand(satval));
+  bgt(&overflow_label);
+  if (!output_reg.is(input_reg)) {
+    LoadRR(output_reg, input_reg);
+  }
+  b(&done);
+
+  bind(&negative_label);
+  LoadImmP(output_reg, Operand::Zero());  // set to 0 if negative
+  b(&done);
+
+  bind(&overflow_label);  // set to satval if > satval
+  LoadImmP(output_reg, Operand(satval));
+
+  bind(&done);
+}
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister double_scratch) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  LoadDoubleLiteral(double_scratch, 0.0, result_reg);
+  cdbr(input_reg, double_scratch);
+  bgt(&above_zero, Label::kNear);
+
+  // Double value is less than or equal to zero, or NaN; return 0.
+  LoadIntLiteral(result_reg, 0);
+  b(&done, Label::kNear);
+
+  // Double value is greater than 255; return 255.
+  bind(&above_zero);
+  LoadDoubleLiteral(double_scratch, 255.0, result_reg);
+  cdbr(input_reg, double_scratch);
+  ble(&in_bounds, Label::kNear);
+  LoadIntLiteral(result_reg, 255);
+  b(&done, Label::kNear);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+
+  // round to nearest (default rounding mode)
+  cfdbr(ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, result_reg, input_reg);
+  bind(&done);
+}
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+  LoadlW(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+  LoadW(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  And(dst, Operand(Map::EnumLengthBits::kMask));
+  SmiTag(dst);
+}
+
+void MacroAssembler::LoadAccessor(Register dst, Register holder,
+                                  int accessor_index,
+                                  AccessorComponent accessor) {
+  LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+  LoadInstanceDescriptors(dst, dst);
+  LoadP(dst,
+        FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
+  const int getterOffset = AccessorPair::kGetterOffset;
+  const int setterOffset = AccessorPair::kSetterOffset;
+  int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
+  LoadP(dst, FieldMemOperand(dst, offset));
+}
+
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+  Register null_value = r7;
+  Register empty_fixed_array_value = r8;
+  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Label next, start;
+  LoadRR(r4, r2);
+
+  // Check if the enum length field is properly initialized, indicating that
+  // there is an enum cache.
+  LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
+
+  EnumLength(r5, r3);
+  CmpSmiLiteral(r5, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
+  beq(call_runtime);
+
+  LoadRoot(null_value, Heap::kNullValueRootIndex);
+  b(&start, Label::kNear);
+
+  bind(&next);
+  LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
+
+  // For all objects but the receiver, check that the cache is empty.
+  EnumLength(r5, r3);
+  CmpSmiLiteral(r5, Smi::FromInt(0), r0);
+  bne(call_runtime);
+
+  bind(&start);
+
+  // Check that there are no elements. Register r4 contains the current JS
+  // object we've reached through the prototype chain.
+  Label no_elements;
+  LoadP(r4, FieldMemOperand(r4, JSObject::kElementsOffset));
+  CmpP(r4, empty_fixed_array_value);
+  beq(&no_elements, Label::kNear);
+
+  // Second chance: the object may be using the empty slow element dictionary.
+  CompareRoot(r4, Heap::kEmptySlowElementDictionaryRootIndex);
+  bne(call_runtime);
+
+  bind(&no_elements);
+  LoadP(r4, FieldMemOperand(r3, Map::kPrototypeOffset));
+  CmpP(r4, null_value);
+  bne(&next);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// New MacroAssembler Interfaces added for S390
+//
+////////////////////////////////////////////////////////////////////////////////
+// Primarily used for loading constants. This should really move into
+// macro-assembler, as it is really a pseudo instruction.
+// Some usages of this intend for a FIXED_SEQUENCE to be used.
+// @TODO - break this dependency so we can optimize mov() in general
+// and only use the generic version when we require a fixed sequence.
+void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
+                                        Representation r, Register scratch) {
+  DCHECK(!r.IsDouble());
+  if (r.IsInteger8()) {
+    LoadB(dst, mem);
+    lgbr(dst, dst);
+  } else if (r.IsUInteger8()) {
+    LoadlB(dst, mem);
+  } else if (r.IsInteger16()) {
+    LoadHalfWordP(dst, mem, scratch);
+    lghr(dst, dst);
+  } else if (r.IsUInteger16()) {
+    LoadHalfWordP(dst, mem, scratch);
+#if V8_TARGET_ARCH_S390X
+  } else if (r.IsInteger32()) {
+    LoadW(dst, mem, scratch);
+#endif
+  } else {
+    LoadP(dst, mem, scratch);
+  }
+}
+
+void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
+                                         Representation r, Register scratch) {
+  DCHECK(!r.IsDouble());
+  if (r.IsInteger8() || r.IsUInteger8()) {
+    StoreByte(src, mem, scratch);
+  } else if (r.IsInteger16() || r.IsUInteger16()) {
+    StoreHalfWord(src, mem, scratch);
+#if V8_TARGET_ARCH_S390X
+  } else if (r.IsInteger32()) {
+    StoreW(src, mem, scratch);
+#endif
+  } else {
+    if (r.IsHeapObject()) {
+      AssertNotSmi(src);
+    } else if (r.IsSmi()) {
+      AssertSmi(src);
+    }
+    StoreP(src, mem, scratch);
+  }
+}
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+                                                     Register scratch_reg,
+                                                     Register scratch2_reg,
+                                                     Label* no_memento_found) {
+  Label map_check;
+  Label top_check;
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+
+  DCHECK(!AreAliased(receiver_reg, scratch_reg));
+
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+
+  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
+  AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  XorP(r0, scratch_reg, Operand(new_space_allocation_top));
+  AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
+  beq(&top_check, Label::kNear);
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  XorP(r0, scratch_reg, receiver_reg);
+  AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
+  bne(no_memento_found);
+  // Continue with the actual map check.
+  b(&map_check, Label::kNear);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  CmpP(scratch_reg, Operand(new_space_allocation_top));
+  bgt(no_memento_found);
+  // Memento map check.
+  bind(&map_check);
+  LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+  CmpP(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
+}
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+                                   Register reg4, Register reg5,
+                                   Register reg6) {
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+    int code = config->GetAllocatableGeneralCode(i);
+    Register candidate = Register::from_code(code);
+    if (regs & candidate.bit()) continue;
+    return candidate;
+  }
+  UNREACHABLE();
+  return no_reg;
+}
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
+                                                      Register scratch0,
+                                                      Register scratch1,
+                                                      Label* found) {
+  DCHECK(!scratch1.is(scratch0));
+  Register current = scratch0;
+  Label loop_again, end;
+
+  // Walk the prototype chain in scratch0 (current), starting at the
+  // object's prototype.
+  LoadRR(current, object);
+  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
+  CompareRoot(current, Heap::kNullValueRootIndex);
+  beq(&end);
+
+  // Loop based on the map going up the prototype chain.
+  bind(&loop_again);
+  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+
+  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+  LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
+  CmpP(scratch1, Operand(JS_OBJECT_TYPE));
+  blt(found);
+
+  LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+  DecodeField<Map::ElementsKindBits>(scratch1);
+  CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
+  beq(found);
+  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
+  CompareRoot(current, Heap::kNullValueRootIndex);
+  bne(&loop_again);
+
+  bind(&end);
+}
+
+void MacroAssembler::mov(Register dst, const Operand& src) {
+  if (src.rmode_ != kRelocInfo_NONEPTR) {
+    // Some form of relocation is needed.
+    RecordRelocInfo(src.rmode_, src.imm_);
+  }
+
+#if V8_TARGET_ARCH_S390X
+  int64_t value = src.immediate();
+  int32_t hi_32 = static_cast<int64_t>(value) >> 32;
+  int32_t lo_32 = static_cast<int32_t>(value);
+
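+  // Build the 64-bit immediate with a fixed two-instruction sequence:
+  // iihf inserts the high 32 bits and iilf inserts the low 32 bits.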
+  iihf(dst, Operand(hi_32));
+  iilf(dst, Operand(lo_32));
+#else
+  int value = src.immediate();
+  iilf(dst, Operand(value));
+#endif
+}
+
+void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
+  if (dst.is(src2)) {
+    MulP(dst, src1);
+  } else if (dst.is(src1)) {
+    MulP(dst, src2);
+  } else {
+    Move(dst, src1);
+    MulP(dst, src2);
+  }
+}
+
+void MacroAssembler::DivP(Register dividend, Register divider) {
+  // The dividend must be the even register of an even/odd register pair.
+  DCHECK(dividend.code() % 2 == 0);
+#if V8_TARGET_ARCH_S390X
+  dsgr(dividend, divider);
+#else
+  dr(dividend, divider);
+#endif
+}
+
+void MacroAssembler::MulP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  msgfi(dst, opnd);
+#else
+  msfi(dst, opnd);
+#endif
+}
+
+void MacroAssembler::MulP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  msgr(dst, src);
+#else
+  msr(dst, src);
+#endif
+}
+
+void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  if (is_uint16(opnd.offset())) {
+    ms(dst, opnd);
+  } else if (is_int20(opnd.offset())) {
+    msy(dst, opnd);
+  } else {
+    UNIMPLEMENTED();
+  }
+#else
+  if (is_int20(opnd.offset())) {
+    msg(dst, opnd);
+  } else {
+    UNIMPLEMENTED();
+  }
+#endif
+}
+
+//----------------------------------------------------------------------------
+//  Add Instructions
+//----------------------------------------------------------------------------
+
+// Add 32-bit (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::Add32(Register dst, const Operand& opnd) {
+  if (is_int16(opnd.immediate()))
+    ahi(dst, opnd);
+  else
+    afi(dst, opnd);
+}
+
+// Add Pointer Size (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::AddP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  if (is_int16(opnd.immediate()))
+    aghi(dst, opnd);
+  else
+    agfi(dst, opnd);
+#else
+  Add32(dst, opnd);
+#endif
+}
+
+// Add 32-bit (Register dst = Register src + Immediate opnd)
+void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
+  if (!dst.is(src)) {
+    if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
+      ahik(dst, src, opnd);
+      return;
+    }
+    lr(dst, src);
+  }
+  Add32(dst, opnd);
+}
+
+// Add Pointer Size (Register dst = Register src + Immediate opnd)
+void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
+  if (!dst.is(src)) {
+    if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
+      AddPImm_RRI(dst, src, opnd);
+      return;
+    }
+    LoadRR(dst, src);
+  }
+  AddP(dst, opnd);
+}
+
+// Add 32-bit (Register dst = Register dst + Register src)
+void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
+
+// Add Pointer Size (Register dst = Register dst + Register src)
+void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
+
+// Add Pointer Size with src extension
+//     (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  agfr(dst, src);
+#else
+  ar(dst, src);
+#endif
+}
+
+// Add 32-bit (Register dst = Register src1 + Register src2)
+void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
+    // as AR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      ark(dst, src1, src2);
+      return;
+    } else {
+      lr(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  ar(dst, src2);
+}
+
+// Add Pointer Size (Register dst = Register src1 + Register src2)
+void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
+    // as AR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      AddP_RRR(dst, src1, src2);
+      return;
+    } else {
+      LoadRR(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  AddRR(dst, src2);
+}
+
+// Add Pointer Size with src extension
+//      (Register dst (ptr) = Register dst (ptr) + Register src1 (ptr) +
+//                            Register src2 (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
+                                    Register src2) {
+#if V8_TARGET_ARCH_S390X
+  if (dst.is(src2)) {
+    // The source we need to sign extend is the same as result.
+    lgfr(dst, src2);
+    agr(dst, src1);
+  } else {
+    if (!dst.is(src1)) LoadRR(dst, src1);
+    agfr(dst, src2);
+  }
+#else
+  AddP(dst, src1, src2);
+#endif
+}
+
+// Add 32-bit (Register-Memory)
+void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    a(dst, opnd);
+  else
+    ay(dst, opnd);
+}
+
+// Add Pointer Size (Register-Memory)
+void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(is_int20(opnd.offset()));
+  ag(dst, opnd);
+#else
+  Add32(dst, opnd);
+#endif
+}
+
+// Add Pointer Size with src extension
+//      (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(is_int20(opnd.offset()));
+  agf(dst, opnd);
+#else
+  Add32(dst, opnd);
+#endif
+}
+
+// Add 32-bit (Memory - Immediate)
+void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
+  DCHECK(is_int8(imm.immediate()));
+  DCHECK(is_int20(opnd.offset()));
+  DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
+  asi(opnd, imm);
+}
+
+// Add Pointer-sized (Memory - Immediate)
+void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
+  DCHECK(is_int8(imm.immediate()));
+  DCHECK(is_int20(opnd.offset()));
+  DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
+#if V8_TARGET_ARCH_S390X
+  agsi(opnd, imm);
+#else
+  asi(opnd, imm);
+#endif
+}
+
+//----------------------------------------------------------------------------
+//  Add Logical Instructions
+//----------------------------------------------------------------------------
+
+// Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
+void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
+                                           Register src2) {
+  if (!dst.is(src2) && !dst.is(src1)) {
+    lr(dst, src1);
+    alcr(dst, src2);
+  } else if (!dst.is(src2)) {
+    // dst == src1
+    DCHECK(dst.is(src1));
+    alcr(dst, src2);
+  } else {
+    // dst == src2
+    DCHECK(dst.is(src2));
+    alcr(dst, src1);
+  }
+}
+
+// Add Logical 32-bit (Register dst = Register src1 + Register src2)
+void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
+  if (!dst.is(src2) && !dst.is(src1)) {
+    lr(dst, src1);
+    alr(dst, src2);
+  } else if (!dst.is(src2)) {
+    // dst == src1
+    DCHECK(dst.is(src1));
+    alr(dst, src2);
+  } else {
+    // dst == src2
+    DCHECK(dst.is(src2));
+    alr(dst, src1);
+  }
+}
+
+// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
+  alfi(dst, imm);
+}
+
+// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
+#ifdef V8_TARGET_ARCH_S390X
+  algfi(dst, imm);
+#else
+  AddLogical(dst, imm);
+#endif
+}
+
+// Add Logical 32-bit (Register-Memory)
+void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    al_z(dst, opnd);
+  else
+    aly(dst, opnd);
+}
+
+// Add Logical Pointer Size (Register-Memory)
+void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(is_int20(opnd.offset()));
+  alg(dst, opnd);
+#else
+  AddLogical(dst, opnd);
+#endif
+}
+
+//----------------------------------------------------------------------------
+//  Subtract Instructions
+//----------------------------------------------------------------------------
+
+// Subtract Logical With Borrow 32-bit (Register dst = Register src1 - Register
+// src2)
+void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
+                                            Register src2) {
+  if (!dst.is(src2) && !dst.is(src1)) {
+    lr(dst, src1);
+    slbr(dst, src2);
+  } else if (!dst.is(src2)) {
+    // dst == src1
+    DCHECK(dst.is(src1));
+    slbr(dst, src2);
+  } else {
+    // dst == src2
+    DCHECK(dst.is(src2));
+    lr(r0, dst);
+    SubLogicalWithBorrow32(dst, src1, r0);
+  }
+}
+
+// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
+void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
+  if (!dst.is(src2) && !dst.is(src1)) {
+    lr(dst, src1);
+    slr(dst, src2);
+  } else if (!dst.is(src2)) {
+    // dst == src1
+    DCHECK(dst.is(src1));
+    slr(dst, src2);
+  } else {
+    // dst == src2
+    DCHECK(dst.is(src2));
+    lr(r0, dst);
+    SubLogical32(dst, src1, r0);
+  }
+}
+
+// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
+void MacroAssembler::Sub32(Register dst, const Operand& imm) {
+  Add32(dst, Operand(-(imm.imm_)));
+}
+
+// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
+void MacroAssembler::SubP(Register dst, const Operand& imm) {
+  AddP(dst, Operand(-(imm.imm_)));
+}
+
+// Subtract 32-bit (Register dst = Register src - Immediate opnd)
+void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
+  Add32(dst, src, Operand(-(imm.imm_)));
+}
+
+// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
+void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
+  AddP(dst, src, Operand(-(imm.imm_)));
+}
+
+// Subtract 32-bit (Register dst = Register dst - Register src)
+void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
+
+// Subtract Pointer Size (Register dst = Register dst - Register src)
+void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
+
+// Subtract Pointer Size with src extension
+//     (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  sgfr(dst, src);
+#else
+  sr(dst, src);
+#endif
+}
+
+// Subtract 32-bit (Register = Register - Register)
+void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
+  // Use non-clobbering version if possible
+  if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+    srk(dst, src1, src2);
+    return;
+  }
+  if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
+  // In the scenario dst = src - dst, we need to swap and negate.
+  if (!dst.is(src1) && dst.is(src2)) {
+    sr(dst, src1);  // dst = (dst - src)
+    lcr(dst, dst);  // dst = -dst
+  } else {
+    sr(dst, src2);
+  }
+}
+
+// Subtract Pointer Sized (Register = Register - Register)
+void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
+  // Use non-clobbering version if possible
+  if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+    SubP_RRR(dst, src1, src2);
+    return;
+  }
+  if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
+  // In the scenario dst = src - dst, we need to swap and negate.
+  if (!dst.is(src1) && dst.is(src2)) {
+    SubP(dst, src1);             // dst = (dst - src)
+    LoadComplementRR(dst, dst);  // dst = -dst
+  } else {
+    SubP(dst, src2);
+  }
+}
+
+// Subtract Pointer Size with src extension
+//     (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
+                                    Register src2) {
+#if V8_TARGET_ARCH_S390X
+  if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
+
+  // In the scenario dst = src - dst, we need to swap and negate.
+  if (!dst.is(src1) && dst.is(src2)) {
+    lgfr(dst, dst);              // Sign extend this operand first.
+    SubP(dst, src1);             // dst = (dst - src)
+    LoadComplementRR(dst, dst);  // dst = -dst
+  } else {
+    sgfr(dst, src2);
+  }
+#else
+  SubP(dst, src1, src2);
+#endif
+}
+
+// Subtract 32-bit (Register-Memory)
+void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    s(dst, opnd);
+  else
+    sy(dst, opnd);
+}
+
+// Subtract Pointer Sized (Register - Memory)
+void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  sg(dst, opnd);
+#else
+  Sub32(dst, opnd);
+#endif
+}
+
+void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+  sllg(src, src, Operand(32));
+  ldgr(dst, src);
+}
+
+void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+  lgdr(dst, src);
+  srlg(dst, dst, Operand(32));
+}
+
+void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(is_int20(opnd.offset()));
+  sgf(dst, opnd);
+#else
+  Sub32(dst, opnd);
+#endif
+}
+
+//----------------------------------------------------------------------------
+//  Subtract Logical Instructions
+//----------------------------------------------------------------------------
+
+// Subtract Logical 32-bit (Register - Memory)
+void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    sl(dst, opnd);
+  else
+    sly(dst, opnd);
+}
+
+// Subtract Logical Pointer Sized (Register - Memory)
+void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+  slgf(dst, opnd);
+#else
+  SubLogical(dst, opnd);
+#endif
+}
+
+// Subtract Logical Pointer Size with src extension
+//      (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
+                                           const MemOperand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(is_int20(opnd.offset()));
+  slgf(dst, opnd);
+#else
+  SubLogical(dst, opnd);
+#endif
+}
+
+//----------------------------------------------------------------------------
+//  Bitwise Operations
+//----------------------------------------------------------------------------
+
+// AND 32-bit - dst = dst & src
+void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
+
+// AND Pointer Size - dst = dst & src
+void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
+
+// Non-clobbering AND 32-bit - dst = src1 & src2
+void MacroAssembler::And(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
+    // as NR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      nrk(dst, src1, src2);
+      return;
+    } else {
+      lr(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  And(dst, src2);
+}
+
+// Non-clobbering AND pointer size - dst = src1 & src2
+void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
+    // as NR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      AndP_RRR(dst, src1, src2);
+      return;
+    } else {
+      LoadRR(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  AndP(dst, src2);
+}
+
+// AND 32-bit (Reg - Mem)
+void MacroAssembler::And(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    n(dst, opnd);
+  else
+    ny(dst, opnd);
+}
+
+// AND Pointer Size (Reg - Mem)
+void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+  ng(dst, opnd);
+#else
+  And(dst, opnd);
+#endif
+}
+
+// AND 32-bit - dst = dst & imm
+void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
+
+// AND Pointer Size - dst = dst & imm
+void MacroAssembler::AndP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  intptr_t value = opnd.imm_;
+  if (value >> 32 != -1) {
+    // This may not work because the condition code won't be set correctly.
+    nihf(dst, Operand(value >> 32));
+  }
+  nilf(dst, Operand(value & 0xFFFFFFFF));
+#else
+  And(dst, opnd);
+#endif
+}
+
+// AND 32-bit - dst = src & imm
+void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
+  if (!dst.is(src)) lr(dst, src);
+  nilf(dst, opnd);
+}
+
+// AND Pointer Size - dst = src & imm
+void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
+  // Try to exploit RISBG first
+  intptr_t value = opnd.imm_;
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+    intptr_t shifted_value = value;
+    int trailing_zeros = 0;
+
+    // We start checking how many trailing zeros are left at the end.
+    while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
+      trailing_zeros++;
+      shifted_value >>= 1;
+    }
+
+    // If shifted_value (the value with its trailing zeros shifted out) is one
+    // less than a power of 2, the mask is a contiguous run of 1 bits.
+    // Special case: If shifted_value is zero, we cannot use RISBG, as it
+    //               requires selection of at least 1 bit.
+    if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
+      int startBit =
+          base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
+      int endBit = 63 - trailing_zeros;
+      // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
+      risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
+            true);
+      return;
+    } else if (-1 == shifted_value) {
+      // A special case in which all bits up to the MSB are 1's.  In this
+      // case, startBit can be set to 0.
+      int endBit = 63 - trailing_zeros;
+      risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
+      return;
+    }
+  }
+
+  // If we are AND'ing with zero, the result is zero regardless of dst's
+  // current value, so the copy from src can be skipped.
+  if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
+  AndP(dst, opnd);
+}
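+
+// Worked example (illustrative values): AndP(dst, src, Operand(0x0FF0)) finds
+// trailing_zeros = 4 and shifted_value = 0xFF, which is one less than a power
+// of two, so the mask is a contiguous run of ones. RISBG then selects bits
+// startBit = 52 through endBit = 59 (bit 0 being the MSB) and zeroes the rest,
+// replacing the register copy plus immediate-AND sequence with one instruction.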
+
+// OR 32-bit - dst = dst | src
+void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
+
+// OR Pointer Size - dst = dst | src
+void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
+
+// Non-clobbering OR 32-bit - dst = src1 | src2
+void MacroAssembler::Or(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
+    // as OR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      ork(dst, src1, src2);
+      return;
+    } else {
+      lr(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  Or(dst, src2);
+}
+
+// Non-clobbering OR pointer size - dst = src1 | src2
+void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
+    // as OR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      OrP_RRR(dst, src1, src2);
+      return;
+    } else {
+      LoadRR(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  OrP(dst, src2);
+}
+
+// OR 32-bit (Reg - Mem)
+void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    o(dst, opnd);
+  else
+    oy(dst, opnd);
+}
+
+// OR Pointer Size (Reg - Mem)
+void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+  og(dst, opnd);
+#else
+  Or(dst, opnd);
+#endif
+}
+
+// OR 32-bit - dst = dst | imm
+void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
+
+// OR Pointer Size - dst = dst | imm
+void MacroAssembler::OrP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  intptr_t value = opnd.imm_;
+  if (value >> 32 != 0) {
+    // This may not work because the condition code won't be set correctly.
+    oihf(dst, Operand(value >> 32));
+  }
+  oilf(dst, Operand(value & 0xFFFFFFFF));
+#else
+  Or(dst, opnd);
+#endif
+}
+
+// OR 32-bit - dst = src | imm
+void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
+  if (!dst.is(src)) lr(dst, src);
+  oilf(dst, opnd);
+}
+
+// OR Pointer Size - dst = src | imm
+void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
+  if (!dst.is(src)) LoadRR(dst, src);
+  OrP(dst, opnd);
+}
+
+// XOR 32-bit - dst = dst ^ src
+void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
+
+// XOR Pointer Size - dst = dst ^ src
+void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
+
+// Non-clobbering XOR 32-bit - dst = src1 ^ src2
+void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
+    // as XR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      xrk(dst, src1, src2);
+      return;
+    } else {
+      lr(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  Xor(dst, src2);
+}
+
+// Non-clobbering XOR pointer size - dst = src1 ^ src2
+void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1) && !dst.is(src2)) {
+    // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
+    // as XR is a smaller instruction.
+    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+      XorP_RRR(dst, src1, src2);
+      return;
+    } else {
+      LoadRR(dst, src1);
+    }
+  } else if (dst.is(src2)) {
+    src2 = src1;
+  }
+  XorP(dst, src2);
+}
+
+// XOR 32-bit (Reg - Mem)
+void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    x(dst, opnd);
+  else
+    xy(dst, opnd);
+}
+
+// XOR Pointer Size (Reg - Mem)
+void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+  xg(dst, opnd);
+#else
+  Xor(dst, opnd);
+#endif
+}
+
+// XOR 32-bit - dst = dst ^ imm
+void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
+
+// XOR Pointer Size - dst = dst ^ imm
+void MacroAssembler::XorP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  intptr_t value = opnd.imm_;
+  xihf(dst, Operand(value >> 32));
+  xilf(dst, Operand(value & 0xFFFFFFFF));
+#else
+  Xor(dst, opnd);
+#endif
+}
+
+// XOR 32-bit - dst = src ^ imm
+void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
+  if (!dst.is(src)) lr(dst, src);
+  xilf(dst, opnd);
+}
+
+// XOR Pointer Size - dst = src ^ imm
+void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
+  if (!dst.is(src)) LoadRR(dst, src);
+  XorP(dst, opnd);
+}
+
+void MacroAssembler::NotP(Register dst) {
+#if V8_TARGET_ARCH_S390X
+  xihf(dst, Operand(0xFFFFFFFF));
+  xilf(dst, Operand(0xFFFFFFFF));
+#else
+  XorP(dst, Operand(0xFFFFFFFF));
+#endif
+}
+
+// Works the same as mov.
+void MacroAssembler::Load(Register dst, const Operand& opnd) {
+  intptr_t value = opnd.immediate();
+  if (is_int16(value)) {
+#if V8_TARGET_ARCH_S390X
+    lghi(dst, opnd);
+#else
+    lhi(dst, opnd);
+#endif
+  } else {
+#if V8_TARGET_ARCH_S390X
+    llilf(dst, opnd);
+#else
+    iilf(dst, opnd);
+#endif
+  }
+}
+
+void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
+  DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+  lgf(dst, opnd);  // 64<-32
+#else
+  if (is_uint12(opnd.offset())) {
+    l(dst, opnd);
+  } else {
+    ly(dst, opnd);
+  }
+#endif
+}
+
+//-----------------------------------------------------------------------------
+//  Compare Helpers
+//-----------------------------------------------------------------------------
+
+// Compare 32-bit Register vs Register
+void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
+
+// Compare Pointer Sized Register vs Register
+void MacroAssembler::CmpP(Register src1, Register src2) {
+#if V8_TARGET_ARCH_S390X
+  cgr(src1, src2);
+#else
+  Cmp32(src1, src2);
+#endif
+}
+
+// Compare 32-bit Register vs Immediate
+// This helper will set up proper relocation entries if required.
+void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
+  if (opnd.rmode_ == kRelocInfo_NONEPTR) {
+    intptr_t value = opnd.immediate();
+    if (is_int16(value))
+      chi(dst, opnd);
+    else
+      cfi(dst, opnd);
+  } else {
+    // Need to generate relocation record here
+    RecordRelocInfo(opnd.rmode_, opnd.imm_);
+    cfi(dst, opnd);
+  }
+}
+
+// Compare Pointer Sized Register vs Immediate
+// This helper will set up proper relocation entries if required.
+void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  if (opnd.rmode_ == kRelocInfo_NONEPTR) {
+    cgfi(dst, opnd);
+  } else {
+    mov(r0, opnd);  // Need to generate 64-bit relocation
+    cgr(dst, r0);
+  }
+#else
+  Cmp32(dst, opnd);
+#endif
+}
+
+// Compare 32-bit Register vs Memory
+void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
+  // make sure offset is within 20 bit range
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    c(dst, opnd);
+  else
+    cy(dst, opnd);
+}
+
+// Compare Pointer Size Register vs Memory
+void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
+  // make sure offset is within 20 bit range
+  DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+  cg(dst, opnd);
+#else
+  Cmp32(dst, opnd);
+#endif
+}
+
+//-----------------------------------------------------------------------------
+// Compare Logical Helpers
+//-----------------------------------------------------------------------------
+
+// Compare Logical 32-bit Register vs Register
+void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
+
+// Compare Logical Pointer Sized Register vs Register
+void MacroAssembler::CmpLogicalP(Register dst, Register src) {
+#ifdef V8_TARGET_ARCH_S390X
+  clgr(dst, src);
+#else
+  CmpLogical32(dst, src);
+#endif
+}
+
+// Compare Logical 32-bit Register vs Immediate
+void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
+  clfi(dst, opnd);
+}
+
+// Compare Logical Pointer Sized Register vs Immediate
+void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
+  clgfi(dst, opnd);
+#else
+  CmpLogical32(dst, opnd);
+#endif
+}
+
+// Compare Logical 32-bit Register vs Memory
+void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
+  // make sure offset is within 20 bit range
+  DCHECK(is_int20(opnd.offset()));
+  if (is_uint12(opnd.offset()))
+    cl(dst, opnd);
+  else
+    cly(dst, opnd);
+}
+
+// Compare Logical Pointer Sized Register vs Memory
+void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
+  // make sure offset is within 20 bit range
+  DCHECK(is_int20(opnd.offset()));
+#if V8_TARGET_ARCH_S390X
+  clg(dst, opnd);
+#else
+  CmpLogical32(dst, opnd);
+#endif
+}
+
+// Compare Logical Byte (Mem - Imm)
+void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
+  DCHECK(is_uint8(imm.immediate()));
+  if (is_uint12(mem.offset()))
+    cli(mem, imm);
+  else
+    cliy(mem, imm);
+}
+
+void MacroAssembler::Branch(Condition c, const Operand& opnd) {
+  intptr_t value = opnd.immediate();
+  if (is_int16(value))
+    brc(c, opnd);
+  else
+    brcl(c, opnd);
+}
+
+// Branch On Count.  Decrement R1, and branch if R1 != 0.
+void MacroAssembler::BranchOnCount(Register r1, Label* l) {
+  int32_t offset = branch_offset(l);
+  positions_recorder()->WriteRecordedPositions();
+  if (is_int16(offset)) {
+#if V8_TARGET_ARCH_S390X
+    brctg(r1, Operand(offset));
+#else
+    brct(r1, Operand(offset));
+#endif
+  } else {
+    AddP(r1, Operand(-1));
+    Branch(ne, Operand(offset));
+  }
+}
+
+void MacroAssembler::LoadIntLiteral(Register dst, int value) {
+  Load(dst, Operand(value));
+}
+
+void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+  intptr_t value = reinterpret_cast<intptr_t>(smi);
+#if V8_TARGET_ARCH_S390X
+  DCHECK((value & 0xffffffff) == 0);
+  // The Smi value is loaded into the upper 32 bits; the lower 32 bits are zero.
+  llihf(dst, Operand(value >> 32));
+#else
+  llilf(dst, Operand(value));
+#endif
+}
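+
+// Example (assuming 31-bit Smis kept in the upper word on 64-bit targets, as
+// the DCHECK above implies): Smi(5) is encoded as 0x0000000500000000, so LLIHF
+// loads 0x5 into the upper half while the lower 32 bits stay zero; on 31-bit
+// targets LLILF loads the tagged value 0xA (5 << 1) directly.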
+
+void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
+                                       Register scratch) {
+  uint32_t hi_32 = value >> 32;
+  uint32_t lo_32 = static_cast<uint32_t>(value);
+
+  // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
+  iihf(scratch, Operand(hi_32));
+  iilf(scratch, Operand(lo_32));
+  ldgr(result, scratch);
+}
+
+void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
+                                       Register scratch) {
+  uint64_t int_val = bit_cast<uint64_t, double>(value);
+  LoadDoubleLiteral(result, int_val, scratch);
+}
+
+void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
+                                        Register scratch) {
+  uint32_t hi_32 = bit_cast<uint32_t>(value);
+  uint32_t lo_32 = 0;
+
+  // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
+  iihf(scratch, Operand(hi_32));
+  iilf(scratch, Operand(lo_32));
+  ldgr(result, scratch);
+}
+
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
+#if V8_TARGET_ARCH_S390X
+  LoadSmiLiteral(scratch, smi);
+  cgr(src1, scratch);
+#else
+  // CFI takes 32-bit immediate.
+  cfi(src1, Operand(smi));
+#endif
+}
+
+void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
+                                          Register scratch) {
+#if V8_TARGET_ARCH_S390X
+  LoadSmiLiteral(scratch, smi);
+  clgr(src1, scratch);
+#else
+  // CLFI takes 32-bit immediate
+  clfi(src1, Operand(smi));
+#endif
+}
+
+void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
+                                   Register scratch) {
+#if V8_TARGET_ARCH_S390X
+  LoadSmiLiteral(scratch, smi);
+  AddP(dst, src, scratch);
+#else
+  AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
+#endif
+}
+
+void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
+                                   Register scratch) {
+#if V8_TARGET_ARCH_S390X
+  LoadSmiLiteral(scratch, smi);
+  SubP(dst, src, scratch);
+#else
+  AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
+#endif
+}
+
+void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
+  if (!dst.is(src)) LoadRR(dst, src);
+#if V8_TARGET_ARCH_S390X
+  DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
+  int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
+  nihf(dst, Operand(value));
+#else
+  nilf(dst, Operand(reinterpret_cast<int>(smi)));
+#endif
+}
+
+// Load a "pointer" sized value from the memory location
+void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
+                           Register scratch) {
+  int offset = mem.offset();
+
+  if (!scratch.is(no_reg) && !is_int20(offset)) {
+    /* cannot use d-form */
+    LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_S390X
+    lg(dst, MemOperand(mem.rb(), scratch));
+#else
+    l(dst, MemOperand(mem.rb(), scratch));
+#endif
+  } else {
+#if V8_TARGET_ARCH_S390X
+    lg(dst, mem);
+#else
+    if (is_uint12(offset)) {
+      l(dst, mem);
+    } else {
+      ly(dst, mem);
+    }
+#endif
+  }
+}
+
+// Store a "pointer" sized value to the memory location
+void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+                            Register scratch) {
+  if (!is_int20(mem.offset())) {
+    DCHECK(!scratch.is(no_reg));
+    DCHECK(!scratch.is(r0));
+    LoadIntLiteral(scratch, mem.offset());
+#if V8_TARGET_ARCH_S390X
+    stg(src, MemOperand(mem.rb(), scratch));
+#else
+    st(src, MemOperand(mem.rb(), scratch));
+#endif
+  } else {
+#if V8_TARGET_ARCH_S390X
+    stg(src, mem);
+#else
+    // StoreW will try to generate ST if offset fits, otherwise
+    // it'll generate STY.
+    StoreW(src, mem);
+#endif
+  }
+}
+
+// Store a "pointer" sized constant to the memory location
+void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
+                            Register scratch) {
+  // Relocations not supported
+  DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);
+
+  // Try to use MVGHI/MVHI
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
+      mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
+#if V8_TARGET_ARCH_S390X
+    mvghi(mem, opnd);
+#else
+    mvhi(mem, opnd);
+#endif
+  } else {
+    LoadImmP(scratch, opnd);
+    StoreP(scratch, mem);
+  }
+}
+
+void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
+                                   const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(is_int20(mem.offset()));
+  lmg(dst1, dst2, mem);
+#else
+  if (is_uint12(mem.offset())) {
+    lm(dst1, dst2, mem);
+  } else {
+    DCHECK(is_int20(mem.offset()));
+    lmy(dst1, dst2, mem);
+  }
+#endif
+}
+
+void MacroAssembler::StoreMultipleP(Register src1, Register src2,
+                                    const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+  DCHECK(is_int20(mem.offset()));
+  stmg(src1, src2, mem);
+#else
+  if (is_uint12(mem.offset())) {
+    stm(src1, src2, mem);
+  } else {
+    DCHECK(is_int20(mem.offset()));
+    stmy(src1, src2, mem);
+  }
+#endif
+}
+
+void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
+                                   const MemOperand& mem) {
+  if (is_uint12(mem.offset())) {
+    lm(dst1, dst2, mem);
+  } else {
+    DCHECK(is_int20(mem.offset()));
+    lmy(dst1, dst2, mem);
+  }
+}
+
+void MacroAssembler::StoreMultipleW(Register src1, Register src2,
+                                    const MemOperand& mem) {
+  if (is_uint12(mem.offset())) {
+    stm(src1, src2, mem);
+  } else {
+    DCHECK(is_int20(mem.offset()));
+    stmy(src1, src2, mem);
+  }
+}
+
+// Load 32-bits and sign extend if necessary.
+void MacroAssembler::LoadW(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  lgfr(dst, src);
+#else
+  if (!dst.is(src)) lr(dst, src);
+#endif
+}
+
+// Load 32-bits and sign extend if necessary.
+void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
+                           Register scratch) {
+  int offset = mem.offset();
+
+  if (!is_int20(offset)) {
+    DCHECK(!scratch.is(no_reg));
+    LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_S390X
+    lgf(dst, MemOperand(mem.rb(), scratch));
+#else
+    l(dst, MemOperand(mem.rb(), scratch));
+#endif
+  } else {
+#if V8_TARGET_ARCH_S390X
+    lgf(dst, mem);
+#else
+    if (is_uint12(offset)) {
+      l(dst, mem);
+    } else {
+      ly(dst, mem);
+    }
+#endif
+  }
+}
+
+// Load 32-bits and zero extend if necessary.
+void MacroAssembler::LoadlW(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  llgfr(dst, src);
+#else
+  if (!dst.is(src)) lr(dst, src);
+#endif
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand of RX or RXY format
+void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
+                            Register scratch) {
+  Register base = mem.rb();
+  int offset = mem.offset();
+
+#if V8_TARGET_ARCH_S390X
+  if (is_int20(offset)) {
+    llgf(dst, mem);
+  } else if (!scratch.is(no_reg)) {
+    // Materialize offset into scratch register.
+    LoadIntLiteral(scratch, offset);
+    llgf(dst, MemOperand(base, scratch));
+  } else {
+    DCHECK(false);
+  }
+#else
+  bool use_RXform = false;
+  bool use_RXYform = false;
+  if (is_uint12(offset)) {
+    // RX-format supports unsigned 12-bits offset.
+    use_RXform = true;
+  } else if (is_int20(offset)) {
+    // RXY-format supports signed 20-bits offset.
+    use_RXYform = true;
+  } else if (!scratch.is(no_reg)) {
+    // Materialize offset into scratch register.
+    LoadIntLiteral(scratch, offset);
+  } else {
+    DCHECK(false);
+  }
+
+  if (use_RXform) {
+    l(dst, mem);
+  } else if (use_RXYform) {
+    ly(dst, mem);
+  } else {
+    ly(dst, MemOperand(base, scratch));
+  }
+#endif
+}
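+
+// Example offsets (illustrative): an offset of 0xFF0 fits the unsigned 12-bit
+// RX field, 0x12345 only fits the signed 20-bit RXY field, and anything
+// outside +/-0x7FFFF (e.g. 0x100000) must first be materialized into the
+// scratch register and used as an index.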
+
+void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+  lgb(dst, mem);
+#else
+  lb(dst, mem);
+#endif
+}
+
+void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+  llgc(dst, mem);
+#else
+  llc(dst, mem);
+#endif
+}
+
+// Load And Test (Reg <- Reg)
+void MacroAssembler::LoadAndTest32(Register dst, Register src) {
+  ltr(dst, src);
+}
+
+// Load And Test
+//     (Register dst(ptr) = Register src (32 | 32->64))
+// src is treated as a 32-bit signed integer, which is sign extended to
+// 64-bit if necessary.
+void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  ltgfr(dst, src);
+#else
+  ltr(dst, src);
+#endif
+}
+
+// Load And Test Pointer Sized (Reg <- Reg)
+void MacroAssembler::LoadAndTestP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  ltgr(dst, src);
+#else
+  ltr(dst, src);
+#endif
+}
+
+// Load And Test 32-bit (Reg <- Mem)
+void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
+  lt_z(dst, mem);
+}
+
+// Load And Test Pointer Sized (Reg <- Mem)
+void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+  ltg(dst, mem);
+#else
+  lt_z(dst, mem);
+#endif
+}
+
+// Load Double Precision (64-bit) Floating Point number from memory
+void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
+  // Both the 32-bit and 64-bit builds use 64-bit floating point registers.
+  if (is_uint12(mem.offset())) {
+    ld(dst, mem);
+  } else {
+    ldy(dst, mem);
+  }
+}
+
+// Load Single Precision (32-bit) Floating Point number from memory
+void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
+  if (is_uint12(mem.offset())) {
+    le_z(dst, mem);
+  } else {
+    DCHECK(is_int20(mem.offset()));
+    ley(dst, mem);
+  }
+}
+
+// Load Single Precision (32-bit) Floating Point number from memory,
+// and convert to Double Precision (64-bit)
+void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
+                                                const MemOperand& mem) {
+  LoadFloat32(dst, mem);
+  ldebr(dst, dst);
+}
+
+// Store Double Precision (64-bit) Floating Point number to memory
+void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
+  if (is_uint12(mem.offset())) {
+    std(dst, mem);
+  } else {
+    stdy(dst, mem);
+  }
+}
+
+// Store Single Precision (32-bit) Floating Point number to memory
+void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
+  if (is_uint12(mem.offset())) {
+    ste(src, mem);
+  } else {
+    stey(src, mem);
+  }
+}
+
+// Convert Double precision (64-bit) to Single Precision (32-bit)
+// and store resulting Float32 to memory
+void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
+                                          const MemOperand& mem,
+                                          DoubleRegister scratch) {
+  ledbr(scratch, src);
+  StoreFloat32(scratch, mem);
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand of RX or RXY format
+void MacroAssembler::StoreW(Register src, const MemOperand& mem,
+                            Register scratch) {
+  Register base = mem.rb();
+  int offset = mem.offset();
+
+  bool use_RXform = false;
+  bool use_RXYform = false;
+
+  if (is_uint12(offset)) {
+    // RX-format supports unsigned 12-bits offset.
+    use_RXform = true;
+  } else if (is_int20(offset)) {
+    // RXY-format supports signed 20-bits offset.
+    use_RXYform = true;
+  } else if (!scratch.is(no_reg)) {
+    // Materialize offset into scratch register.
+    LoadIntLiteral(scratch, offset);
+  } else {
+    // scratch is no_reg
+    DCHECK(false);
+  }
+
+  if (use_RXform) {
+    st(src, mem);
+  } else if (use_RXYform) {
+    sty(src, mem);
+  } else {
+    StoreW(src, MemOperand(base, scratch));
+  }
+}
+
+// Loads 16-bits half-word value from memory and sign extends to pointer
+// sized register
+void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
+                                   Register scratch) {
+  Register base = mem.rb();
+  int offset = mem.offset();
+
+  if (!is_int20(offset)) {
+    DCHECK(!scratch.is(no_reg));
+    LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_S390X
+    lgh(dst, MemOperand(base, scratch));
+#else
+    lh(dst, MemOperand(base, scratch));
+#endif
+  } else {
+#if V8_TARGET_ARCH_S390X
+    lgh(dst, mem);
+#else
+    if (is_uint12(offset)) {
+      lh(dst, mem);
+    } else {
+      lhy(dst, mem);
+    }
+#endif
+  }
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
+                                   Register scratch) {
+  Register base = mem.rb();
+  int offset = mem.offset();
+
+  if (is_uint12(offset)) {
+    sth(src, mem);
+  } else if (is_int20(offset)) {
+    sthy(src, mem);
+  } else {
+    DCHECK(!scratch.is(no_reg));
+    LoadIntLiteral(scratch, offset);
+    sth(src, MemOperand(base, scratch));
+  }
+}
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
+                               Register scratch) {
+  Register base = mem.rb();
+  int offset = mem.offset();
+
+  if (is_uint12(offset)) {
+    stc(src, mem);
+  } else if (is_int20(offset)) {
+    stcy(src, mem);
+  } else {
+    DCHECK(!scratch.is(no_reg));
+    LoadIntLiteral(scratch, offset);
+    stc(src, MemOperand(base, scratch));
+  }
+}
+
+// Shift left logical for 32-bit integer types.
+void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
+  if (dst.is(src)) {
+    sll(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    sllk(dst, src, val);
+  } else {
+    lr(dst, src);
+    sll(dst, val);
+  }
+}
+
+// Shift left logical for 32-bit integer types.
+void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
+  if (dst.is(src)) {
+    sll(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    sllk(dst, src, val);
+  } else {
+    DCHECK(!dst.is(val));  // The lr/sll path clobbers val.
+    lr(dst, src);
+    sll(dst, val);
+  }
+}
+
+// Shift right logical for 32-bit integer types.
+void MacroAssembler::ShiftRight(Register dst, Register src,
+                                const Operand& val) {
+  if (dst.is(src)) {
+    srl(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    srlk(dst, src, val);
+  } else {
+    lr(dst, src);
+    srl(dst, val);
+  }
+}
+
+// Shift right logical for 32-bit integer types.
+void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
+  DCHECK(!dst.is(val));  // The lr/srl path clobbers val.
+  if (dst.is(src)) {
+    srl(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    srlk(dst, src, val);
+  } else {
+    lr(dst, src);
+    srl(dst, val);
+  }
+}
+
+// Shift left arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftLeftArith(Register dst, Register src,
+                                    const Operand& val) {
+  if (dst.is(src)) {
+    sla(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    slak(dst, src, val);
+  } else {
+    lr(dst, src);
+    sla(dst, val);
+  }
+}
+
+// Shift left arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
+  DCHECK(!dst.is(val));  // The lr/sla path clobbers val.
+  if (dst.is(src)) {
+    sla(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    slak(dst, src, val);
+  } else {
+    lr(dst, src);
+    sla(dst, val);
+  }
+}
+
+// Shift right arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftRightArith(Register dst, Register src,
+                                     const Operand& val) {
+  if (dst.is(src)) {
+    sra(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    srak(dst, src, val);
+  } else {
+    lr(dst, src);
+    sra(dst, val);
+  }
+}
+
+// Shift right arithmetic for 32-bit integer types.
+void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
+  DCHECK(!dst.is(val));  // The lr/sra path clobbers val.
+  if (dst.is(src)) {
+    sra(dst, val);
+  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    srak(dst, src, val);
+  } else {
+    lr(dst, src);
+    sra(dst, val);
+  }
+}
+
+// Clear the rightmost number of bits specified by val.
+void MacroAssembler::ClearRightImm(Register dst, Register src,
+                                   const Operand& val) {
+  int numBitsToClear = val.imm_ % (kPointerSize * 8);
+
+  // Try to use RISBG if possible
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+    int endBit = 63 - numBitsToClear;
+    risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
+    return;
+  }
+
+  uint64_t hexMask = ~((1L << numBitsToClear) - 1);
+
+  // The S390 AND immediate instructions operate on dst in place.
+  // Make a copy into dst first if necessary.
+  if (!dst.is(src)) LoadRR(dst, src);
+
+  if (numBitsToClear <= 16) {
+    nill(dst, Operand(static_cast<uint16_t>(hexMask)));
+  } else if (numBitsToClear <= 32) {
+    nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
+  } else if (numBitsToClear <= 64) {
+    nilf(dst, Operand(static_cast<intptr_t>(0)));
+    nihf(dst, Operand(hexMask >> 32));
+  }
+}
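+
+// Worked example (illustrative values): ClearRightImm(dst, src, Operand(4))
+// clears the low 4 bits. With GENERAL_INSTR_EXT, RISBG selects bits 0..59
+// (bit 0 being the MSB) and zeroes the rest; otherwise hexMask = ~0xF, so
+// NILL with 0xFFF0 masks the low halfword in place.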
+
+void MacroAssembler::Popcnt32(Register dst, Register src) {
+  DCHECK(!src.is(r0));
+  DCHECK(!dst.is(r0));
+
+  popcnt(dst, src);
+  ShiftRight(r0, dst, Operand(16));
+  ar(dst, r0);
+  ShiftRight(r0, dst, Operand(8));
+  ar(dst, r0);
+  lbr(dst, dst);
+}
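+
+// Worked example (illustrative, assuming POPCNT produces a per-byte bit count
+// as the folding sequence above implies): src = 0x00000103 yields per-byte
+// counts 0x00000102; shifting right 16 and adding leaves 0x0102, shifting
+// right 8 and adding leaves 0x0103, and LBR keeps the low byte, giving the
+// total of 3 set bits.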
+
+#ifdef V8_TARGET_ARCH_S390X
+void MacroAssembler::Popcnt64(Register dst, Register src) {
+  DCHECK(!src.is(r0));
+  DCHECK(!dst.is(r0));
+
+  popcnt(dst, src);
+  ShiftRightP(r0, dst, Operand(32));
+  AddP(dst, r0);
+  ShiftRightP(r0, dst, Operand(16));
+  AddP(dst, r0);
+  ShiftRightP(r0, dst, Operand(8));
+  AddP(dst, r0);
+  lbr(dst, dst);
+}
+#endif
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+                Register reg5, Register reg6, Register reg7, Register reg8,
+                Register reg9, Register reg10) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+                        reg10.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  if (reg9.is_valid()) regs |= reg9.bit();
+  if (reg10.is_valid()) regs |= reg10.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
+}
+#endif
+
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
+                         FlushICache flush_cache)
+    : address_(address),
+      size_(size),
+      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
+      flush_cache_(flush_cache) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate
+  // size bytes of instructions without failing with buffer size constraints.
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  if (flush_cache_ == FLUSH) {
+    Assembler::FlushICache(masm_.isolate(), address_, size_);
+  }
+
+  // Check that the code was patched as expected.
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+void MacroAssembler::TruncatingDiv(Register result, Register dividend,
+                                   int32_t divisor) {
+  DCHECK(!dividend.is(result));
+  DCHECK(!dividend.is(r0));
+  DCHECK(!result.is(r0));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+#ifdef V8_TARGET_ARCH_S390X
+  LoadRR(result, dividend);
+  MulP(result, Operand(mag.multiplier));
+  ShiftRightArithP(result, result, Operand(32));
+
+#else
+  lay(sp, MemOperand(sp, -kPointerSize));
+  StoreP(r1, MemOperand(sp));
+
+  mov(r1, Operand(mag.multiplier));
+  mr_z(r0, dividend);  // r0:r1 = r1 * dividend
+
+  LoadRR(result, r0);
+  LoadP(r1, MemOperand(sp));
+  la(sp, MemOperand(sp, kPointerSize));
+#endif
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) {
+    AddP(result, dividend);
+  }
+  if (divisor < 0 && !neg && mag.multiplier > 0) {
+    SubP(result, dividend);
+  }
+  if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift));
+  ExtractBit(r0, dividend, 31);
+  AddP(result, r0);
+}
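+
+// Sketch of the approach (not tied to a specific divisor): the dividend is
+// multiplied by a precomputed magic multiplier and the high 32 bits of the
+// product are kept, which approximates dividend / divisor; the conditional
+// add/subtract compensates for a multiplier whose sign bit skews the high
+// word, the arithmetic shift applies the residual power-of-two divide, and
+// adding the dividend's sign bit (bit 31) rounds the result toward zero.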
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/macro-assembler-s390.h b/src/s390/macro-assembler-s390.h
new file mode 100644
index 0000000..d8d543e
--- /dev/null
+++ b/src/s390/macro-assembler-s390.h
@@ -0,0 +1,1890 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
+#define V8_S390_MACRO_ASSEMBLER_S390_H_
+
+#include "src/assembler.h"
+#include "src/bailout-reason.h"
+#include "src/frames.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {Register::kCode_r2};
+const Register kReturnRegister1 = {Register::kCode_r3};
+const Register kReturnRegister2 = {Register::kCode_r4};
+const Register kJSFunctionRegister = {Register::kCode_r3};
+const Register kContextRegister = {Register::kCode_r13};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_r3};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_r2};
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
+  return MemOperand(object, index, offset - kHeapObjectTag);
+}
+
+// Generate a MemOperand for loading a field from Root register
+inline MemOperand RootMemOperand(Heap::RootListIndex index) {
+  return MemOperand(kRootRegister, index << kPointerSizeLog2);
+}
+
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+  // Tag the result.
+  TAG_RESULT,
+  // Don't tag
+  DONT_TAG_RESULT
+};
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+  kPointersToHereMaybeInteresting,
+  kPointersToHereAreAlwaysInteresting
+};
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+                                   Register reg3 = no_reg,
+                                   Register reg4 = no_reg,
+                                   Register reg5 = no_reg,
+                                   Register reg6 = no_reg);
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+                Register reg4 = no_reg, Register reg5 = no_reg,
+                Register reg6 = no_reg, Register reg7 = no_reg,
+                Register reg8 = no_reg, Register reg9 = no_reg,
+                Register reg10 = no_reg);
+#endif
+
+// These exist to provide portability between 32-bit and 64-bit builds.
+#if V8_TARGET_ARCH_S390X
+#define Div divd
+
+// The length of the arithmetic operation is the length
+// of the register.
+
+// Length:
+// H = halfword
+// W = word
+
+// Arithmetic and bitwise operations
+#define AddMI agsi
+#define AddRR agr
+#define SubRR sgr
+#define AndRR ngr
+#define OrRR ogr
+#define XorRR xgr
+#define LoadComplementRR lcgr
+#define LoadNegativeRR lngr
+
+// Distinct Operands
+#define AddP_RRR agrk
+#define AddPImm_RRI aghik
+#define AddLogicalP_RRR algrk
+#define SubP_RRR sgrk
+#define SubLogicalP_RRR slgrk
+#define AndP_RRR ngrk
+#define OrP_RRR ogrk
+#define XorP_RRR xgrk
+
+// Load / Store
+#define LoadRR lgr
+#define LoadAndTestRR ltgr
+#define LoadImmP lghi
+#define LoadLogicalHalfWordP llgh
+
+// Compare
+#define CmpPH cghi
+#define CmpLogicalPW clgfi
+
+// Shifts
+#define ShiftLeftP sllg
+#define ShiftRightP srlg
+#define ShiftLeftArithP slag
+#define ShiftRightArithP srag
+#else
+
+// Arithmetic and bitwise operations
+// Reg2Reg
+#define AddMI asi
+#define AddRR ar
+#define SubRR sr
+#define AndRR nr
+#define OrRR or_z
+#define XorRR xr
+#define LoadComplementRR lcr
+#define LoadNegativeRR lnr
+
+// Distinct Operands
+#define AddP_RRR ark
+#define AddPImm_RRI ahik
+#define AddLogicalP_RRR alrk
+#define SubP_RRR srk
+#define SubLogicalP_RRR slrk
+#define AndP_RRR nrk
+#define OrP_RRR ork
+#define XorP_RRR xrk
+
+// Load / Store
+#define LoadRR lr
+#define LoadAndTestRR ltr
+#define LoadImmP lhi
+#define LoadLogicalHalfWordP llh
+
+// Compare
+#define CmpPH chi
+#define CmpLogicalPW clfi
+
+// Shifts
+#define ShiftLeftP ShiftLeft
+#define ShiftRightP ShiftRight
+#define ShiftLeftArithP ShiftLeftArith
+#define ShiftRightArithP ShiftRightArith
+
+#endif
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public Assembler {
+ public:
+  MacroAssembler(Isolate* isolate, void* buffer, int size,
+                 CodeObjectRequired create_code_object);
+
+  // Returns the size of a call in instructions.
+  static int CallSize(Register target);
+  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  static int CallSizeNotPredictableCodeSize(Address target,
+                                            RelocInfo::Mode rmode,
+                                            Condition cond = al);
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
+  void Jump(Register target);
+  void JumpToJSEntry(Register target);
+  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
+            CRegister cr = cr7);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Register target);
+  void CallJSEntry(Register target);
+  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  int CallSize(Handle<Code> code,
+               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+               TypeFeedbackId ast_id = TypeFeedbackId::None(),
+               Condition cond = al);
+  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            TypeFeedbackId ast_id = TypeFeedbackId::None(),
+            Condition cond = al);
+  void Ret() { b(r14); }
+  void Ret(Condition cond) { b(cond, r14); }
+
+  // Emit code to discard a non-negative number of pointer-sized elements
+  // from the stack, clobbering only the sp register.
+  void Drop(int count);
+  void Drop(Register count, Register scratch = r0);
+
+  void Ret(int drop) {
+    Drop(drop);
+    Ret();
+  }
+
+  void Call(Label* target);
+
+  // Register move. May do nothing if the registers are identical.
+  void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
+  void Move(Register dst, Handle<Object> value);
+  void Move(Register dst, Register src, Condition cond = al);
+  void Move(DoubleRegister dst, DoubleRegister src);
+
+  void InsertDoubleLow(DoubleRegister dst, Register src);
+  void InsertDoubleHigh(DoubleRegister dst, Register src);
+
+  void MultiPush(RegList regs, Register location = sp);
+  void MultiPop(RegList regs, Register location = sp);
+
+  void MultiPushDoubles(RegList dregs, Register location = sp);
+  void MultiPopDoubles(RegList dregs, Register location = sp);
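+
+  // Illustrative usage (editor's sketch, not part of the original commit):
+  // the RegList variants take a bit mask of registers, e.g.
+  //   MultiPush(r3.bit() | r4.bit() | r5.bit());
+  //   ...
+  //   MultiPop(r3.bit() | r4.bit() | r5.bit());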
+
+  // Load an object from the root table.
+  void LoadRoot(Register destination, Heap::RootListIndex index,
+                Condition cond = al);
+  // Store an object to the root table.
+  void StoreRoot(Register source, Heap::RootListIndex index,
+                 Condition cond = al);
+
+  //--------------------------------------------------------------------------
+  // S390 Macro Assemblers for Instructions
+  //--------------------------------------------------------------------------
+
+  // Arithmetic Operations
+
+  // Add (Register - Immediate)
+  void Add32(Register dst, const Operand& imm);
+  void AddP(Register dst, const Operand& imm);
+  void Add32(Register dst, Register src, const Operand& imm);
+  void AddP(Register dst, Register src, const Operand& imm);
+
+  // Add (Register - Register)
+  void Add32(Register dst, Register src);
+  void AddP(Register dst, Register src);
+  void AddP_ExtendSrc(Register dst, Register src);
+  void Add32(Register dst, Register src1, Register src2);
+  void AddP(Register dst, Register src1, Register src2);
+  void AddP_ExtendSrc(Register dst, Register src1, Register src2);
+
+  // Add (Register - Mem)
+  void Add32(Register dst, const MemOperand& opnd);
+  void AddP(Register dst, const MemOperand& opnd);
+  void AddP_ExtendSrc(Register dst, const MemOperand& opnd);
+
+  // Add (Mem - Immediate)
+  void Add32(const MemOperand& opnd, const Operand& imm);
+  void AddP(const MemOperand& opnd, const Operand& imm);
+
+  // Add Logical (Register - Register)
+  void AddLogical32(Register dst, Register src1, Register src2);
+
+  // Add Logical With Carry (Register - Register)
+  void AddLogicalWithCarry32(Register dst, Register src1, Register src2);
+
+  // Add Logical (Register - Immediate)
+  void AddLogical(Register dst, const Operand& imm);
+  void AddLogicalP(Register dst, const Operand& imm);
+
+  // Add Logical (Register - Mem)
+  void AddLogical(Register dst, const MemOperand& opnd);
+  void AddLogicalP(Register dst, const MemOperand& opnd);
+
+  // Subtract (Register - Immediate)
+  void Sub32(Register dst, const Operand& imm);
+  void SubP(Register dst, const Operand& imm);
+  void Sub32(Register dst, Register src, const Operand& imm);
+  void SubP(Register dst, Register src, const Operand& imm);
+
+  // Subtract (Register - Register)
+  void Sub32(Register dst, Register src);
+  void SubP(Register dst, Register src);
+  void SubP_ExtendSrc(Register dst, Register src);
+  void Sub32(Register dst, Register src1, Register src2);
+  void SubP(Register dst, Register src1, Register src2);
+  void SubP_ExtendSrc(Register dst, Register src1, Register src2);
+
+  // Subtract (Register - Mem)
+  void Sub32(Register dst, const MemOperand& opnd);
+  void SubP(Register dst, const MemOperand& opnd);
+  void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
+
+  // Subtract Logical (Register - Mem)
+  void SubLogical(Register dst, const MemOperand& opnd);
+  void SubLogicalP(Register dst, const MemOperand& opnd);
+  void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
+  // Subtract Logical 32-bit
+  void SubLogical32(Register dst, Register src1, Register src2);
+  // Subtract Logical With Borrow 32-bit
+  void SubLogicalWithBorrow32(Register dst, Register src1, Register src2);
+
+  // Multiply
+  void MulP(Register dst, const Operand& opnd);
+  void MulP(Register dst, Register src);
+  void MulP(Register dst, const MemOperand& opnd);
+  void Mul(Register dst, Register src1, Register src2);
+
+  // Divide
+  void DivP(Register dividend, Register divider);
+
+  // Compare
+  void Cmp32(Register src1, Register src2);
+  void CmpP(Register src1, Register src2);
+  void Cmp32(Register dst, const Operand& opnd);
+  void CmpP(Register dst, const Operand& opnd);
+  void Cmp32(Register dst, const MemOperand& opnd);
+  void CmpP(Register dst, const MemOperand& opnd);
+
+  // Compare Logical
+  void CmpLogical32(Register src1, Register src2);
+  void CmpLogicalP(Register src1, Register src2);
+  void CmpLogical32(Register src1, const Operand& opnd);
+  void CmpLogicalP(Register src1, const Operand& opnd);
+  void CmpLogical32(Register dst, const MemOperand& opnd);
+  void CmpLogicalP(Register dst, const MemOperand& opnd);
+
+  // Compare Logical Byte (CLI/CLIY)
+  void CmpLogicalByte(const MemOperand& mem, const Operand& imm);
+
+  // Load 32bit
+  void Load(Register dst, const MemOperand& opnd);
+  void Load(Register dst, const Operand& opnd);
+  void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
+  void LoadW(Register dst, Register src);
+  void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
+  void LoadlW(Register dst, Register src);
+  void LoadB(Register dst, const MemOperand& opnd);
+  void LoadlB(Register dst, const MemOperand& opnd);
+
+  // Load And Test
+  void LoadAndTest32(Register dst, Register src);
+  void LoadAndTestP_ExtendSrc(Register dst, Register src);
+  void LoadAndTestP(Register dst, Register src);
+
+  void LoadAndTest32(Register dst, const MemOperand& opnd);
+  void LoadAndTestP(Register dst, const MemOperand& opnd);
+
+  // Load Floating Point
+  void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
+  void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
+  void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
+
+  // Store Floating Point
+  void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
+  void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
+  void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
+                            DoubleRegister scratch);
+
+  void Branch(Condition c, const Operand& opnd);
+  void BranchOnCount(Register r1, Label* l);
+
+  // Shifts
+  void ShiftLeft(Register dst, Register src, Register val);
+  void ShiftLeft(Register dst, Register src, const Operand& val);
+  void ShiftRight(Register dst, Register src, Register val);
+  void ShiftRight(Register dst, Register src, const Operand& val);
+  void ShiftLeftArith(Register dst, Register src, Register shift);
+  void ShiftLeftArith(Register dst, Register src, const Operand& val);
+  void ShiftRightArith(Register dst, Register src, Register shift);
+  void ShiftRightArith(Register dst, Register src, const Operand& val);
+
+  void ClearRightImm(Register dst, Register src, const Operand& val);
+
+  // Bitwise operations
+  void And(Register dst, Register src);
+  void AndP(Register dst, Register src);
+  void And(Register dst, Register src1, Register src2);
+  void AndP(Register dst, Register src1, Register src2);
+  void And(Register dst, const MemOperand& opnd);
+  void AndP(Register dst, const MemOperand& opnd);
+  void And(Register dst, const Operand& opnd);
+  void AndP(Register dst, const Operand& opnd);
+  void And(Register dst, Register src, const Operand& opnd);
+  void AndP(Register dst, Register src, const Operand& opnd);
+  void Or(Register dst, Register src);
+  void OrP(Register dst, Register src);
+  void Or(Register dst, Register src1, Register src2);
+  void OrP(Register dst, Register src1, Register src2);
+  void Or(Register dst, const MemOperand& opnd);
+  void OrP(Register dst, const MemOperand& opnd);
+  void Or(Register dst, const Operand& opnd);
+  void OrP(Register dst, const Operand& opnd);
+  void Or(Register dst, Register src, const Operand& opnd);
+  void OrP(Register dst, Register src, const Operand& opnd);
+  void Xor(Register dst, Register src);
+  void XorP(Register dst, Register src);
+  void Xor(Register dst, Register src1, Register src2);
+  void XorP(Register dst, Register src1, Register src2);
+  void Xor(Register dst, const MemOperand& opnd);
+  void XorP(Register dst, const MemOperand& opnd);
+  void Xor(Register dst, const Operand& opnd);
+  void XorP(Register dst, const Operand& opnd);
+  void Xor(Register dst, Register src, const Operand& opnd);
+  void XorP(Register dst, Register src, const Operand& opnd);
+  void Popcnt32(Register dst, Register src);
+
+#ifdef V8_TARGET_ARCH_S390X
+  void Popcnt64(Register dst, Register src);
+#endif
+
+  void NotP(Register dst);
+
+  void mov(Register dst, const Operand& src);
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
+                                           Register address);
+
+  enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
+
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr, Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
+
+  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+                     Label* condition_met);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object, Register scratch0, Register scratch1,
+                Label* has_color, int first_bit, int second_bit);
+
+  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
+                   Label* on_black);
+
+  // Checks the color of an object.  If the object is white we jump to the
+  // incremental marker.
+  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+                   Register scratch3, Label* value_is_white);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
+  void RecordWriteField(
+      Register object, int offset, Register value, Register scratch,
+      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context, int offset, Register value, Register scratch,
+      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting) {
+    RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
+                     lr_status, save_fp, remembered_set_action, smi_check,
+                     pointers_to_here_check_for_value);
+  }
+
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
+  void RecordWriteForMap(Register object, Register map, Register dst,
+                         LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object, Register address, Register value,
+      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting);
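+
+  // Illustrative usage (editor's sketch, not part of the original commit),
+  // registers chosen for illustration only: a pointer store into a heap
+  // object is followed by the matching write barrier, e.g.
+  //   StoreP(r4, FieldMemOperand(r3, JSObject::kPropertiesOffset));
+  //   RecordWriteField(r3, JSObject::kPropertiesOffset, r4, r5,
+  //                    kLRHasNotBeenSaved, kDontSaveFPRegs);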
+
+  void push(Register src) {
+    lay(sp, MemOperand(sp, -kPointerSize));
+    StoreP(src, MemOperand(sp));
+  }
+
+  void pop(Register dst) {
+    LoadP(dst, MemOperand(sp));
+    la(sp, MemOperand(sp, kPointerSize));
+  }
+
+  void pop() { la(sp, MemOperand(sp, kPointerSize)); }
+
+  void Push(Register src) { push(src); }
+
+  // Push a handle.
+  void Push(Handle<Object> handle);
+  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+  // Push two registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2) {
+    lay(sp, MemOperand(sp, -kPointerSize * 2));
+    StoreP(src1, MemOperand(sp, kPointerSize));
+    StoreP(src2, MemOperand(sp, 0));
+  }
+
+  // Push three registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3) {
+    lay(sp, MemOperand(sp, -kPointerSize * 3));
+    StoreP(src1, MemOperand(sp, kPointerSize * 2));
+    StoreP(src2, MemOperand(sp, kPointerSize));
+    StoreP(src3, MemOperand(sp, 0));
+  }
+
+  // Push four registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4) {
+    lay(sp, MemOperand(sp, -kPointerSize * 4));
+    StoreP(src1, MemOperand(sp, kPointerSize * 3));
+    StoreP(src2, MemOperand(sp, kPointerSize * 2));
+    StoreP(src3, MemOperand(sp, kPointerSize));
+    StoreP(src4, MemOperand(sp, 0));
+  }
+
+  // Push five registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4,
+            Register src5) {
+    DCHECK(!src1.is(src2));
+    DCHECK(!src1.is(src3));
+    DCHECK(!src2.is(src3));
+    DCHECK(!src1.is(src4));
+    DCHECK(!src2.is(src4));
+    DCHECK(!src3.is(src4));
+    DCHECK(!src1.is(src5));
+    DCHECK(!src2.is(src5));
+    DCHECK(!src3.is(src5));
+    DCHECK(!src4.is(src5));
+
+    lay(sp, MemOperand(sp, -kPointerSize * 5));
+    StoreP(src1, MemOperand(sp, kPointerSize * 4));
+    StoreP(src2, MemOperand(sp, kPointerSize * 3));
+    StoreP(src3, MemOperand(sp, kPointerSize * 2));
+    StoreP(src4, MemOperand(sp, kPointerSize));
+    StoreP(src5, MemOperand(sp, 0));
+  }
+
+  void Pop(Register dst) { pop(dst); }
+
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2) {
+    LoadP(src2, MemOperand(sp, 0));
+    LoadP(src1, MemOperand(sp, kPointerSize));
+    la(sp, MemOperand(sp, 2 * kPointerSize));
+  }
+
+  // Pop three registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3) {
+    LoadP(src3, MemOperand(sp, 0));
+    LoadP(src2, MemOperand(sp, kPointerSize));
+    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
+    la(sp, MemOperand(sp, 3 * kPointerSize));
+  }
+
+  // Pop four registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3, Register src4) {
+    LoadP(src4, MemOperand(sp, 0));
+    LoadP(src3, MemOperand(sp, kPointerSize));
+    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
+    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
+    la(sp, MemOperand(sp, 4 * kPointerSize));
+  }
+
+  // Pop five registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3, Register src4,
+           Register src5) {
+    LoadP(src5, MemOperand(sp, 0));
+    LoadP(src4, MemOperand(sp, kPointerSize));
+    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
+    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
+    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
+    la(sp, MemOperand(sp, 5 * kPointerSize));
+  }
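+
+  // Illustrative usage (editor's sketch, not part of the original commit):
+  // Push and Pop use mirrored operand order, so a save/restore pair can list
+  // the registers identically, e.g.
+  //   Push(r3, r4, r5);   // r3 is stored at the highest address
+  //   ...
+  //   Pop(r3, r4, r5);    // restores all three to their saved values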
+
+  // Push a fixed frame, consisting of lr, fp, constant pool.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
+  // Push a standard frame, consisting of lr, fp, constant pool,
+  // context and JS function
+  void PushStandardFrame(Register function_reg);
+
+  void PopCommonFrame(Register marker_reg = no_reg);
+
+  // Restore caller's frame pointer and return address prior to being
+  // overwritten by tail call stack preparation.
+  void RestoreFrameStateForTailCall();
+
+  // Push and pop the registers that can hold pointers, as defined by the
+  // RegList constant kSafepointSavedRegisters.
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+  // from C.
+  // Does not handle errors.
+  void FlushICache(Register address, size_t size, Register scratch);
+
+  // If the value is a NaN, canonicalize it; otherwise, do nothing.
+  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+  void CanonicalizeNaN(const DoubleRegister value) {
+    CanonicalizeNaN(value, value);
+  }
+
+  // Converts the integer (untagged smi) in |src| to a double, storing
+  // the result in |dst|
+  void ConvertIntToDouble(Register src, DoubleRegister dst);
+
+  // Converts the unsigned integer (untagged smi) in |src| to
+  // a double, storing the result in |dst|
+  void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
+
+  // Converts the integer (untagged smi) in |src| to
+  // a float, storing the result in |dst|
+  void ConvertIntToFloat(Register src, DoubleRegister dst);
+
+  // Converts the unsigned integer (untagged smi) in |src| to
+  // a float, storing the result in |dst|
+  void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
+
+#if V8_TARGET_ARCH_S390X
+  void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
+  void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
+  void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
+  void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
+#endif
+
+  void MovIntToFloat(DoubleRegister dst, Register src);
+  void MovFloatToInt(Register dst, DoubleRegister src);
+  void MovDoubleToInt64(Register dst, DoubleRegister src);
+  void MovInt64ToDouble(DoubleRegister dst, Register src);
+  // Converts the double_input to an integer.  Note that, upon return,
+  // the contents of double_dst will also hold the fixed point representation.
+  void ConvertFloat32ToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+                             const Register dst_hi,
+#endif
+                             const Register dst,
+                             const DoubleRegister double_dst,
+                             FPRoundingMode rounding_mode = kRoundToZero);
+
+  // Converts the double_input to an integer.  Note that, upon return,
+  // the contents of double_dst will also hold the fixed point representation.
+  void ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_S390X
+                            const Register dst_hi,
+#endif
+                            const Register dst, const DoubleRegister double_dst,
+                            FPRoundingMode rounding_mode = kRoundToZero);
+
+  void ConvertFloat32ToInt32(const DoubleRegister double_input,
+                             const Register dst,
+                             const DoubleRegister double_dst,
+                             FPRoundingMode rounding_mode = kRoundToZero);
+  void ConvertFloat32ToUnsignedInt32(
+      const DoubleRegister double_input, const Register dst,
+      const DoubleRegister double_dst,
+      FPRoundingMode rounding_mode = kRoundToZero);
+#if V8_TARGET_ARCH_S390X
+  // Converts the double_input to an unsigned integer.  Note that, upon return,
+  // the contents of double_dst will also hold the fixed point representation.
+  void ConvertDoubleToUnsignedInt64(
+      const DoubleRegister double_input, const Register dst,
+      const DoubleRegister double_dst,
+      FPRoundingMode rounding_mode = kRoundToZero);
+  void ConvertFloat32ToUnsignedInt64(
+      const DoubleRegister double_input, const Register dst,
+      const DoubleRegister double_dst,
+      FPRoundingMode rounding_mode = kRoundToZero);
+#endif
+
+#if !V8_TARGET_ARCH_S390X
+  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+                     Register src_high, Register scratch, Register shift);
+  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+                     Register src_high, uint32_t shift);
+  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+                      Register src_high, Register scratch, Register shift);
+  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+                      Register src_high, uint32_t shift);
+  void ShiftRightArithPair(Register dst_low, Register dst_high,
+                           Register src_low, Register src_high,
+                           Register scratch, Register shift);
+  void ShiftRightArithPair(Register dst_low, Register dst_high,
+                           Register src_low, Register src_high, uint32_t shift);
+#endif
+
+  // Generates function and stub prologue code.
+  void StubPrologue(StackFrame::Type type, Register base = no_reg,
+                    int prologue_offset = 0);
+  void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
+
+  // Enter exit frame.
+  // stack_space - extra stack space, used for parameters before call to C.
+  // At least one slot (for the return address) should be provided.
+  void EnterExitFrame(bool save_doubles, int stack_space = 1);
+
+  // Leave the current exit frame. Expects the return value in r0.
+  // Expects, in a register, the number of values pushed prior to the exit
+  // frame that should be removed (or no_reg, if there is nothing to remove).
+  void LeaveExitFrame(bool save_doubles, Register argument_count,
+                      bool restore_context,
+                      bool argument_count_is_length = false);
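+
+  // Illustrative usage (editor's sketch, not part of the original commit):
+  // a call out to C++ is typically bracketed by an exit frame, e.g.
+  //   EnterExitFrame(false, 1);             // one slot of extra stack space
+  //   // ... set up arguments, CallCFunction(...) ...
+  //   LeaveExitFrame(false, no_reg, true);  // nothing extra to remove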
+
+  // Get the actual activation frame alignment for target environment.
+  static int ActivationFrameAlignment();
+
+  void LoadContext(Register dst, int context_chain_length);
+
+  // Load the global object from the current context.
+  void LoadGlobalObject(Register dst) {
+    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+  }
+
+  // Load the global proxy from the current context.
+  void LoadGlobalProxy(Register dst) {
+    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+  }
+
+  // Conditionally load the cached Array transitioned map of type
+  // transitioned_kind from the native context if the map in register
+  // map_in_out is the cached Array map in the native context of
+  // expected_kind.
+  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+                                           ElementsKind transitioned_kind,
+                                           Register map_in_out,
+                                           Register scratch,
+                                           Label* no_map_match);
+
+  void LoadNativeContextSlot(int index, Register dst);
+
+  // Load the initial map from the global function. The registers
+  // function and map can be the same, function is then overwritten.
+  void LoadGlobalFunctionInitialMap(Register function, Register map,
+                                    Register scratch);
+
+  void InitializeRootRegister() {
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
+    mov(kRootRegister, Operand(roots_array_start));
+  }
+
+  // ----------------------------------------------------------------
+  // New S390 macro-assembler interfaces that are slightly higher level
+  // than assembler-s390 and may generate variable-length sequences.
+
+  // load a literal signed int value <value> to GPR <dst>
+  void LoadIntLiteral(Register dst, int value);
+
+  // load an SMI value <value> to GPR <dst>
+  void LoadSmiLiteral(Register dst, Smi* smi);
+
+  // load a literal double value <value> to FPR <result>
+  void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
+  void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
+                         Register scratch);
+
+  void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
+
+  void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
+
+  void LoadHalfWordP(Register dst, const MemOperand& mem,
+                     Register scratch = no_reg);
+
+  void StoreHalfWord(Register src, const MemOperand& mem,
+                     Register scratch = r0);
+  void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
+
+  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
+                          Register scratch = no_reg);
+  void StoreRepresentation(Register src, const MemOperand& mem,
+                           Representation r, Register scratch = no_reg);
+
+  void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+  void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+  void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
+  void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
+  void AndSmiLiteral(Register dst, Register src, Smi* smi);
+
+  // Set new rounding mode RN to FPSCR
+  void SetRoundingMode(FPRoundingMode RN);
+
+  // reset rounding mode to default (kRoundToNearest)
+  void ResetRoundingMode();
+
+  // These exist to provide portability between 32-bit and 64-bit.
+  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+  void StoreP(const MemOperand& mem, const Operand& opnd,
+              Register scratch = no_reg);
+  void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
+  void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
+  void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
+  void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
+
+  // Cleanse a pointer address on 31-bit by zeroing out the top bit.
+  // This is a NOP on 64-bit.
+  void CleanseP(Register src) {
+#if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
+    nilh(src, Operand(0x7FFF));
+#endif
+  }
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Set up call kind marking in ecx. The method takes ecx as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  // void SetCallKind(Register dst, CallKind kind);
+
+  // Removes current frame and its arguments from the stack preserving
+  // the arguments and a return address pushed to the stack for the next call.
+  // Both |callee_args_count| and |caller_args_count_reg| do not include
+  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+  // is trashed.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1);
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeFunctionCode(Register function, Register new_target,
+                          const ParameterCount& expected,
+                          const ParameterCount& actual, InvokeFlag flag,
+                          const CallWrapper& call_wrapper);
+
+  void FloodFunctionIfStepping(Register fun, Register new_target,
+                               const ParameterCount& expected,
+                               const ParameterCount& actual);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function, Register new_target,
+                      const ParameterCount& actual, InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void InvokeFunction(Register function, const ParameterCount& expected,
+                      const ParameterCount& actual, InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void InvokeFunction(Handle<JSFunction> function,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual, InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void IsObjectJSStringType(Register object, Register scratch, Label* fail);
+
+  void IsObjectNameType(Register object, Register scratch, Label* fail);
+
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void DebugBreak();
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new stack handler and link into stack handler chain.
+  void PushStackHandler();
+
+  // Unlink the stack handler on top of the stack from the stack handler chain.
+  // Must preserve the result register.
+  void PopStackHandler();
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, whereas both scratch registers are clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
+                              Label* miss);
+
+  void GetNumberHash(Register t0, Register scratch);
+
+  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+                                Register result, Register t0, Register t1,
+                                Register t2);
+
+  inline void MarkCode(NopMarkerTypes type) { nop(type); }
+
+  // Check if the given instruction is a 'type' marker.
+  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
+  // These instructions are generated to mark special location in the code,
+  // like some special IC code.
+  static inline bool IsMarkedCode(Instr instr, int type) {
+    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+    return IsNop(instr, type);
+  }
+
+  static inline int GetCodeMarker(Instr instr) {
+    int dst_reg_offset = 12;
+    int dst_mask = 0xf << dst_reg_offset;
+    int src_mask = 0xf;
+    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
+    int src_reg = instr & src_mask;
+    uint32_t non_register_mask = ~(dst_mask | src_mask);
+    uint32_t mov_mask = al | 13 << 21;
+
+    // Return <n> if we have a mov rn rn, else return -1.
+    int type = ((instr & non_register_mask) == mov_mask) &&
+                       (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
+                       (dst_reg < LAST_CODE_MARKER)
+                   ? src_reg
+                   : -1;
+    DCHECK((type == -1) ||
+           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+    return type;
+  }
+
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space or old pointer space. The object_size is
+  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+  // is passed. If the space is exhausted control continues at the gc_required
+  // label. The allocated object is returned in result. If the flag
+  // tag_allocated_object is true the result is tagged as a heap object.
+  // All registers are clobbered also when control continues at the gc_required
+  // label.
+  void Allocate(int object_size, Register result, Register scratch1,
+                Register scratch2, Label* gc_required, AllocationFlags flags);
+
+  void Allocate(Register object_size, Register result, Register result_end,
+                Register scratch, Label* gc_required, AllocationFlags flags);
+
+  void AllocateTwoByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
+  void AllocateTwoByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
+  void AllocateTwoByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
+
+  // Allocates a heap number or jumps to the gc_required label if the young
+  // space is full and a scavenge is needed. All registers are clobbered also
+  // when control continues at the gc_required label.
+  void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+                          Register heap_number_map, Label* gc_required,
+                          TaggingMode tagging_mode = TAG_RESULT,
+                          MutableMode mode = IMMUTABLE);
+  void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
+                                   Register scratch1, Register scratch2,
+                                   Register heap_number_map,
+                                   Label* gc_required);
+
+  // Allocate and initialize a JSValue wrapper with the specified {constructor}
+  // and {value}.
+  void AllocateJSValue(Register result, Register constructor, Register value,
+                       Register scratch1, Register scratch2,
+                       Label* gc_required);
+
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written and length will be zero.
+  void CopyBytes(Register src, Register dst, Register length, Register scratch);
+
+  // Initialize fields with filler values.  |count| fields starting at
+  // |current_address| are overwritten with the value in |filler|.  At the end
+  // of the loop, |current_address| points at the next uninitialized field.
+  // |count| is assumed to be non-zero.
+  void InitializeNFieldsWithFiller(Register current_address, Register count,
+                                   Register filler);
+
+  // Initialize fields with filler values.  Fields starting at |current_address|
+  // not including |end_address| are overwritten with the value in |filler|.  At
+  // the end of the loop, |current_address| takes the value of |end_address|.
+  void InitializeFieldsWithFiller(Register current_address,
+                                  Register end_address, Register filler);
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Machine code version of Map::GetConstructor().
+  // |temp| holds |result|'s map when done, and |temp2| its instance type.
+  void GetMapConstructor(Register result, Register map, Register temp,
+                         Register temp2);
+
+  // Try to get function prototype of a function and puts the value in
+  // the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function, Register result,
+                               Register scratch, Label* miss);
+
+  // Compare object type for heap object.  heap_object contains a non-Smi
+  // whose object type should be compared with the given type.  This both
+  // sets the flags and leaves the object type in the type_reg register.
+  // It leaves the map in the map register (unless the type_reg and map register
+  // are the same register).  It leaves the heap object in the heap_object
+  // register unless the heap_object register is the same register as one of the
+  // other registers.
+  // Type_reg can be no_reg. In that case ip is used.
+  void CompareObjectType(Register heap_object, Register map, Register type_reg,
+                         InstanceType type);
+
+  // Compare instance type in a map.  map contains a valid map object whose
+  // object type should be compared with the given type.  This both
+  // sets the flags and leaves the object type in the type_reg register.
+  void CompareInstanceType(Register map, Register type_reg, InstanceType type);
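+
+  // Illustrative usage (editor's sketch, not part of the original commit),
+  // registers chosen for illustration only:
+  //   CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);
+  //   bne(&not_a_function);  // taken if r3 is not a JSFunction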
+
+  // Check if a map for a JSObject indicates that the object has fast elements.
+  // Jump to the specified label if it does not.
+  void CheckFastElements(Register map, Register scratch, Label* fail);
+
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map, Register scratch, Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiElements(Register map, Register scratch, Label* fail);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements. Otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
+                                   Register elements_reg, Register scratch1,
+                                   DoubleRegister double_scratch, Label* fail,
+                                   int elements_offset = 0);
+
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+  // set with result of map compare. If multiple map compares are required, the
+  // compare sequence branches to early_success.
+  void CompareMap(Register obj, Register scratch, Handle<Map> map,
+                  Label* early_success);
+
+  // As above, but the map of the object is already loaded into the register
+  // which is preserved by the code generated.
+  void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
+  void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
+                SmiCheckType smi_check_type);
+
+  void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
+                Label* fail, SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified weak map and branch
+  // to a specified target if equal. Skip the smi check if not required
+  // (object is known to be a heap object)
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
+
+  // Compare the given value and the value of weak cell.
+  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
+                    CRegister cr = cr7);
+
+  void GetWeakValue(Register value, Handle<WeakCell> cell);
+
+  // Load the value of the weak cell in the value register. Branch to the given
+  // miss label if the weak cell was cleared.
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
+
+  // Compare the object in a register to a value from the root list.
+  // Uses the ip register as scratch.
+  void CompareRoot(Register obj, Heap::RootListIndex index);
+  void PushRoot(Heap::RootListIndex index) {
+    LoadRoot(r0, index);
+    Push(r0);
+  }
+
+  // Compare the object in a register to a value and jump if they are equal.
+  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+    CompareRoot(with, index);
+    beq(if_equal);
+  }
+
+  // Compare the object in a register to a value and jump if they are not equal.
+  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+                     Label* if_not_equal) {
+    CompareRoot(with, index);
+    bne(if_not_equal);
+  }
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that will be enabled if the object was a string.
+  Condition IsObjectStringType(Register obj, Register type) {
+    LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    LoadlB(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    mov(r0, Operand(kIsNotStringMask));
+    AndP(r0, type);
+    DCHECK_EQ(0u, kStringTag);
+    return eq;
+  }
+
+  // Picks out an array index from the hash field.
+  // Register use:
+  //   hash - holds the index's hash. Clobbered.
+  //   index - holds the overwritten index on exit.
+  void IndexFromHash(Register hash, Register index);
+
+  // Get the number of least significant bits from a register
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
+  // Load the value of a smi object into a FP double register. The register
+  // scratch1 can be the same register as smi in which case smi will hold the
+  // untagged value afterwards.
+  void SmiToDouble(DoubleRegister value, Register smi);
+
+  // Check if a double can be exactly represented as a signed 32-bit integer.
+  // CR_EQ in cr7 is set if true.
+  void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
+                         Register scratch2, DoubleRegister double_scratch);
+
+  // Check if a double is equal to -0.0.
+  // CR_EQ in cr7 holds the result.
+  void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
+                             Register scratch2);
+
+  // Check the sign of a double.
+  // CR_LT in cr7 holds the result.
+  void TestDoubleSign(DoubleRegister input, Register scratch);
+  void TestHeapNumberSign(Register input, Register scratch);
+
+  // Try to convert a double to a signed 32-bit integer.
+  // CR_EQ in cr7 is set and result assigned if the conversion is exact.
+  void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
+                             Register scratch, DoubleRegister double_scratch);
+
+  // Floors a double and writes the value to the result register.
+  // Goes to exact if the conversion is exact (to be able to test -0),
+  // falls through to the calling code if an overflow occurred,
+  // else goes to done.
+  // On return, input_high is loaded with the high bits of the input.
+  void TryInt32Floor(Register result, DoubleRegister double_input,
+                     Register input_high, Register scratch,
+                     DoubleRegister double_scratch, Label* done, Label* exact);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  //
+  // Only public for the test code in test-code-stubs-arm.cc.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+  // Exits with 'result' holding the answer.
+  void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+  // Performs a truncating conversion of a heap number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+  // must be different registers.  Exits with 'result' holding the answer.
+  void TruncateHeapNumberToI(Register result, Register object);
+
+  // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+  // different registers.
+  void TruncateNumberToI(Register object, Register result,
+                         Register heap_number_map, Register scratch1,
+                         Label* not_int32);
+
+  // Overflow handling functions.
+  // Usage: call the appropriate arithmetic function and then call one of the
+  // flow control functions with the corresponding label.
+
+  // Compute dst = left + right, setting condition codes. dst may be same as
+  // either left or right (or a unique register). left and right must not be
+  // the same register.
+  void AddAndCheckForOverflow(Register dst, Register left, Register right,
+                              Register overflow_dst, Register scratch = r0);
+  void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
+                              Register overflow_dst, Register scratch = r0);
+
+  // Compute dst = left - right, setting condition codes. dst may be same as
+  // either left or right (or a unique register). left and right must not be
+  // the same register.
+  void SubAndCheckForOverflow(Register dst, Register left, Register right,
+                              Register overflow_dst, Register scratch = r0);
+
+  void BranchOnOverflow(Label* label) { blt(label /*, cr0*/); }
+
+  void BranchOnNoOverflow(Label* label) { bge(label /*, cr0*/); }
+
+  void RetOnOverflow(void) {
+    Label label;
+
+    blt(&label /*, cr0*/);
+    Ret();
+    bind(&label);
+  }
+
+  void RetOnNoOverflow(void) {
+    Label label;
+
+    bge(&label /*, cr0*/);
+    Ret();
+    bind(&label);
+  }
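+
+  // Illustrative usage (editor's sketch, not part of the original commit),
+  // following the pattern described above with r2..r5 free:
+  //   AddAndCheckForOverflow(r2, r3, r4, r5);  // r5 is the overflow indicator
+  //   BranchOnOverflow(&overflow);  // taken if the addition overflowed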
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
+                Condition cond = al);
+
+  // Call a code stub.
+  void TailCallStub(CodeStub* stub, Condition cond = al);
+
+  // Call a runtime routine.
+  void CallRuntime(const Runtime::Function* f, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+    const Runtime::Function* function = Runtime::FunctionForId(fid);
+    CallRuntime(function, function->nargs, kSaveFPRegs);
+  }
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId fid,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    const Runtime::Function* function = Runtime::FunctionForId(fid);
+    CallRuntime(function, function->nargs, save_doubles);
+  }
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+  }
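+
+  // Illustrative usage (editor's sketch, not part of the original commit),
+  // assuming Runtime::kStackGuard purely as an example id:
+  //   CallRuntime(Runtime::kStackGuard);             // nargs taken from the fid
+  //   CallRuntimeSaveDoubles(Runtime::kStackGuard);  // same, saving FP registers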
+
+  // Convenience function: call an external reference.
+  void CallExternalReference(const ExternalReference& ext, int num_arguments);
+
+  // Convenience function: tail call a runtime routine (jump).
+  void TailCallRuntime(Runtime::FunctionId fid);
+
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, non-register arguments must be stored in
+  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+  // are word sized. If double arguments are used, this function assumes that
+  // all double arguments are stored before core registers; otherwise the
+  // correct alignment of the double values is not guaranteed.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+  // There are two ways of passing double arguments on ARM, depending on
+  // whether soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different ways we call
+  // C functions from generated code.
+  void MovToFloatParameter(DoubleRegister src);
+  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+  void MovToFloatResult(DoubleRegister src);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by PrepareCallCFunction. The called function is not allowed to trigger a
+  // garbage collection, since that might move the code and invalidate the
+  // return address (unless this is somehow accounted for by the called
+  // function).
+  void CallCFunction(ExternalReference function, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
+  void CallCFunction(ExternalReference function, int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function, int num_reg_arguments,
+                     int num_double_arguments);
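+
+  // Illustrative usage (editor's sketch, not part of the original commit),
+  // assuming a two-word-argument C helper reachable via ExternalReference ref:
+  //   PrepareCallCFunction(2, ip);  // ip used as the scratch, for illustration
+  //   // ... move the arguments into the C calling-convention registers ...
+  //   CallCFunction(ref, 2);        // call and release the argument area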
+
+  void MovFromFloatParameter(DoubleRegister dst);
+  void MovFromFloatResult(DoubleRegister dst);
+
+  // Jump to a runtime routine.
+  void JumpToExternalReference(const ExternalReference& builtin);
+
+  Handle<Object> CodeObject() {
+    DCHECK(!code_object_.is_null());
+    return code_object_;
+  }
+
+  // Emit code for a truncating division by a constant. The dividend register is
+  // unchanged and ip gets clobbered. Dividend and result must be different.
+  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value, Register scratch1,
+                  Register scratch2);
+  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+                        Register scratch2);
+  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+                        Register scratch2);
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cond is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+  void AssertFastElements(Register elements);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+  // Print a message to stdout and abort execution.
+  void Abort(BailoutReason reason);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
+
+  // ---------------------------------------------------------------------------
+  // Number utilities
+
+  // Check whether the value of reg is a power of two and not zero. If not,
+  // control continues at the label not_power_of_two_or_zero. If reg is a
+  // power of two, the register scratch contains the value of (reg - 1) when
+  // control falls through.
+  void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
+                                 Label* not_power_of_two_or_zero);
+  // Check whether the value of reg is a power of two and not zero.
+  // Control falls through if it is, with scratch containing the mask
+  // value (reg - 1).
+  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
+  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
+  // strictly positive but not a power of two.
+  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
+                                       Label* zero_and_neg,
+                                       Label* not_power_of_two);
+
+  // ---------------------------------------------------------------------------
+  // Bit testing/extraction
+  //
+  // Bit numbering is such that the least significant bit is bit 0
+  // (for consistency between 32/64-bit).
+
+  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
+  // and place them into the least significant bits of dst.
+  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
+                              int rangeEnd) {
+    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+
+    // Try to use RISBG if possible.
+    if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+      int shiftAmount = (64 - rangeEnd) % 64;  // Convert to shift left.
+      int endBit = 63;  // End is always LSB after shifting.
+      int startBit = 63 - rangeStart + rangeEnd;
+      risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
+            true);
+    } else {
+      if (rangeEnd > 0)  // Don't need to shift if rangeEnd is zero.
+        ShiftRightP(dst, src, Operand(rangeEnd));
+      else if (!dst.is(src))  // If we didn't shift, we might need to copy
+        LoadRR(dst, src);
+      int width = rangeStart - rangeEnd + 1;
+#if V8_TARGET_ARCH_S390X
+      uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
+      nihf(dst, Operand(mask >> 32));
+      nilf(dst, Operand(mask & 0xFFFFFFFF));
+      ltgr(dst, dst);
+#else
+      uint32_t mask = (1 << width) - 1;
+      AndP(dst, Operand(mask));
+#endif
+    }
+  }
+
+  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
+    ExtractBitRange(dst, src, bitNumber, bitNumber);
+  }
+
+  // Extract consecutive bits (defined by mask) from src and place them
+  // into the least significant bits of dst.
+  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
+                             RCBit rc = LeaveRC) {
+    int start = kBitsPerPointer - 1;
+    int end;
+    uintptr_t bit = (1L << start);
+
+    while (bit && (mask & bit) == 0) {
+      start--;
+      bit >>= 1;
+    }
+    end = start;
+    bit >>= 1;
+
+    while (bit && (mask & bit)) {
+      end--;
+      bit >>= 1;
+    }
+
+    // 1-bits in mask must be contiguous
+    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
+
+    ExtractBitRange(dst, src, start, end);
+  }
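+
+  // Worked example (illustrative): a contiguous mask of 0x00F0 yields
+  // start == 7 and end == 4, so the call reduces to
+  // ExtractBitRange(dst, src, 7, 4).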
+
+  // Test single bit in value.
+  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
+    ExtractBitRange(scratch, value, bitNumber, bitNumber);
+  }
+
+  // Test consecutive bit range in value.  Range is defined by
+  // rangeStart - rangeEnd.
+  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
+                           Register scratch = r0) {
+    ExtractBitRange(scratch, value, rangeStart, rangeEnd);
+  }
+
+  // Test consecutive bit range in value.  Range is defined by mask.
+  inline void TestBitMask(Register value, uintptr_t mask,
+                          Register scratch = r0) {
+    ExtractBitMask(scratch, value, mask, SetRC);
+  }
+
+  // ---------------------------------------------------------------------------
+  // Smi utilities
+
+  // Shift left by kSmiShift
+  void SmiTag(Register reg) { SmiTag(reg, reg); }
+  void SmiTag(Register dst, Register src) {
+    ShiftLeftP(dst, src, Operand(kSmiShift));
+  }
+
+#if !V8_TARGET_ARCH_S390X
+  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+  void SmiTagCheckOverflow(Register reg, Register overflow);
+  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
+
+  inline void JumpIfNotSmiCandidate(Register value, Register scratch,
+                                    Label* not_smi_label) {
+    // High bits must be identical to fit into a Smi
+    STATIC_ASSERT(kSmiShift == 1);
+    AddP(scratch, value, Operand(0x40000000u));
+    CmpP(scratch, Operand::Zero());
+    blt(not_smi_label);
+  }
+#endif
+  inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle any of the high bits being set in the value.
+    TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
+                 scratch);
+  }
+  inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
+                                            Label* not_smi_label) {
+    TestUnsignedSmiCandidate(value, scratch);
+    bne(not_smi_label /*, cr0*/);
+  }
+
+  void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+
+  void SmiUntag(Register dst, Register src) {
+    ShiftRightArithP(dst, src, Operand(kSmiShift));
+  }
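+
+  // Illustrative round trip (sketch, assuming the 31-bit layout where
+  // kSmiShift == 1): SmiTag maps the integer 5 to the tagged value 10, and
+  // SmiUntag maps 10 back to 5 via an arithmetic right shift.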
+
+  void SmiToPtrArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
+    ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
+    ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+#endif
+  }
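+
+  // Illustrative note: this computes index * kPointerSize directly from the
+  // tagged value, shifting right on 64-bit targets (where
+  // kSmiShift > kPointerSizeLog2) and left on 31-bit targets.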
+
+  void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
+
+  void SmiToShortArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
+    ShiftRightArithP(dst, src, Operand(kSmiShift - 1));
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
+    if (!dst.is(src)) {
+      LoadRR(dst, src);
+    }
+#endif
+  }
+
+  void SmiToIntArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
+    ShiftRightArithP(dst, src, Operand(kSmiShift - 2));
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
+    ShiftLeftP(dst, src, Operand(2 - kSmiShift));
+#endif
+  }
+
+#define SmiToFloatArrayOffset SmiToIntArrayOffset
+
+  void SmiToDoubleArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
+    ShiftRightArithP(dst, src, Operand(kSmiShift - kDoubleSizeLog2));
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
+    ShiftLeftP(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
+#endif
+  }
+
+  void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
+    if (kSmiShift < elementSizeLog2) {
+      ShiftLeftP(dst, src, Operand(elementSizeLog2 - kSmiShift));
+    } else if (kSmiShift > elementSizeLog2) {
+      ShiftRightArithP(dst, src, Operand(kSmiShift - elementSizeLog2));
+    } else if (!dst.is(src)) {
+      LoadRR(dst, src);
+    }
+  }
+
+  void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
+                          bool isSmi) {
+    if (isSmi) {
+      SmiToArrayOffset(dst, src, elementSizeLog2);
+    } else {
+#if V8_TARGET_ARCH_S390X
+      // src (key) is a 32-bit integer.  Sign extension ensures
+      // upper 32 bits do not contain garbage before being used to
+      // reference memory.
+      lgfr(src, src);
+#endif
+      ShiftLeftP(dst, src, Operand(elementSizeLog2));
+    }
+  }
+
+  // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+  // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
+  inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
+
+  inline void TestIfPositiveSmi(Register value, Register scratch) {
+    STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
+                  (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
+    mov(scratch, Operand(kIntptrSignBit | kSmiTagMask));
+    AndP(scratch, value);
+  }
+
+  // Jump if the register contains a smi.
+  inline void JumpIfSmi(Register value, Label* smi_label) {
+    TestIfSmi(value);
+    beq(smi_label /*, cr0*/);  // branch if SMI
+  }
+  // Jump if the register contains a non-smi.
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+    TestIfSmi(value);
+    bne(not_smi_label /*, cr0*/);
+  }
+  // Jump if either of the registers contains a non-smi.
+  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+  // Jump if either of the registers contains a smi.
+  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+  // Abort execution if argument is a number, enabled via --debug-code.
+  void AssertNotNumber(Register object);
+
+  // Abort execution if argument is a smi, enabled via --debug-code.
+  void AssertNotSmi(Register object);
+  void AssertSmi(Register object);
+
+#if V8_TARGET_ARCH_S390X
+  inline void TestIfInt32(Register value, Register scratch) {
+    // High bits must be identical to fit into a 32-bit integer
+    lgfr(scratch, value);
+    CmpP(scratch, value);
+  }
+#else
+  inline void TestIfInt32(Register hi_word, Register lo_word,
+                          Register scratch) {
+    // High bits must be identical to fit into a 32-bit integer
+    ShiftRightArith(scratch, lo_word, Operand(31));
+    CmpP(scratch, hi_word);
+  }
+#endif
+
+#if V8_TARGET_ARCH_S390X
+  // Ensure it is permissible to read/write int value directly from
+  // upper half of the smi.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#endif
+#if V8_TARGET_LITTLE_ENDIAN
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#else
+#define SmiWordOffset(offset) offset
+#endif
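+
+  // Illustrative note (sketch): on a 64-bit target the int32 payload of a
+  // smi lives in the upper (most significant) word, so SmiWordOffset(offset)
+  // addresses that word directly; the +kPointerSize/2 adjustment is needed
+  // only on little-endian targets, where the upper word sits at the higher
+  // address.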
+
+  // Abort execution if argument is not a string, enabled via --debug-code.
+  void AssertString(Register object);
+
+  // Abort execution if argument is not a name, enabled via --debug-code.
+  void AssertName(Register object);
+
+  void AssertFunction(Register object);
+
+  // Abort execution if argument is not a JSBoundFunction,
+  // enabled via --debug-code.
+  void AssertBoundFunction(Register object);
+
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
+  // Abort execution if argument is not undefined or an AllocationSite, enabled
+  // via --debug-code.
+  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+  // Abort execution if reg is not the root value with the given index,
+  // enabled via --debug-code.
+  void AssertIsRoot(Register reg, Heap::RootListIndex index);
+
+  // ---------------------------------------------------------------------------
+  // HeapNumber utilities
+
+  void JumpIfNotHeapNumber(Register object, Register heap_number_map,
+                           Register scratch, Label* on_not_heap_number);
+
+  // ---------------------------------------------------------------------------
+  // String utilities
+
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not. Assumes that neither object is a smi.
+  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
+                                                    Register object2,
+                                                    Register scratch1,
+                                                    Register scratch2,
+                                                    Label* failure);
+
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not.
+  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* not_flat_one_byte_strings);
+
+  // Checks if both instance types are sequential one-byte strings and jumps to
+  // label if either is not.
+  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* failure);
+
+  // Check if instance type is sequential one-byte string and jump to label if
+  // it is not.
+  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+                                                Label* failure);
+
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+
+  void EmitSeqStringSetCharCheck(Register string, Register index,
+                                 Register value, uint32_t encoding_mask);
+
+  // ---------------------------------------------------------------------------
+  // Patching helpers.
+
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  // Saturate a value into an 8-bit unsigned integer
+  //   if input_value < 0, output_value is 0
+  //   if input_value > 255, output_value is 255
+  //   otherwise output_value is the (int)input_value (round to nearest)
+  void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+  void EnumLength(Register dst, Register map);
+  void NumberOfOwnDescriptors(Register dst, Register map);
+  void LoadAccessor(Register dst, Register holder, int accessor_index,
+                    AccessorComponent accessor);
+
+  template <typename Field>
+  void DecodeField(Register dst, Register src) {
+    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+  }
+
+  template <typename Field>
+  void DecodeField(Register reg) {
+    DecodeField<Field>(reg, reg);
+  }
+
+  template <typename Field>
+  void DecodeFieldToSmi(Register dst, Register src) {
+    // TODO(joransiu): Optimize into single instruction
+    DecodeField<Field>(dst, src);
+    SmiTag(dst);
+  }
+
+  template <typename Field>
+  void DecodeFieldToSmi(Register reg) {
+    DecodeFieldToSmi<Field>(reg, reg);
+  }
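+
+  // Illustrative use (sketch): for a BitField-style Field with
+  // Field::kShift == 3 and Field::kSize == 4, DecodeField extracts bits 6..3
+  // of src into the low bits of dst; DecodeFieldToSmi additionally smi-tags
+  // the result.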
+
+  // Load the type feedback vector from a JavaScript frame.
+  void EmitLoadTypeFeedbackVector(Register vector);
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type,
+                  bool load_constant_pool_pointer_reg = false);
+  // Returns the pc offset at which the frame ends.
+  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+
+  // Expects object in r2 and returns map with validated enum cache
+  // in r2.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Label* call_runtime);
+
+  // AllocationMemento support. Arrays may have an associated
+  // AllocationMemento object that can be checked for in order to pretransition
+  // to another type.
+  // On entry, receiver_reg should point to the array object.
+  // scratch_reg gets clobbered.
+  // If allocation info is present, condition flags are set to eq.
+  void TestJSArrayForAllocationMemento(Register receiver_reg,
+                                       Register scratch_reg,
+                                       Register scratch2_reg,
+                                       Label* no_memento_found);
+
+  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+                                         Register scratch_reg,
+                                         Register scratch2_reg,
+                                         Label* memento_found) {
+    Label no_memento_found;
+    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
+                                    &no_memento_found);
+    beq(memento_found);
+    bind(&no_memento_found);
+  }
+
+  // Jumps to found label if a prototype map has dictionary elements.
+  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+                                        Register scratch1, Label* found);
+
+ private:
+  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
+            CRegister cr = cr7);
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual, Label* done,
+                      bool* definitely_mismatches, InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void InitializeNewString(Register string, Register length,
+                           Heap::RootListIndex map_index, Register scratch1,
+                           Register scratch2);
+
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object, Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
+                          Register mask_reg);
+
+  static const RegList kSafepointSavedRegisters;
+  static const int kNumSafepointSavedRegisters;
+
+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+  bool generating_stub_;
+  bool has_frame_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for compiled frame
+  // traversal.
+  friend class StandardFrame;
+};
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+  enum FlushICache { FLUSH, DONT_FLUSH };
+
+  CodePatcher(Isolate* isolate, byte* address, int instructions,
+              FlushICache flush_cache = FLUSH);
+  ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+ private:
+  byte* address_;            // The address of the code being patched.
+  int size_;                 // Number of bytes of the expected patch size.
+  MacroAssembler masm_;      // Macro assembler used to generate the code.
+  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
+};
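+
+// Illustrative usage (sketch, names and instruction choice hypothetical):
+//   CodePatcher patcher(isolate, pc, 2);  // patch two instructions at pc
+//   patcher.masm()->nop();                // emit replacement code
+//   patcher.masm()->nop();
+// On destruction the patched range is flushed from the instruction cache
+// unless DONT_FLUSH was requested.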
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+inline MemOperand NativeContextMemOperand() {
+  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
+}
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm)    \
+  masm->stop(__FILE_LINE__); \
+  masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_S390_MACRO_ASSEMBLER_S390_H_
diff --git a/src/s390/simulator-s390.cc b/src/s390/simulator-s390.cc
new file mode 100644
index 0000000..06e52a7
--- /dev/null
+++ b/src/s390/simulator-s390.cc
@@ -0,0 +1,5128 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <cmath>
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/assembler.h"
+#include "src/base/bits.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/s390/constants-s390.h"
+#include "src/s390/frames-s390.h"
+#include "src/s390/simulator-s390.h"
+#if defined(USE_SIMULATOR)
+
+// Only build the simulator if not compiling for real s390 hardware.
+namespace v8 {
+namespace internal {
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf  // NOLINT
+
+// The S390Debugger class is used by the simulator while debugging simulated
+// z/Architecture code.
+class S390Debugger {
+ public:
+  explicit S390Debugger(Simulator* sim) : sim_(sim) {}
+  ~S390Debugger();
+
+  void Stop(Instruction* instr);
+  void Debug();
+
+ private:
+#if V8_TARGET_LITTLE_ENDIAN
+  static const Instr kBreakpointInstr = (0x0000FFB2);  // TRAP4 0000
+  static const Instr kNopInstr = (0x00160016);         // OR r0, r0 x2
+#else
+  static const Instr kBreakpointInstr = (0xB2FF0000);  // TRAP4 0000
+  static const Instr kNopInstr = (0x16001600);         // OR r0, r0 x2
+#endif
+
+  Simulator* sim_;
+
+  intptr_t GetRegisterValue(int regnum);
+  double GetRegisterPairDoubleValue(int regnum);
+  double GetFPDoubleRegisterValue(int regnum);
+  float GetFPFloatRegisterValue(int regnum);
+  bool GetValue(const char* desc, intptr_t* value);
+  bool GetFPDoubleValue(const char* desc, double* value);
+
+  // Set or delete a breakpoint. Returns true if successful.
+  bool SetBreakpoint(Instruction* break_pc);
+  bool DeleteBreakpoint(Instruction* break_pc);
+
+  // Undo and redo all breakpoints. This is needed to bracket disassembly and
+  // execution to skip past breakpoints when run from the debugger.
+  void UndoBreakpoints();
+  void RedoBreakpoints();
+};
+
+S390Debugger::~S390Debugger() {}
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+static void InitializeCoverage() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "aw+");
+  }
+}
+
+void S390Debugger::Stop(Instruction* instr) {
+  // Get the stop code.
+  uint32_t code = instr->SvcValue() & kStopCodeMask;
+  // Retrieve the encoded address, which comes just after this stop.
+  char** msg_address =
+      reinterpret_cast<char**>(sim_->get_pc() + sizeof(FourByteInstr));
+  char* msg = *msg_address;
+  DCHECK(msg != NULL);
+
+  // Update this stop description.
+  if (isWatchedStop(code) && !watched_stops_[code].desc) {
+    watched_stops_[code].desc = msg;
+  }
+
+  if (strlen(msg) > 0) {
+    if (coverage_log != NULL) {
+      fprintf(coverage_log, "%s\n", msg);
+      fflush(coverage_log);
+    }
+    // Overwrite the instruction and address with nops.
+    instr->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
+  }
+  sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr) + kPointerSize);
+}
+
+#else  // ndef GENERATED_CODE_COVERAGE
+
+static void InitializeCoverage() {}
+
+void S390Debugger::Stop(Instruction* instr) {
+  // Get the stop code.
+  // use of kStopCodeMask not right on PowerPC
+  uint32_t code = instr->SvcValue() & kStopCodeMask;
+  // Retrieve the encoded address, which comes just after this stop.
+  char* msg = *reinterpret_cast<char**>(sim_->get_pc() + sizeof(FourByteInstr));
+  // Update this stop description.
+  if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
+  }
+  // Print the stop message and code if it is not the default code.
+  if (code != kMaxStopCode) {
+    PrintF("Simulator hit stop %u: %s\n", code, msg);
+  } else {
+    PrintF("Simulator hit %s\n", msg);
+  }
+  sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr) + kPointerSize);
+  Debug();
+}
+#endif
+
+intptr_t S390Debugger::GetRegisterValue(int regnum) {
+  return sim_->get_register(regnum);
+}
+
+double S390Debugger::GetRegisterPairDoubleValue(int regnum) {
+  return sim_->get_double_from_register_pair(regnum);
+}
+
+double S390Debugger::GetFPDoubleRegisterValue(int regnum) {
+  return sim_->get_double_from_d_register(regnum);
+}
+
+float S390Debugger::GetFPFloatRegisterValue(int regnum) {
+  return sim_->get_float32_from_d_register(regnum);
+}
+
+bool S390Debugger::GetValue(const char* desc, intptr_t* value) {
+  int regnum = Registers::Number(desc);
+  if (regnum != kNoRegister) {
+    *value = GetRegisterValue(regnum);
+    return true;
+  } else {
+    if (strncmp(desc, "0x", 2) == 0) {
+      return SScanF(desc + 2, "%" V8PRIxPTR,
+                    reinterpret_cast<uintptr_t*>(value)) == 1;
+    } else {
+      return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) ==
+             1;
+    }
+  }
+  return false;
+}
+
+bool S390Debugger::GetFPDoubleValue(const char* desc, double* value) {
+  int regnum = DoubleRegisters::Number(desc);
+  if (regnum != kNoRegister) {
+    *value = sim_->get_double_from_d_register(regnum);
+    return true;
+  }
+  return false;
+}
+
+bool S390Debugger::SetBreakpoint(Instruction* break_pc) {
+  // Check if a breakpoint can be set. If not return without any side-effects.
+  if (sim_->break_pc_ != NULL) {
+    return false;
+  }
+
+  // Set the breakpoint.
+  sim_->break_pc_ = break_pc;
+  sim_->break_instr_ = break_pc->InstructionBits();
+  // Not setting the breakpoint instruction in the code itself. It will be set
+  // when the debugger shell continues.
+  return true;
+}
+
+bool S390Debugger::DeleteBreakpoint(Instruction* break_pc) {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+
+  sim_->break_pc_ = NULL;
+  sim_->break_instr_ = 0;
+  return true;
+}
+
+void S390Debugger::UndoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+}
+
+void S390Debugger::RedoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+  }
+}
+
+void S390Debugger::Debug() {
+  intptr_t last_pc = -1;
+  bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+  char* argv[3] = {cmd, arg1, arg2};
+
+  // make sure to have a proper terminating character if reaching the limit
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  // Undo all set breakpoints while running in the debugger shell. This will
+  // make them invisible to all commands.
+  UndoBreakpoints();
+  // Disable tracing while simulating
+  bool trace = ::v8::internal::FLAG_trace_sim;
+  ::v8::internal::FLAG_trace_sim = false;
+
+  while (!done && !sim_->has_bad_pc()) {
+    if (last_pc != sim_->get_pc()) {
+      disasm::NameConverter converter;
+      disasm::Disassembler dasm(converter);
+      // use a reasonably large buffer
+      v8::internal::EmbeddedVector<char, 256> buffer;
+      dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+      PrintF("  0x%08" V8PRIxPTR "  %s\n", sim_->get_pc(), buffer.start());
+      last_pc = sim_->get_pc();
+    }
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      char* last_input = sim_->last_debugger_input();
+      if (strcmp(line, "\n") == 0 && last_input != NULL) {
+        line = last_input;
+      } else {
+        // Ownership is transferred to sim_;
+        sim_->set_last_debugger_input(line);
+      }
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int argc = SScanF(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        intptr_t value;
+
+        // If at a breakpoint, proceed past it.
+        if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+                ->InstructionBits() == 0x7d821008) {
+          sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
+        } else {
+          sim_->ExecuteInstruction(
+              reinterpret_cast<Instruction*>(sim_->get_pc()));
+        }
+
+        if (argc == 2 && last_pc != sim_->get_pc() && GetValue(arg1, &value)) {
+          for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
+            disasm::NameConverter converter;
+            disasm::Disassembler dasm(converter);
+            // use a reasonably large buffer
+            v8::internal::EmbeddedVector<char, 256> buffer;
+            dasm.InstructionDecode(buffer,
+                                   reinterpret_cast<byte*>(sim_->get_pc()));
+            PrintF("  0x%08" V8PRIxPTR "  %s\n", sim_->get_pc(),
+                   buffer.start());
+            sim_->ExecuteInstruction(
+                reinterpret_cast<Instruction*>(sim_->get_pc()));
+          }
+        }
+      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+        // If at a breakpoint, proceed past it.
+        if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+                ->InstructionBits() == 0x7d821008) {
+          sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr));
+        } else {
+          // Execute the one instruction we broke at with breakpoints disabled.
+          sim_->ExecuteInstruction(
+              reinterpret_cast<Instruction*>(sim_->get_pc()));
+        }
+        // Leave the debugger shell.
+        done = true;
+      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+        if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+          intptr_t value;
+          double dvalue;
+          if (strcmp(arg1, "all") == 0) {
+            for (int i = 0; i < kNumRegisters; i++) {
+              value = GetRegisterValue(i);
+              PrintF("    %3s: %08" V8PRIxPTR,
+                     Register::from_code(i).ToString(), value);
+              if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+                  (i % 2) == 0) {
+                dvalue = GetRegisterPairDoubleValue(i);
+                PrintF(" (%f)\n", dvalue);
+              } else if (i != 0 && !((i + 1) & 3)) {
+                PrintF("\n");
+              }
+            }
+            PrintF("  pc: %08" V8PRIxPTR "  cr: %08x\n", sim_->special_reg_pc_,
+                   sim_->condition_reg_);
+          } else if (strcmp(arg1, "alld") == 0) {
+            for (int i = 0; i < kNumRegisters; i++) {
+              value = GetRegisterValue(i);
+              PrintF("     %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
+                     Register::from_code(i).ToString(), value, value);
+              if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+                  (i % 2) == 0) {
+                dvalue = GetRegisterPairDoubleValue(i);
+                PrintF(" (%f)\n", dvalue);
+              } else if (!((i + 1) % 2)) {
+                PrintF("\n");
+              }
+            }
+            PrintF("   pc: %08" V8PRIxPTR "  cr: %08x\n", sim_->special_reg_pc_,
+                   sim_->condition_reg_);
+          } else if (strcmp(arg1, "allf") == 0) {
+            for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
+              float fvalue = GetFPFloatRegisterValue(i);
+              uint32_t as_words = bit_cast<uint32_t>(fvalue);
+              PrintF("%3s: %f 0x%08x\n",
+                     DoubleRegister::from_code(i).ToString(), fvalue, as_words);
+            }
+          } else if (strcmp(arg1, "alld") == 0) {
+            for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
+              dvalue = GetFPDoubleRegisterValue(i);
+              uint64_t as_words = bit_cast<uint64_t>(dvalue);
+              PrintF("%3s: %f 0x%08x %08x\n",
+                     DoubleRegister::from_code(i).ToString(), dvalue,
+                     static_cast<uint32_t>(as_words >> 32),
+                     static_cast<uint32_t>(as_words & 0xffffffff));
+            }
+          } else if (arg1[0] == 'r' &&
+                     (arg1[1] >= '0' && arg1[1] <= '2' &&
+                      (arg1[2] == '\0' || (arg1[2] >= '0' && arg1[2] <= '5' &&
+                                           arg1[3] == '\0')))) {
+            int regnum = strtoul(&arg1[1], 0, 10);
+            if (regnum != kNoRegister) {
+              value = GetRegisterValue(regnum);
+              PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+                     value);
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          } else {
+            if (GetValue(arg1, &value)) {
+              PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+                     value);
+            } else if (GetFPDoubleValue(arg1, &dvalue)) {
+              uint64_t as_words = bit_cast<uint64_t>(dvalue);
+              PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
+                     static_cast<uint32_t>(as_words >> 32),
+                     static_cast<uint32_t>(as_words & 0xffffffff));
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          }
+        } else {
+          PrintF("print <register>\n");
+        }
+      } else if ((strcmp(cmd, "po") == 0) ||
+                 (strcmp(cmd, "printobject") == 0)) {
+        if (argc == 2) {
+          intptr_t value;
+          OFStream os(stdout);
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            os << arg1 << ": \n";
+#ifdef DEBUG
+            obj->Print(os);
+            os << "\n";
+#else
+            os << Brief(obj) << "\n";
+#endif
+          } else {
+            os << arg1 << " unrecognized\n";
+          }
+        } else {
+          PrintF("printobject <value>\n");
+        }
+      } else if (strcmp(cmd, "setpc") == 0) {
+        intptr_t value;
+
+        if (!GetValue(arg1, &value)) {
+          PrintF("%s unrecognized\n", arg1);
+          continue;
+        }
+        sim_->set_pc(value);
+      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+        intptr_t* cur = NULL;
+        intptr_t* end = NULL;
+        int next_arg = 1;
+
+        if (strcmp(cmd, "stack") == 0) {
+          cur = reinterpret_cast<intptr_t*>(sim_->get_register(Simulator::sp));
+        } else {  // "mem"
+          intptr_t value;
+          if (!GetValue(arg1, &value)) {
+            PrintF("%s unrecognized\n", arg1);
+            continue;
+          }
+          cur = reinterpret_cast<intptr_t*>(value);
+          next_arg++;
+        }
+
+        intptr_t words;  // likely inaccurate variable name for 64bit
+        if (argc == next_arg) {
+          words = 10;
+        } else {
+          if (!GetValue(argv[next_arg], &words)) {
+            words = 10;
+          }
+        }
+        end = cur + words;
+
+        while (cur < end) {
+          PrintF("  0x%08" V8PRIxPTR ":  0x%08" V8PRIxPTR " %10" V8PRIdPTR,
+                 reinterpret_cast<intptr_t>(cur), *cur, *cur);
+          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+          intptr_t value = *cur;
+          Heap* current_heap = sim_->isolate_->heap();
+          if (((value & 1) == 0) ||
+              current_heap->ContainsSlow(obj->address())) {
+            PrintF("(smi %d)", PlatformSmiTagging::SmiToInt(obj));
+          } else if (current_heap->Contains(obj)) {
+            PrintF(" (");
+            obj->ShortPrint();
+            PrintF(")");
+          }
+          PrintF("\n");
+          cur++;
+        }
+      } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // use a reasonably large buffer
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte* prev = NULL;
+        byte* cur = NULL;
+        // Default number of instructions to disassemble.
+        int32_t numInstructions = 10;
+
+        if (argc == 1) {
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
+        } else if (argc == 2) {
+          int regnum = Registers::Number(arg1);
+          if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
+            // The argument is an address or a register name.
+            intptr_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(value);
+            }
+          } else {
+            // The argument is the number of instructions.
+            intptr_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(sim_->get_pc());
+              // Disassemble <arg1> instructions.
+              numInstructions = static_cast<int32_t>(value);
+            }
+          }
+        } else {
+          intptr_t value1;
+          intptr_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte*>(value1);
+            // Disassemble <arg2> instructions.
+            numInstructions = static_cast<int32_t>(value2);
+          }
+        }
+
+        while (numInstructions > 0) {
+          prev = cur;
+          cur += dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08" V8PRIxPTR "  %s\n", reinterpret_cast<intptr_t>(prev),
+                 buffer.start());
+          numInstructions--;
+        }
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("relinquishing control to gdb\n");
+        v8::base::OS::DebugBreak();
+        PrintF("regaining control from gdb\n");
+      } else if (strcmp(cmd, "break") == 0) {
+        if (argc == 2) {
+          intptr_t value;
+          if (GetValue(arg1, &value)) {
+            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+              PrintF("setting breakpoint failed\n");
+            }
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("break <address>\n");
+        }
+      } else if (strcmp(cmd, "del") == 0) {
+        if (!DeleteBreakpoint(NULL)) {
+          PrintF("deleting breakpoint failed\n");
+        }
+      } else if (strcmp(cmd, "cr") == 0) {
+        PrintF("Condition reg: %08x\n", sim_->condition_reg_);
+      } else if (strcmp(cmd, "stop") == 0) {
+        intptr_t value;
+        intptr_t stop_pc =
+            sim_->get_pc() - (sizeof(FourByteInstr) + kPointerSize);
+        Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+        Instruction* msg_address =
+            reinterpret_cast<Instruction*>(stop_pc + sizeof(FourByteInstr));
+        if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+          // Remove the current stop.
+          if (sim_->isStopInstruction(stop_instr)) {
+            stop_instr->SetInstructionBits(kNopInstr);
+            msg_address->SetInstructionBits(kNopInstr);
+          } else {
+            PrintF("Not at debugger stop.\n");
+          }
+        } else if (argc == 3) {
+          // Print information about all/the specified breakpoint(s).
+          if (strcmp(arg1, "info") == 0) {
+            if (strcmp(arg2, "all") == 0) {
+              PrintF("Stop information:\n");
+              for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+                sim_->PrintStopInfo(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->PrintStopInfo(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "enable") == 0) {
+            // Enable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+                sim_->EnableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->EnableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "disable") == 0) {
+            // Disable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+                sim_->DisableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->DisableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          }
+        } else {
+          PrintF("Wrong usage. Use help command for more information.\n");
+        }
+      } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
+        ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+        PrintF("Trace of executed instructions is %s\n",
+               ::v8::internal::FLAG_trace_sim ? "on" : "off");
+      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+        PrintF("cont\n");
+        PrintF("  continue execution (alias 'c')\n");
+        PrintF("stepi [num instructions]\n");
+        PrintF("  step one/num instruction(s) (alias 'si')\n");
+        PrintF("print <register>\n");
+        PrintF("  print register content (alias 'p')\n");
+        PrintF("  use register name 'all' to display all integer registers\n");
+        PrintF(
+            "  use register name 'alld' to display integer registers "
+            "with decimal values\n");
+        PrintF("  use register name 'rN' to display register number 'N'\n");
+        PrintF("  add argument 'fp' to print register pair double values\n");
+        PrintF(
+            "  use register name 'allf' to display floating-point "
+            "registers\n");
+        PrintF("printobject <register>\n");
+        PrintF("  print an object from a register (alias 'po')\n");
+        PrintF("cr\n");
+        PrintF("  print condition register\n");
+        PrintF("stack [<num words>]\n");
+        PrintF("  dump stack content, default dump 10 words)\n");
+        PrintF("mem <address> [<num words>]\n");
+        PrintF("  dump memory content, default dump 10 words)\n");
+        PrintF("disasm [<instructions>]\n");
+        PrintF("disasm [<address/register>]\n");
+        PrintF("disasm [[<address/register>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions\n");
+        PrintF("  from pc (alias 'di')\n");
+        PrintF("gdb\n");
+        PrintF("  enter gdb\n");
+        PrintF("break <address>\n");
+        PrintF("  set a break point on the address\n");
+        PrintF("del\n");
+        PrintF("  delete the breakpoint\n");
+        PrintF("trace (alias 't')\n");
+        PrintF("  toogle the tracing of all executed statements\n");
+        PrintF("stop feature:\n");
+        PrintF("  Description:\n");
+        PrintF("    Stops are debug instructions inserted by\n");
+        PrintF("    the Assembler::stop() function.\n");
+        PrintF("    When hitting a stop, the Simulator will\n");
+        PrintF("    stop and and give control to the S390Debugger.\n");
+        PrintF("    The first %d stop codes are watched:\n",
+               Simulator::kNumOfWatchedStops);
+        PrintF("    - They can be enabled / disabled: the Simulator\n");
+        PrintF("      will / won't stop when hitting them.\n");
+        PrintF("    - The Simulator keeps track of how many times they \n");
+        PrintF("      are met. (See the info command.) Going over a\n");
+        PrintF("      disabled stop still increases its counter. \n");
+        PrintF("  Commands:\n");
+        PrintF("    stop info all/<code> : print infos about number <code>\n");
+        PrintF("      or all stop(s).\n");
+        PrintF("    stop enable/disable all/<code> : enables / disables\n");
+        PrintF("      all or number <code> stop(s)\n");
+        PrintF("    stop unstop\n");
+        PrintF("      ignore the stop instruction at the current location\n");
+        PrintF("      from now on\n");
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+      }
+    }
+  }
+
+  // Add all the breakpoints back to stop execution and enter the debugger
+  // shell when hit.
+  RedoBreakpoints();
+  // Restore tracing
+  ::v8::internal::FLAG_trace_sim = trace;
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool ICacheMatch(void* one, void* two) {
+  DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+  return one == two;
+}
+
+static uint32_t ICacheHash(void* key) {
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+  intptr_t start_page = (start & ~CachePage::kPageMask);
+  intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+  return start_page == end_page;
+}
+
+void Simulator::set_last_debugger_input(char* input) {
+  DeleteArray(last_debugger_input_);
+  last_debugger_input_ = input;
+}
+
+void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+                            size_t size) {
+  intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+  int intra_line = (start & CachePage::kLineMask);
+  start -= intra_line;
+  size += intra_line;
+  size = ((size - 1) | CachePage::kLineMask) + 1;
+  int offset = (start & CachePage::kPageMask);
+  while (!AllOnOnePage(start, size - 1)) {
+    int bytes_to_flush = CachePage::kPageSize - offset;
+    FlushOnePage(i_cache, start, bytes_to_flush);
+    start += bytes_to_flush;
+    size -= bytes_to_flush;
+    DCHECK_EQ(0, static_cast<int>(start & CachePage::kPageMask));
+    offset = 0;
+  }
+  if (size != 0) {
+    FlushOnePage(i_cache, start, size);
+  }
+}
+
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+  v8::internal::HashMap::Entry* entry =
+      i_cache->LookupOrInsert(page, ICacheHash(page));
+  if (entry->value == NULL) {
+    CachePage* new_page = new CachePage();
+    entry->value = new_page;
+  }
+  return reinterpret_cast<CachePage*>(entry->value);
+}
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+                             int size) {
+  DCHECK(size <= CachePage::kPageSize);
+  DCHECK(AllOnOnePage(start, size - 1));
+  DCHECK((start & CachePage::kLineMask) == 0);
+  DCHECK((size & CachePage::kLineMask) == 0);
+  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+  int offset = (start & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* valid_bytemap = cache_page->ValidityByte(offset);
+  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+                            Instruction* instr) {
+  intptr_t address = reinterpret_cast<intptr_t>(instr);
+  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+  int offset = (address & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* cache_valid_byte = cache_page->ValidityByte(offset);
+  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+  if (cache_hit) {
+    // Check that the data in memory matches the contents of the I-cache.
+    CHECK_EQ(memcmp(reinterpret_cast<void*>(instr),
+                    cache_page->CachedData(offset), sizeof(FourByteInstr)),
+             0);
+  } else {
+    // Cache miss.  Load memory into the cache.
+    memcpy(cached_line, line, CachePage::kLineLength);
+    *cache_valid_byte = CachePage::LINE_VALID;
+  }
+}
+
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
+}
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+  i_cache_ = isolate_->simulator_i_cache();
+  if (i_cache_ == NULL) {
+    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    isolate_->set_simulator_i_cache(i_cache_);
+  }
+  Initialize(isolate);
+// Set up simulator support first. Some of this information is needed to
+// set up the architecture state.
+#if V8_TARGET_ARCH_S390X
+  size_t stack_size = FLAG_sim_stack_size * KB;
+#else
+  size_t stack_size = MB;  // allocate 1MB for stack
+#endif
+  stack_size += 2 * stack_protection_size_;
+  stack_ = reinterpret_cast<char*>(malloc(stack_size));
+  pc_modified_ = false;
+  icount_ = 0;
+  break_pc_ = NULL;
+  break_instr_ = 0;
+
+// make sure our register type can hold exactly 4/8 bytes
+#ifdef V8_TARGET_ARCH_S390X
+  DCHECK(sizeof(intptr_t) == 8);
+#else
+  DCHECK(sizeof(intptr_t) == 4);
+#endif
+  // Set up architecture state.
+  // All registers are initialized to zero to start with.
+  for (int i = 0; i < kNumGPRs; i++) {
+    registers_[i] = 0;
+  }
+  condition_reg_ = 0;
+  special_reg_pc_ = 0;
+
+  // Initializing FP registers.
+  for (int i = 0; i < kNumFPRs; i++) {
+    fp_registers_[i] = 0.0;
+  }
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To be safe against potential stack underflows we
+  // leave some buffer below.
+  registers_[sp] =
+      reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
+  InitializeCoverage();
+
+  last_debugger_input_ = NULL;
+}
+
+Simulator::~Simulator() { free(stack_); }
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to a svc (Supervisor Call) instruction that is handled by
+// the simulator.  We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
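+//
+// Illustrative flow (sketch): RedirectExternalReference(isolate, fn, type)
+// returns the address of a per-function Redirection's swi_instruction_; when
+// the simulator hits that trap it recovers the Redirection via
+// FromSwiInstruction() and invokes the original host function.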
+class Redirection {
+ public:
+  Redirection(Isolate* isolate, void* external_function,
+              ExternalReference::Type type)
+      : external_function_(external_function),
+// we use TRAP4 here (0xBF22)
+#if V8_TARGET_LITTLE_ENDIAN
+        swi_instruction_(0x1000FFB2),
+#else
+        swi_instruction_(0xB2FF0000 | kCallRtRedirected),
+#endif
+        type_(type),
+        next_(NULL) {
+    next_ = isolate->simulator_redirection();
+    Simulator::current(isolate)->FlushICache(
+        isolate->simulator_i_cache(),
+        reinterpret_cast<void*>(&swi_instruction_), sizeof(FourByteInstr));
+    isolate->set_simulator_redirection(this);
+    if (ABI_USES_FUNCTION_DESCRIPTORS) {
+      function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
+      function_descriptor_[1] = 0;
+      function_descriptor_[2] = 0;
+    }
+  }
+
+  void* address() {
+    if (ABI_USES_FUNCTION_DESCRIPTORS) {
+      return reinterpret_cast<void*>(function_descriptor_);
+    } else {
+      return reinterpret_cast<void*>(&swi_instruction_);
+    }
+  }
+
+  void* external_function() { return external_function_; }
+  ExternalReference::Type type() { return type_; }
+
+  static Redirection* Get(Isolate* isolate, void* external_function,
+                          ExternalReference::Type type) {
+    Redirection* current = isolate->simulator_redirection();
+    for (; current != NULL; current = current->next_) {
+      if (current->external_function_ == external_function) {
+        DCHECK_EQ(current->type(), type);
+        return current;
+      }
+    }
+    return new Redirection(isolate, external_function, type);
+  }
+
+  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+    char* addr_of_redirection =
+        addr_of_swi - offsetof(Redirection, swi_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+  static Redirection* FromAddress(void* address) {
+    int delta = ABI_USES_FUNCTION_DESCRIPTORS
+                    ? offsetof(Redirection, function_descriptor_)
+                    : offsetof(Redirection, swi_instruction_);
+    char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+  static void* ReverseRedirection(intptr_t reg) {
+    Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
+    return redirection->external_function();
+  }
+
+  static void DeleteChain(Redirection* redirection) {
+    while (redirection != nullptr) {
+      Redirection* next = redirection->next_;
+      delete redirection;
+      redirection = next;
+    }
+  }
+
+ private:
+  void* external_function_;
+  uint32_t swi_instruction_;
+  ExternalReference::Type type_;
+  Redirection* next_;
+  intptr_t function_descriptor_[3];
+};
+
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+  Redirection::DeleteChain(first);
+  if (i_cache != nullptr) {
+    for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+         entry = i_cache->Next(entry)) {
+      delete static_cast<CachePage*>(entry->value);
+    }
+    delete i_cache;
+  }
+}
+
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+                                           void* external_function,
+                                           ExternalReference::Type type) {
+  Redirection* redirection = Redirection::Get(isolate, external_function, type);
+  return redirection->address();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+      isolate->FindOrAllocatePerThreadDataForThisThread();
+  DCHECK(isolate_data != NULL);
+
+  Simulator* sim = isolate_data->simulator();
+  if (sim == NULL) {
+    // TODO(146): delete the simulator object when a thread/isolate goes away.
+    sim = new Simulator(isolate);
+    isolate_data->set_simulator(sim);
+  }
+  return sim;
+}
+
+// Sets the register in the architecture state.
+void Simulator::set_register(int reg, uint64_t value) {
+  DCHECK((reg >= 0) && (reg < kNumGPRs));
+  registers_[reg] = value;
+}
+
+// Get the register from the architecture state.
+uint64_t Simulator::get_register(int reg) const {
+  DCHECK((reg >= 0) && (reg < kNumGPRs));
+  // Stupid code added to avoid bug in GCC.
+  // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+  if (reg >= kNumGPRs) return 0;
+  // End stupid code.
+  return registers_[reg];
+}
+
+template <typename T>
+T Simulator::get_low_register(int reg) const {
+  DCHECK((reg >= 0) && (reg < kNumGPRs));
+  // Stupid code added to avoid bug in GCC.
+  // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+  if (reg >= kNumGPRs) return 0;
+  // End stupid code.
+  return static_cast<T>(registers_[reg] & 0xFFFFFFFF);
+}
+
+template <typename T>
+T Simulator::get_high_register(int reg) const {
+  DCHECK((reg >= 0) && (reg < kNumGPRs));
+  // Stupid code added to avoid bug in GCC.
+  // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+  if (reg >= kNumGPRs) return 0;
+  // End stupid code.
+  return static_cast<T>(registers_[reg] >> 32);
+}
+
+void Simulator::set_low_register(int reg, uint32_t value) {
+  uint64_t shifted_val = static_cast<uint64_t>(value);
+  uint64_t orig_val = static_cast<uint64_t>(registers_[reg]);
+  uint64_t result = (orig_val >> 32 << 32) | shifted_val;
+  registers_[reg] = result;
+}
+
+void Simulator::set_high_register(int reg, uint32_t value) {
+  uint64_t shifted_val = static_cast<uint64_t>(value) << 32;
+  uint64_t orig_val = static_cast<uint64_t>(registers_[reg]);
+  uint64_t result = (orig_val & 0xFFFFFFFF) | shifted_val;
+  registers_[reg] = result;
+}
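+
+// Illustrative (sketch): with registers_[reg] == 0x1111111122222222,
+// set_low_register(reg, 0xAAAAAAAA) yields 0x11111111AAAAAAAA, and a
+// subsequent set_high_register(reg, 0xBBBBBBBB) yields 0xBBBBBBBBAAAAAAAA.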
+
+double Simulator::get_double_from_register_pair(int reg) {
+  DCHECK((reg >= 0) && (reg < kNumGPRs) && ((reg % 2) == 0));
+
+  double dm_val = 0.0;
+#if 0 && !V8_TARGET_ARCH_S390X  // doesn't make sense in 64bit mode
+  // Read the bits from the unsigned integer register_[] array
+  // into the double precision floating point value and return it.
+  char buffer[sizeof(fp_registers_[0])];
+  memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+  memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+#endif
+  return (dm_val);
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(intptr_t value) {
+  pc_modified_ = true;
+  special_reg_pc_ = value;
+}
+
+bool Simulator::has_bad_pc() const {
+  return ((special_reg_pc_ == bad_lr) || (special_reg_pc_ == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+intptr_t Simulator::get_pc() const { return special_reg_pc_; }
+
+// Runtime FP routines take:
+// - two double arguments
+// - one double argument and zero or one integer arguments.
+// All are constructed here from d1, d2 and r2.
+void Simulator::GetFpArgs(double* x, double* y, intptr_t* z) {
+  *x = get_double_from_d_register(0);
+  *y = get_double_from_d_register(2);
+  *z = get_register(2);
+}
+
+// The return value is in d0.
+void Simulator::SetFpResult(const double& result) {
+  set_d_register_from_double(0, result);
+}
+
+void Simulator::TrashCallerSaveRegisters() {
+// We don't trash the registers with the return value.
+#if 0  // A good idea to trash volatile registers, needs to be done
+  registers_[2] = 0x50Bad4U;
+  registers_[3] = 0x50Bad4U;
+  registers_[12] = 0x50Bad4U;
+#endif
+}
+
+uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+  uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+  return *ptr;
+}
+
+int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+  int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+  return *ptr;
+}
+
+void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+  uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+  int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+  return *ptr;
+}
+
+int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+  return *ptr;
+}
+
+void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+uint8_t Simulator::ReadBU(intptr_t addr) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  return *ptr;
+}
+
+int8_t Simulator::ReadB(intptr_t addr) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  return *ptr;
+}
+
+void Simulator::WriteB(intptr_t addr, uint8_t value) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  *ptr = value;
+}
+
+void Simulator::WriteB(intptr_t addr, int8_t value) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  *ptr = value;
+}
+
+int64_t Simulator::ReadDW(intptr_t addr) {
+  int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+  return *ptr;
+}
+
+void Simulator::WriteDW(intptr_t addr, int64_t value) {
+  int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+/**
+ * Reads a double value from memory at given address.
+ */
+double Simulator::ReadDouble(intptr_t addr) {
+  double* ptr = reinterpret_cast<double*>(addr);
+  return *ptr;
+}
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+  // The simulator uses a separate JS stack. If we have exhausted the C stack,
+  // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+  if (GetCurrentStackPosition() < c_limit) {
+    return reinterpret_cast<uintptr_t>(get_sp());
+  }
+
+  // Otherwise the limit is the JS stack. Leave a safety margin to prevent
+  // overrunning the stack when pushing values.
+  return reinterpret_cast<uintptr_t>(stack_) + stack_protection_size_;
+}
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+  PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
+         reinterpret_cast<intptr_t>(instr), format);
+  UNIMPLEMENTED();
+}
+
+// Calculate C flag value for additions.
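+// For example, adding 0xFFFFFFFF and 0x00000001 does not fit in 32 bits, so
+// the carry flag is set; with a carry-in, 0xFFFFFFFF + 0x00000000 + 1 carries
+// as well.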
+bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
+  uint32_t uleft = static_cast<uint32_t>(left);
+  uint32_t uright = static_cast<uint32_t>(right);
+  uint32_t urest = 0xffffffffU - uleft;
+
+  return (uright > urest) ||
+         (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
+}
+
+// Calculate C flag value for subtractions.
+bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+  uint32_t uleft = static_cast<uint32_t>(left);
+  uint32_t uright = static_cast<uint32_t>(right);
+
+  return (uright > uleft);
+}
+
+// Calculate V flag value for additions and subtractions.
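+// For example, adding the int32_t values 0x7FFFFFFF and 1 wraps to
+// 0x80000000: both operands are non-negative but the result is negative, so
+// the overflow (V) flag is set.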
+template <typename T1>
+bool Simulator::OverflowFromSigned(T1 alu_out, T1 left, T1 right,
+                                   bool addition) {
+  bool overflow;
+  if (addition) {
+    // operands have the same sign
+    overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+               // and operands and result have different sign
+               && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+  } else {
+    // operands have different signs
+    overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+               // and first operand and result have different signs
+               && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+  }
+  return overflow;
+}
+
+#if V8_TARGET_ARCH_S390X
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
+  *x = reinterpret_cast<intptr_t>(pair->x);
+  *y = reinterpret_cast<intptr_t>(pair->y);
+}
+#else
+static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
+#if V8_TARGET_BIG_ENDIAN
+  *x = static_cast<int32_t>(*pair >> 32);
+  *y = static_cast<int32_t>(*pair);
+#else
+  *x = static_cast<int32_t>(*pair);
+  *y = static_cast<int32_t>(*pair >> 32);
+#endif
+}
+#endif
+
+// Calls into the V8 runtime.
+typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+                                         intptr_t arg2, intptr_t arg3,
+                                         intptr_t arg4, intptr_t arg5);
+typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
+                                               intptr_t arg2, intptr_t arg3,
+                                               intptr_t arg4, intptr_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(intptr_t arg0, intptr_t arg1,
+                                                   intptr_t arg2, intptr_t arg3,
+                                                   intptr_t arg4,
+                                                   intptr_t arg5);
+
+// These prototypes handle the four types of FP calls.
+typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, intptr_t arg0);
+
+// This signature supports direct call into the API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(intptr_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(intptr_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(intptr_t arg0, intptr_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(intptr_t arg0,
+                                                    intptr_t arg1, void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+  int svc = instr->SvcValue();
+  switch (svc) {
+    case kCallRtRedirected: {
+      // Check if the stack is aligned. If it is not, the error is reported
+      // below so that it can include information on the function called.
+      bool stack_aligned =
+          (get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
+          0;
+      Redirection* redirection = Redirection::FromSwiInstruction(instr);
+      const int kArgCount = 6;
+      int arg0_regnum = 2;
+      intptr_t result_buffer = 0;
+      bool uses_result_buffer =
+          redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE ||
+          (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
+           !ABI_RETURNS_OBJECTPAIR_IN_REGS);
+      if (uses_result_buffer) {
+        result_buffer = get_register(r2);
+        arg0_regnum++;
+      }
+      intptr_t arg[kArgCount];
+      for (int i = 0; i < kArgCount - 1; i++) {
+        arg[i] = get_register(arg0_regnum + i);
+      }
+      intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
+      arg[5] = stack_pointer[kCalleeRegisterSaveAreaSize / kPointerSize];
+      bool fp_call =
+          (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+          (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+          (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+          (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+      // Place the return address on the stack, making the call GC safe.
+      *reinterpret_cast<intptr_t*>(get_register(sp) +
+                                   kStackFrameRASlot * kPointerSize) =
+          get_register(r14);
+
+      intptr_t external =
+          reinterpret_cast<intptr_t>(redirection->external_function());
+      if (fp_call) {
+        double dval0, dval1;  // one or two double parameters
+        intptr_t ival;        // zero or one integer parameters
+        int iresult = 0;      // integer return value
+        double dresult = 0;   // double return value
+        GetFpArgs(&dval0, &dval1, &ival);
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          SimulatorRuntimeCall generic_target =
+              reinterpret_cast<SimulatorRuntimeCall>(external);
+          switch (redirection->type()) {
+            case ExternalReference::BUILTIN_FP_FP_CALL:
+            case ExternalReference::BUILTIN_COMPARE_CALL:
+              PrintF("Call to host function at %p with args %f, %f",
+                     FUNCTION_ADDR(generic_target), dval0, dval1);
+              break;
+            case ExternalReference::BUILTIN_FP_CALL:
+              PrintF("Call to host function at %p with arg %f",
+                     FUNCTION_ADDR(generic_target), dval0);
+              break;
+            case ExternalReference::BUILTIN_FP_INT_CALL:
+              PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
+                     FUNCTION_ADDR(generic_target), dval0, ival);
+              break;
+            default:
+              UNREACHABLE();
+              break;
+          }
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   static_cast<intptr_t>(get_register(sp)));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        switch (redirection->type()) {
+          case ExternalReference::BUILTIN_COMPARE_CALL: {
+            SimulatorRuntimeCompareCall target =
+                reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+            iresult = target(dval0, dval1);
+            set_register(r2, iresult);
+            break;
+          }
+          case ExternalReference::BUILTIN_FP_FP_CALL: {
+            SimulatorRuntimeFPFPCall target =
+                reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+            dresult = target(dval0, dval1);
+            SetFpResult(dresult);
+            break;
+          }
+          case ExternalReference::BUILTIN_FP_CALL: {
+            SimulatorRuntimeFPCall target =
+                reinterpret_cast<SimulatorRuntimeFPCall>(external);
+            dresult = target(dval0);
+            SetFpResult(dresult);
+            break;
+          }
+          case ExternalReference::BUILTIN_FP_INT_CALL: {
+            SimulatorRuntimeFPIntCall target =
+                reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+            dresult = target(dval0, ival);
+            SetFpResult(dresult);
+            break;
+          }
+          default:
+            UNREACHABLE();
+            break;
+        }
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          switch (redirection->type()) {
+            case ExternalReference::BUILTIN_COMPARE_CALL:
+              PrintF("Returned %08x\n", iresult);
+              break;
+            case ExternalReference::BUILTIN_FP_FP_CALL:
+            case ExternalReference::BUILTIN_FP_CALL:
+            case ExternalReference::BUILTIN_FP_INT_CALL:
+              PrintF("Returned %f\n", dresult);
+              break;
+            default:
+              UNREACHABLE();
+              break;
+          }
+        }
+      } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+        // See callers of MacroAssembler::CallApiFunctionAndReturn for
+        // explanation of register usage.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   static_cast<intptr_t>(get_register(sp)));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeDirectApiCall target =
+            reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+        target(arg[0]);
+      } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+        // See callers of MacroAssembler::CallApiFunctionAndReturn for
+        // explanation of register usage.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR
+                 " %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0], arg[1]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   static_cast<intptr_t>(get_register(sp)));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeProfilingApiCall target =
+            reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+        target(arg[0], Redirection::ReverseRedirection(arg[1]));
+      } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+        // See callers of MacroAssembler::CallApiFunctionAndReturn for
+        // explanation of register usage.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR
+                 " %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0], arg[1]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   static_cast<intptr_t>(get_register(sp)));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeDirectGetterCall target =
+            reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+        if (!ABI_PASSES_HANDLES_IN_REGS) {
+          arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+        }
+        target(arg[0], arg[1]);
+      } else if (redirection->type() ==
+                 ExternalReference::PROFILING_GETTER_CALL) {
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR
+                 " %08" V8PRIxPTR " %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0], arg[1], arg[2]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   static_cast<intptr_t>(get_register(sp)));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeProfilingGetterCall target =
+            reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+        if (!ABI_PASSES_HANDLES_IN_REGS) {
+          arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+        }
+        target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
+      } else {
+        // builtin call.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          SimulatorRuntimeCall target =
+              reinterpret_cast<SimulatorRuntimeCall>(external);
+          PrintF(
+              "Call to host function at %p,\n"
+              "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+              ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+              FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
+              arg[5]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   static_cast<intptr_t>(get_register(sp)));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+          SimulatorRuntimeTripleCall target =
+              reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+          ObjectTriple result =
+              target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+          if (::v8::internal::FLAG_trace_sim) {
+            PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+                   "}\n",
+                   reinterpret_cast<intptr_t>(result.x),
+                   reinterpret_cast<intptr_t>(result.y),
+                   reinterpret_cast<intptr_t>(result.z));
+          }
+          memcpy(reinterpret_cast<void*>(result_buffer), &result,
+                 sizeof(ObjectTriple));
+          set_register(r2, result_buffer);
+        } else {
+          if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
+            SimulatorRuntimePairCall target =
+                reinterpret_cast<SimulatorRuntimePairCall>(external);
+            ObjectPair result =
+                target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+            intptr_t x;
+            intptr_t y;
+            decodeObjectPair(&result, &x, &y);
+            if (::v8::internal::FLAG_trace_sim) {
+              PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
+            }
+            if (ABI_RETURNS_OBJECTPAIR_IN_REGS) {
+              set_register(r2, x);
+              set_register(r3, y);
+            } else {
+              memcpy(reinterpret_cast<void*>(result_buffer), &result,
+                     sizeof(ObjectPair));
+              set_register(r2, result_buffer);
+            }
+          } else {
+            DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+            SimulatorRuntimeCall target =
+                reinterpret_cast<SimulatorRuntimeCall>(external);
+            intptr_t result =
+                target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+            if (::v8::internal::FLAG_trace_sim) {
+              PrintF("Returned %08" V8PRIxPTR "\n", result);
+            }
+            set_register(r2, result);
+          }
+        }
+        // #if !V8_TARGET_ARCH_S390X
+        //         DCHECK(redirection->type() ==
+        //         ExternalReference::BUILTIN_CALL);
+        //         SimulatorRuntimeCall target =
+        //             reinterpret_cast<SimulatorRuntimeCall>(external);
+        //         int64_t result = target(arg[0], arg[1], arg[2], arg[3],
+        //         arg[4],
+        //                                 arg[5]);
+        //         int32_t lo_res = static_cast<int32_t>(result);
+        //         int32_t hi_res = static_cast<int32_t>(result >> 32);
+        // #if !V8_TARGET_LITTLE_ENDIAN
+        //         if (::v8::internal::FLAG_trace_sim) {
+        //           PrintF("Returned %08x\n", hi_res);
+        //         }
+        //         set_register(r2, hi_res);
+        //         set_register(r3, lo_res);
+        // #else
+        //         if (::v8::internal::FLAG_trace_sim) {
+        //           PrintF("Returned %08x\n", lo_res);
+        //         }
+        //         set_register(r2, lo_res);
+        //         set_register(r3, hi_res);
+        // #endif
+        // #else
+        //         if (redirection->type() == ExternalReference::BUILTIN_CALL) {
+        //           SimulatorRuntimeCall target =
+        //             reinterpret_cast<SimulatorRuntimeCall>(external);
+        //           intptr_t result = target(arg[0], arg[1], arg[2], arg[3],
+        //           arg[4],
+        //               arg[5]);
+        //           if (::v8::internal::FLAG_trace_sim) {
+        //             PrintF("Returned %08" V8PRIxPTR "\n", result);
+        //           }
+        //           set_register(r2, result);
+        //         } else {
+        //           DCHECK(redirection->type() ==
+        //               ExternalReference::BUILTIN_CALL_PAIR);
+        //           SimulatorRuntimePairCall target =
+        //             reinterpret_cast<SimulatorRuntimePairCall>(external);
+        //           ObjectPair result = target(arg[0], arg[1], arg[2], arg[3],
+        //               arg[4], arg[5]);
+        //           if (::v8::internal::FLAG_trace_sim) {
+        //             PrintF("Returned %08" V8PRIxPTR ", %08" V8PRIxPTR "\n",
+        //                 result.x, result.y);
+        //           }
+        // #if ABI_RETURNS_OBJECTPAIR_IN_REGS
+        //           set_register(r2, result.x);
+        //           set_register(r3, result.y);
+        // #else
+        //            memcpy(reinterpret_cast<void *>(result_buffer), &result,
+        //                sizeof(ObjectPair));
+        // #endif
+        //         }
+        // #endif
+      }
+      int64_t saved_lr = *reinterpret_cast<intptr_t*>(
+          get_register(sp) + kStackFrameRASlot * kPointerSize);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+      // On zLinux-31, the saved_lr might be tagged with a high bit of 1.
+      // Cleanse it before proceeding with simulation.
+      saved_lr &= 0x7FFFFFFF;
+#endif
+      set_pc(saved_lr);
+      break;
+    }
+    case kBreakpoint: {
+      S390Debugger dbg(this);
+      dbg.Debug();
+      break;
+    }
+    // stop uses all codes greater than or equal to 1 << 23.
+    default: {
+      if (svc >= (1 << 23)) {
+        uint32_t code = svc & kStopCodeMask;
+        if (isWatchedStop(code)) {
+          IncreaseStopCounter(code);
+        }
+        // Stop if it is enabled; otherwise continue, jumping over the stop
+        // instruction and the message address.
+        if (isEnabledStop(code)) {
+          S390Debugger dbg(this);
+          dbg.Stop(instr);
+        } else {
+          set_pc(get_pc() + sizeof(FourByteInstr) + kPointerSize);
+        }
+      } else {
+        // This is not a valid svc code.
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+}
+
+// Stop helper functions.
+bool Simulator::isStopInstruction(Instruction* instr) {
+  return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
+}
+
+bool Simulator::isWatchedStop(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  return code < kNumOfWatchedStops;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  // Unwatched stops are always enabled.
+  return !isWatchedStop(code) ||
+         !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void Simulator::EnableStop(uint32_t code) {
+  DCHECK(isWatchedStop(code));
+  if (!isEnabledStop(code)) {
+    watched_stops_[code].count &= ~kStopDisabledBit;
+  }
+}
+
+void Simulator::DisableStop(uint32_t code) {
+  DCHECK(isWatchedStop(code));
+  if (isEnabledStop(code)) {
+    watched_stops_[code].count |= kStopDisabledBit;
+  }
+}
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  DCHECK(isWatchedStop(code));
+  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+    PrintF(
+        "Stop counter for code %i has overflowed.\n"
+        "Enabling this code and resetting the counter to 0.\n",
+        code);
+    watched_stops_[code].count = 0;
+    EnableStop(code);
+  } else {
+    watched_stops_[code].count++;
+  }
+}
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  if (!isWatchedStop(code)) {
+    PrintF("Stop not watched.");
+  } else {
+    const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+    int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+    // Don't print the state of unused breakpoints.
+    if (count != 0) {
+      if (watched_stops_[code].desc) {
+        PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code,
+               state, count, watched_stops_[code].desc);
+      } else {
+        PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+               count);
+      }
+    }
+  }
+}
+
+// Method for checking overflow on signed addition:
+//   Test whether src1 and src2 have opposite signs.
+//   (1) If they have opposite signs, no overflow can occur.
+//   (2) Otherwise, test whether the result and one of the operands have
+//       opposite signs:
+//      (a) If they do not, there is no overflow.
+//      (b) If they do, the addition overflowed.
+#define CheckOverflowForIntAdd(src1, src2, type) \
+  OverflowFromSigned<type>(src1 + src2, src1, src2, true);
+
+#define CheckOverflowForIntSub(src1, src2, type) \
+  OverflowFromSigned<type>(src1 - src2, src1, src2, false);
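+// For example, CheckOverflowForIntSub(INT32_MIN, 1, int32_t) is true: the
+// operands have opposite signs and the result, which wraps to INT32_MAX,
+// differs in sign from the first operand.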
+
+// Method for checking overflow on unsigned addition
+#define CheckOverflowForUIntAdd(src1, src2) \
+  ((src1) + (src2) < (src1) || (src1) + (src2) < (src2))
+
+// Method for checking overflow on unsigned subtraction
+#define CheckOverflowForUIntSub(src1, src2) ((src1) - (src2) > (src1))
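+// For example, with uint32_t operands CheckOverflowForUIntAdd(0xFFFFFFFFu, 1u)
+// is true because the sum wraps to 0, and CheckOverflowForUIntSub(0u, 1u) is
+// true because the difference wraps to 0xFFFFFFFF.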
+
+// Method for checking overflow on multiplication
+#define CheckOverflowForMul(src1, src2) (((src1) * (src2)) / (src2) != (src1))
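+// Note that this check divides by src2, so it is only meaningful for a
+// non-zero multiplier (a zero multiplier cannot overflow anyway).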
+
+// Method for checking overflow on shift right
+#define CheckOverflowForShiftRight(src1, src2) \
+  (((src1) >> (src2)) << (src2) != (src1))
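+// For example, CheckOverflowForShiftRight(5, 1) is true: the low bit is lost,
+// so shifting back gives 4 rather than the original value.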
+
+// Method for checking overflow on shift left
+#define CheckOverflowForShiftLeft(src1, src2) \
+  (((src1) << (src2)) >> (src2) != (src1))
+
+// S390 Decode and simulate helpers
+bool Simulator::DecodeTwoByte(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  switch (op) {
+    // RR format instructions
+    case AR:
+    case SR:
+    case MR:
+    case DR:
+    case OR:
+    case NR:
+    case XR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      bool isOF = false;
+      switch (op) {
+        case AR:
+          isOF = CheckOverflowForIntAdd(r1_val, r2_val, int32_t);
+          r1_val += r2_val;
+          SetS390ConditionCode<int32_t>(r1_val, 0);
+          SetS390OverflowCode(isOF);
+          break;
+        case SR:
+          isOF = CheckOverflowForIntSub(r1_val, r2_val, int32_t);
+          r1_val -= r2_val;
+          SetS390ConditionCode<int32_t>(r1_val, 0);
+          SetS390OverflowCode(isOF);
+          break;
+        case OR:
+          r1_val |= r2_val;
+          SetS390BitWiseConditionCode<uint32_t>(r1_val);
+          break;
+        case NR:
+          r1_val &= r2_val;
+          SetS390BitWiseConditionCode<uint32_t>(r1_val);
+          break;
+        case XR:
+          r1_val ^= r2_val;
+          SetS390BitWiseConditionCode<uint32_t>(r1_val);
+          break;
+        case MR: {
+          DCHECK(r1 % 2 == 0);
+          r1_val = get_low_register<int32_t>(r1 + 1);
+          int64_t product =
+              static_cast<int64_t>(r1_val) * static_cast<int64_t>(r2_val);
+          int32_t high_bits = product >> 32;
+          r1_val = high_bits;
+          int32_t low_bits = product & 0x00000000FFFFFFFF;
+          set_low_register(r1, high_bits);
+          set_low_register(r1 + 1, low_bits);
+          break;
+        }
+        case DR: {
+          // reg-reg pair should be even-odd pair, assert r1 is an even register
+          DCHECK(r1 % 2 == 0);
+          // leftmost 32 bits of the dividend are in r1
+          // rightmost 32 bits of the dividend are in r1+1
+          // get the signed value from r1
+          int64_t dividend = static_cast<int64_t>(r1_val) << 32;
+          // get unsigned value from r1+1
+          // avoid addition with sign-extended r1+1 value
+          dividend += get_low_register<uint32_t>(r1 + 1);
+          int32_t remainder = dividend % r2_val;
+          int32_t quotient = dividend / r2_val;
+          r1_val = remainder;
+          set_low_register(r1, remainder);
+          set_low_register(r1 + 1, quotient);
+          break;  // reg pair
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+      set_low_register(r1, r1_val);
+      break;
+    }
+    case LR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      set_low_register(r1, get_low_register<int32_t>(r2));
+      break;
+    }
+    case LDR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      int64_t r2_val = get_d_register(r2);
+      set_d_register(r1, r2_val);
+      break;
+    }
+    case CR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      SetS390ConditionCode<int32_t>(r1_val, r2_val);
+      break;
+    }
+    case CLR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      uint32_t r2_val = get_low_register<uint32_t>(r2);
+      SetS390ConditionCode<uint32_t>(r1_val, r2_val);
+      break;
+    }
+    case BCR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      if (TestConditionCode(Condition(r1))) {
+        intptr_t r2_val = get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+        // On 31-bit, the topmost bit may be 0 or 1 but is ignored by the
+        // hardware.  Cleanse the top bit before jumping to it, unless it is
+        // one of the special PCs.
+        if (r2_val != bad_lr && r2_val != end_sim_pc) r2_val &= 0x7FFFFFFF;
+#endif
+        set_pc(r2_val);
+      }
+      break;
+    }
+    case LTR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      SetS390ConditionCode<int32_t>(r2_val, 0);
+      set_low_register(r1, r2_val);
+      break;
+    }
+    case ALR:
+    case SLR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      uint32_t r2_val = get_low_register<uint32_t>(r2);
+      uint32_t alu_out = 0;
+      bool isOF = false;
+      if (ALR == op) {
+        alu_out = r1_val + r2_val;
+        isOF = CheckOverflowForUIntAdd(r1_val, r2_val);
+      } else if (SLR == op) {
+        alu_out = r1_val - r2_val;
+        isOF = CheckOverflowForUIntSub(r1_val, r2_val);
+      } else {
+        UNREACHABLE();
+      }
+      set_low_register(r1, alu_out);
+      SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+      break;
+    }
+    case LNR: {
+      // Load Negative (32)
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      r2_val = (r2_val >= 0) ? -r2_val : r2_val;  // If pos, then negate it.
+      set_low_register(r1, r2_val);
+      condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT;  // CC0 - result is zero
+      // CC1 - result is negative
+      break;
+    }
+    case BASR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      intptr_t link_addr = get_pc() + 2;
+      // If R2 is zero, the BASR does not branch.
+      int64_t r2_val = (r2 == 0) ? link_addr : get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+      // On 31-bit, the topmost bit may be 0 or 1, which can cause issues for
+      // the stack walker.  The top bit should either be cleansed before being
+      // pushed onto the stack, or cleansed during stack walking when
+      // dereferenced.  For the simulator, we take the worst-case scenario and
+      // always tag the high bit, to flush out more problems.
+      link_addr |= 0x80000000;
+#endif
+      set_register(r1, link_addr);
+      set_pc(r2_val);
+      break;
+    }
+    case LCR: {
+      RRInstruction* rrinst = reinterpret_cast<RRInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      int32_t original_r2_val = r2_val;
+      r2_val = ~r2_val;
+      r2_val = r2_val + 1;
+      set_low_register(r1, r2_val);
+      SetS390ConditionCode<int32_t>(r2_val, 0);
+      // Checks for overflow where r2_val = -2147483648.
+      // Cannot do int comparison due to GCC 4.8 bug on x86.
+      // Detect INT_MIN alternatively, as it is the only value where both
+      // original and result are negative due to overflow.
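+      // e.g. negating -2147483648 wraps back to -2147483648, leaving both the
+      // original value and the result negative.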
+      if (r2_val < 0 && original_r2_val < 0) {
+        SetS390OverflowCode(true);
+      }
+      break;
+    }
+    case BKPT: {
+      set_pc(get_pc() + 2);
+      S390Debugger dbg(this);
+      dbg.Debug();
+      break;
+    }
+    default:
+      UNREACHABLE();
+      return false;
+      break;
+  }
+  return true;
+}
+
+// Decode routine for four-byte instructions
+bool Simulator::DecodeFourByte(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  // Pre-cast instruction to various types
+  RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+  SIInstruction* siInstr = reinterpret_cast<SIInstruction*>(instr);
+
+  switch (op) {
+    case POPCNT_Z: {
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int64_t r2_val = get_register(r2);
+      int64_t r1_val = 0;
+
+      uint8_t* r2_val_ptr = reinterpret_cast<uint8_t*>(&r2_val);
+      uint8_t* r1_val_ptr = reinterpret_cast<uint8_t*>(&r1_val);
+      for (int i = 0; i < 8; i++) {
+        uint32_t x = static_cast<uint32_t>(r2_val_ptr[i]);
+#if defined(__GNUC__)
+        r1_val_ptr[i] = __builtin_popcount(x);
+#else
+#error unsupported __builtin_popcount
+#endif
+      }
+
+      set_register(r1, static_cast<uint64_t>(r1_val));
+      break;
+    }
+    case LLGFR: {
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      uint64_t r2_finalval =
+          (static_cast<uint64_t>(r2_val) & 0x00000000ffffffff);
+      set_register(r1, r2_finalval);
+      break;
+    }
+    case EX: {
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int r1 = rxinst->R1Value();
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+
+      SixByteInstr the_instr = Instruction::InstructionBits(
+          reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+      int length = Instruction::InstructionLength(
+          reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+
+      char new_instr_buf[8];
+      char* addr = reinterpret_cast<char*>(&new_instr_buf[0]);
+      the_instr |= static_cast<SixByteInstr>(r1_val & 0xff)
+                   << (8 * length - 16);
+      Instruction::SetInstructionBits<SixByteInstr>(
+          reinterpret_cast<byte*>(addr), static_cast<SixByteInstr>(the_instr));
+      ExecuteInstruction(reinterpret_cast<Instruction*>(addr), false);
+      break;
+    }
+    case LGR: {
+      // Load Register (64)
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      set_register(r1, get_register(r2));
+      break;
+    }
+    case LDGR: {
+      // Load FPR from GPR (L <- 64)
+      uint64_t int_val = get_register(rreInst->R2Value());
+      // double double_val = bit_cast<double, uint64_t>(int_val);
+      // set_d_register_from_double(rreInst->R1Value(), double_val);
+      set_d_register(rreInst->R1Value(), int_val);
+      break;
+    }
+    case LGDR: {
+      // Load GPR from FPR (64 <- L)
+      int64_t double_val = get_d_register(rreInst->R2Value());
+      set_register(rreInst->R1Value(), double_val);
+      break;
+    }
+    case LTGR: {
+      // Load and Test Register (64)
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int64_t r2_val = get_register(r2);
+      SetS390ConditionCode<int64_t>(r2_val, 0);
+      set_register(r1, get_register(r2));
+      break;
+    }
+    case LZDR: {
+      int r1 = rreInst->R1Value();
+      set_d_register_from_double(r1, 0.0);
+      break;
+    }
+    case LTEBR: {
+      RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreinst->R1Value();
+      int r2 = rreinst->R2Value();
+      int64_t r2_val = get_d_register(r2);
+      float fr2_val = get_float32_from_d_register(r2);
+      SetS390ConditionCode<float>(fr2_val, 0.0);
+      set_d_register(r1, r2_val);
+      break;
+    }
+    case LTDBR: {
+      RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreinst->R1Value();
+      int r2 = rreinst->R2Value();
+      int64_t r2_val = get_d_register(r2);
+      SetS390ConditionCode<double>(bit_cast<double, int64_t>(r2_val), 0.0);
+      set_d_register(r1, r2_val);
+      break;
+    }
+    case CGR: {
+      // Compare (64)
+      int64_t r1_val = get_register(rreInst->R1Value());
+      int64_t r2_val = get_register(rreInst->R2Value());
+      SetS390ConditionCode<int64_t>(r1_val, r2_val);
+      break;
+    }
+    case CLGR: {
+      // Compare Logical (64)
+      uint64_t r1_val = static_cast<uint64_t>(get_register(rreInst->R1Value()));
+      uint64_t r2_val = static_cast<uint64_t>(get_register(rreInst->R2Value()));
+      SetS390ConditionCode<uint64_t>(r1_val, r2_val);
+      break;
+    }
+    case LH: {
+      // Load Halfword
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int r1 = rxinst->R1Value();
+      int x2 = rxinst->X2Value();
+      int b2 = rxinst->B2Value();
+
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      intptr_t d2_val = rxinst->D2Value();
+      intptr_t mem_addr = x2_val + b2_val + d2_val;
+
+      int32_t result = static_cast<int32_t>(ReadH(mem_addr, instr));
+      set_low_register(r1, result);
+      break;
+    }
+    case LHI: {
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riinst->R1Value();
+      int i = riinst->I2Value();
+      set_low_register(r1, i);
+      break;
+    }
+    case LGHI: {
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riinst->R1Value();
+      int64_t i = riinst->I2Value();
+      set_register(r1, i);
+      break;
+    }
+    case CHI: {
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riinst->R1Value();
+      int16_t i = riinst->I2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      SetS390ConditionCode<int32_t>(r1_val, i);
+      break;
+    }
+    case CGHI: {
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riinst->R1Value();
+      int64_t i = static_cast<int64_t>(riinst->I2Value());
+      int64_t r1_val = get_register(r1);
+      SetS390ConditionCode<int64_t>(r1_val, i);
+      break;
+    }
+    case BRAS: {
+      // Branch Relative and Save
+      RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+      int r1 = rilInstr->R1Value();
+      intptr_t d2 = rilInstr->I2Value();
+      intptr_t pc = get_pc();
+      // Set PC of next instruction to register
+      set_register(r1, pc + sizeof(FourByteInstr));
+      // Update PC to branch target
+      set_pc(pc + d2 * 2);
+      break;
+    }
+    case BRC: {
+      // Branch Relative on Condition
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int m1 = riinst->M1Value();
+      if (TestConditionCode((Condition)m1)) {
+        intptr_t offset = riinst->I2Value() * 2;
+        set_pc(get_pc() + offset);
+      }
+      break;
+    }
+    case BRCT:
+    case BRCTG: {
+      // Branch On Count (32/64).
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riinst->R1Value();
+      int64_t value =
+          (op == BRCT) ? get_low_register<int32_t>(r1) : get_register(r1);
+      if (BRCT == op)
+        set_low_register(r1, --value);
+      else
+        set_register(r1, --value);
+      // Branch if value != 0
+      if (value != 0) {
+        intptr_t offset = riinst->I2Value() * 2;
+        set_pc(get_pc() + offset);
+      }
+      break;
+    }
+    case BXH: {
+      RSInstruction* rsinst = reinterpret_cast<RSInstruction*>(instr);
+      int r1 = rsinst->R1Value();
+      int r3 = rsinst->R3Value();
+      int b2 = rsinst->B2Value();
+      int d2 = rsinst->D2Value();
+
+      // r1_val is the first operand, r3_val is the increment
+      int32_t r1_val = r1 == 0 ? 0 : get_register(r1);
+      int32_t r3_val = r3 == 0 ? 0 : get_register(r3);
+      intptr_t b2_val = b2 == 0 ? 0 : get_register(b2);
+      intptr_t branch_address = b2_val + d2;
+      // increment r1_val
+      r1_val += r3_val;
+
+      // If R3 is even, it designates a pair of registers: the even and odd
+      // registers of the pair hold the increment and the compare value
+      // respectively. If R3 is odd, the increment itself is used as both the
+      // increment and the compare value.
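+      // e.g. if R3 names register 4, register 4 supplies the increment and
+      // register 5 the compare value; if R3 names register 5, it supplies
+      // both.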
+      int32_t compare_val = r3 % 2 == 0 ? get_register(r3 + 1) : r3_val;
+      if (r1_val > compare_val) {
+        // branch to address if r1_val is greater than compare value
+        set_pc(branch_address);
+      }
+
+      // update contents of register in r1 with the new incremented value
+      set_register(r1, r1_val);
+      break;
+    }
+    case IIHH:
+    case IIHL:
+    case IILH:
+    case IILL: {
+      UNIMPLEMENTED();
+      break;
+    }
+    case STM:
+    case LM: {
+      // Store/Load Multiple (32).
+      RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
+      int r1 = rsinstr->R1Value();
+      int r3 = rsinstr->R3Value();
+      int rb = rsinstr->B2Value();
+      int offset = rsinstr->D2Value();
+
+      // Regs roll around if r3 is less than r1.
+      // Artificially increase r3 by 16 so we can calculate
+      // the number of regs stored properly.
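+      // e.g. r1 = 14 and r3 = 2 covers registers 14, 15, 0, 1 and 2.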
+      if (r3 < r1) r3 += 16;
+
+      int32_t rb_val = (rb == 0) ? 0 : get_low_register<int32_t>(rb);
+
+      // Store each register in ascending order.
+      for (int i = 0; i <= r3 - r1; i++) {
+        if (op == STM) {
+          int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+          WriteW(rb_val + offset + 4 * i, value, instr);
+        } else if (op == LM) {
+          int32_t value = ReadW(rb_val + offset + 4 * i, instr);
+          set_low_register((r1 + i) % 16, value);
+        }
+      }
+      break;
+    }
+    case SLL:
+    case SRL: {
+      RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+      int r1 = rsInstr->R1Value();
+      int b2 = rsInstr->B2Value();
+      intptr_t d2 = rsInstr->D2Value();
+      // only takes the rightmost 6 bits
+      int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      uint32_t alu_out = 0;
+      if (SLL == op) {
+        alu_out = r1_val << shiftBits;
+      } else if (SRL == op) {
+        alu_out = r1_val >> shiftBits;
+      } else {
+        UNREACHABLE();
+      }
+      set_low_register(r1, alu_out);
+      break;
+    }
+    case SLDL: {
+      RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+      int r1 = rsInstr->R1Value();
+      int b2 = rsInstr->B2Value();
+      intptr_t d2 = rsInstr->D2Value();
+      // only takes the rightmost 6 bits
+      int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+
+      DCHECK(r1 % 2 == 0);
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      uint32_t r1_next_val = get_low_register<uint32_t>(r1 + 1);
+      uint64_t alu_out = (static_cast<uint64_t>(r1_val) << 32) |
+                         (static_cast<uint64_t>(r1_next_val));
+      alu_out <<= shiftBits;
+      set_low_register(r1 + 1, static_cast<uint32_t>(alu_out));
+      set_low_register(r1, static_cast<uint32_t>(alu_out >> 32));
+      break;
+    }
+    case SLA:
+    case SRA: {
+      RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+      int r1 = rsInstr->R1Value();
+      int b2 = rsInstr->B2Value();
+      intptr_t d2 = rsInstr->D2Value();
+      // only takes the rightmost 6 bits
+      int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      int32_t alu_out = 0;
+      bool isOF = false;
+      if (op == SLA) {
+        isOF = CheckOverflowForShiftLeft(r1_val, shiftBits);
+        alu_out = r1_val << shiftBits;
+      } else if (op == SRA) {
+        alu_out = r1_val >> shiftBits;
+      }
+      set_low_register(r1, alu_out);
+      SetS390ConditionCode<int32_t>(alu_out, 0);
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    case LLHR: {
+      UNIMPLEMENTED();
+      break;
+    }
+    case LLGHR: {
+      UNIMPLEMENTED();
+      break;
+    }
+    case L:
+    case LA:
+    case LD:
+    case LE: {
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int32_t r1 = rxinst->R1Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      intptr_t addr = b2_val + x2_val + d2_val;
+      if (op == L) {
+        int32_t mem_val = ReadW(addr, instr);
+        set_low_register(r1, mem_val);
+      } else if (op == LA) {
+        set_register(r1, addr);
+      } else if (op == LD) {
+        int64_t dbl_val = *reinterpret_cast<int64_t*>(addr);
+        set_d_register(r1, dbl_val);
+      } else if (op == LE) {
+        float float_val = *reinterpret_cast<float*>(addr);
+        set_d_register_from_float32(r1, float_val);
+      }
+      break;
+    }
+    case C:
+    case CL: {
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int32_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      intptr_t addr = b2_val + x2_val + d2_val;
+      int32_t mem_val = ReadW(addr, instr);
+      if (C == op)
+        SetS390ConditionCode<int32_t>(r1_val, mem_val);
+      else if (CL == op)
+        SetS390ConditionCode<uint32_t>(r1_val, mem_val);
+      break;
+    }
+    case CLI: {
+      // Compare Immediate (Mem - Imm) (8)
+      int b1 = siInstr->B1Value();
+      int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+      intptr_t d1_val = siInstr->D1Value();
+      intptr_t addr = b1_val + d1_val;
+      uint8_t mem_val = ReadB(addr);
+      uint8_t imm_val = siInstr->I2Value();
+      SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+      break;
+    }
+    case TM: {
+      // Test Under Mask (Mem - Imm) (8)
+      int b1 = siInstr->B1Value();
+      int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+      intptr_t d1_val = siInstr->D1Value();
+      intptr_t addr = b1_val + d1_val;
+      uint8_t mem_val = ReadB(addr);
+      uint8_t imm_val = siInstr->I2Value();
+      uint8_t selected_bits = mem_val & imm_val;
+      // CC0: Selected bits are zero
+      // CC1: Selected bits mixed zeros and ones
+      // CC3: Selected bits all ones
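+      // e.g. mem_val 0xF0 under mask 0xC0 selects only ones (CC3), mask 0x18
+      // selects a mix (CC1) and mask 0x0F selects none (CC0).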
+      if (0 == selected_bits) {
+        condition_reg_ = CC_EQ;  // CC0
+      } else if (selected_bits == imm_val) {
+        condition_reg_ = 0x1;  // CC3
+      } else {
+        condition_reg_ = 0x4;  // CC1
+      }
+      break;
+    }
+    case ST:
+    case STE:
+    case STD: {
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int32_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      intptr_t addr = b2_val + x2_val + d2_val;
+      if (op == ST) {
+        WriteW(addr, r1_val, instr);
+      } else if (op == STD) {
+        int64_t frs_val = get_d_register(rxinst->R1Value());
+        WriteDW(addr, frs_val);
+      } else if (op == STE) {
+        int64_t frs_val = get_d_register(rxinst->R1Value()) >> 32;
+        WriteW(addr, static_cast<int32_t>(frs_val), instr);
+      }
+      break;
+    }
+    case LTGFR:
+    case LGFR: {
+      // Load and Test Register (64 <- 32)  (Sign Extends 32-bit val)
+      // Load Register (64 <- 32)  (Sign Extends 32-bit val)
+      RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInstr->R1Value();
+      int r2 = rreInstr->R2Value();
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      int64_t result = static_cast<int64_t>(r2_val);
+      set_register(r1, result);
+
+      if (LTGFR == op) SetS390ConditionCode<int64_t>(result, 0);
+      break;
+    }
+    case LNGR: {
+      // Load Negative (64)
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int64_t r2_val = get_register(r2);
+      r2_val = (r2_val >= 0) ? -r2_val : r2_val;  // If pos, then negate it.
+      set_register(r1, r2_val);
+      condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT;  // CC0 - result is zero
+      // CC1 - result is negative
+      break;
+    }
+    case TRAP4: {
+      // whack the caller-allocated stack space
+      int64_t sp_addr = get_register(sp);
+      for (int i = 0; i < kCalleeRegisterSaveAreaSize / kPointerSize; ++i) {
+        // we don't want to whack the RA (r14)
+        if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xdeadbabe;
+      }
+      SoftwareInterrupt(instr);
+      break;
+    }
+    case STC: {
+      // Store Character/Byte
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      uint8_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      intptr_t mem_addr = b2_val + x2_val + d2_val;
+      WriteB(mem_addr, r1_val);
+      break;
+    }
+    case STH: {
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int16_t r1_val = get_low_register<int32_t>(rxinst->R1Value());
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      intptr_t mem_addr = b2_val + x2_val + d2_val;
+      WriteH(mem_addr, r1_val, instr);
+      break;
+    }
+#if V8_TARGET_ARCH_S390X
+    case LCGR: {
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int64_t r2_val = get_register(r2);
+      int64_t original_r2_val = r2_val;
+      r2_val = ~r2_val;
+      r2_val = r2_val + 1;
+      set_register(r1, r2_val);
+      SetS390ConditionCode<int64_t>(r2_val, 0);
+      // If the input is INT64_MIN, loading its complement overflows; as with
+      // LCR above, it is the only value for which both the original and the
+      // result are negative.
+      if (r2_val < 0 && original_r2_val < 0) {
+        SetS390OverflowCode(true);
+      }
+      break;
+    }
+#endif
+    case SRDA: {
+      RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+      int r1 = rsInstr->R1Value();
+      DCHECK(r1 % 2 == 0);  // must be a reg pair
+      int b2 = rsInstr->B2Value();
+      intptr_t d2 = rsInstr->D2Value();
+      // only takes the rightmost 6 bits
+      int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      int64_t opnd1 = static_cast<int64_t>(get_low_register<int32_t>(r1)) << 32;
+      int64_t opnd2 = static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+      int64_t r1_val = opnd1 + opnd2;
+      int64_t alu_out = r1_val >> shiftBits;
+      set_low_register(r1, alu_out >> 32);
+      set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+      SetS390ConditionCode<int32_t>(alu_out, 0);
+      break;
+    }
+    case SRDL: {
+      RSInstruction* rsInstr = reinterpret_cast<RSInstruction*>(instr);
+      int r1 = rsInstr->R1Value();
+      DCHECK(r1 % 2 == 0);  // must be a reg pair
+      int b2 = rsInstr->B2Value();
+      intptr_t d2 = rsInstr->D2Value();
+      // only takes the rightmost 6 bits
+      int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      uint64_t opnd1 = static_cast<uint64_t>(get_low_register<uint32_t>(r1))
+                       << 32;
+      uint64_t opnd2 =
+          static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+      uint64_t r1_val = opnd1 | opnd2;
+      uint64_t alu_out = r1_val >> shiftBits;
+      set_low_register(r1, alu_out >> 32);
+      set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+      SetS390ConditionCode<int32_t>(alu_out, 0);
+      break;
+    }
+    default: { return DecodeFourByteArithmetic(instr); }
+  }
+  return true;
+}
+
+bool Simulator::DecodeFourByteArithmetic64Bit(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+  RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+
+  switch (op) {
+    case AGR:
+    case SGR:
+    case OGR:
+    case NGR:
+    case XGR: {
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int64_t r1_val = get_register(r1);
+      int64_t r2_val = get_register(r2);
+      bool isOF = false;
+      switch (op) {
+        case AGR:
+          isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+          r1_val += r2_val;
+          SetS390ConditionCode<int64_t>(r1_val, 0);
+          SetS390OverflowCode(isOF);
+          break;
+        case SGR:
+          isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+          r1_val -= r2_val;
+          SetS390ConditionCode<int64_t>(r1_val, 0);
+          SetS390OverflowCode(isOF);
+          break;
+        case OGR:
+          r1_val |= r2_val;
+          SetS390BitWiseConditionCode<uint64_t>(r1_val);
+          break;
+        case NGR:
+          r1_val &= r2_val;
+          SetS390BitWiseConditionCode<uint64_t>(r1_val);
+          break;
+        case XGR:
+          r1_val ^= r2_val;
+          SetS390BitWiseConditionCode<uint64_t>(r1_val);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      set_register(r1, r1_val);
+      break;
+    }
+    case AGFR: {
+      // Add Register (64 <- 32)  (Sign Extends 32-bit val)
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int64_t r1_val = get_register(r1);
+      int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+      bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+      r1_val += r2_val;
+      SetS390ConditionCode<int64_t>(r1_val, 0);
+      SetS390OverflowCode(isOF);
+      set_register(r1, r1_val);
+      break;
+    }
+    case SGFR: {
+      // Sub Reg (64 <- 32)
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      int64_t r1_val = get_register(r1);
+      int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+      bool isOF = false;
+      isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+      r1_val -= r2_val;
+      SetS390ConditionCode<int64_t>(r1_val, 0);
+      SetS390OverflowCode(isOF);
+      set_register(r1, r1_val);
+      break;
+    }
+    case AGRK:
+    case SGRK:
+    case NGRK:
+    case OGRK:
+    case XGRK: {
+      // 64-bit Non-clobbering arithmetics / bitwise ops.
+      int r1 = rrfInst->R1Value();
+      int r2 = rrfInst->R2Value();
+      int r3 = rrfInst->R3Value();
+      int64_t r2_val = get_register(r2);
+      int64_t r3_val = get_register(r3);
+      if (AGRK == op) {
+        bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int64_t);
+        SetS390ConditionCode<int64_t>(r2_val + r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_register(r1, r2_val + r3_val);
+      } else if (SGRK == op) {
+        bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int64_t);
+        SetS390ConditionCode<int64_t>(r2_val - r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_register(r1, r2_val - r3_val);
+      } else {
+        // Assume bitwise operation here
+        uint64_t bitwise_result = 0;
+        if (NGRK == op) {
+          bitwise_result = r2_val & r3_val;
+        } else if (OGRK == op) {
+          bitwise_result = r2_val | r3_val;
+        } else if (XGRK == op) {
+          bitwise_result = r2_val ^ r3_val;
+        }
+        SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+        set_register(r1, bitwise_result);
+      }
+      break;
+    }
+    case ALGRK:
+    case SLGRK: {
+      // 64-bit Non-clobbering unsigned arithmetics
+      int r1 = rrfInst->R1Value();
+      int r2 = rrfInst->R2Value();
+      int r3 = rrfInst->R3Value();
+      uint64_t r2_val = get_register(r2);
+      uint64_t r3_val = get_register(r3);
+      if (ALGRK == op) {
+        bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+        SetS390ConditionCode<uint64_t>(r2_val + r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_register(r1, r2_val + r3_val);
+      } else if (SLGRK == op) {
+        bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+        SetS390ConditionCode<uint64_t>(r2_val - r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_register(r1, r2_val - r3_val);
+      }
+      break;
+    }
+    case AGHI:
+    case MGHI: {
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int32_t r1 = riinst->R1Value();
+      int64_t i = static_cast<int64_t>(riinst->I2Value());
+      int64_t r1_val = get_register(r1);
+      bool isOF = false;
+      switch (op) {
+        case AGHI:
+          isOF = CheckOverflowForIntAdd(r1_val, i, int64_t);
+          r1_val += i;
+          break;
+        case MGHI:
+          isOF = CheckOverflowForMul(r1_val, i);
+          r1_val *= i;
+          break;  // no overflow indication is given
+        default:
+          break;
+      }
+      set_register(r1, r1_val);
+      SetS390ConditionCode<int64_t>(r1_val, 0);
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  return true;
+}
+
+/**
+ * Decodes and simulates four byte arithmetic instructions
+ */
+bool Simulator::DecodeFourByteArithmetic(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  // Pre-cast instruction to various types
+  RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+
+  switch (op) {
+    case AGR:
+    case SGR:
+    case OGR:
+    case NGR:
+    case XGR:
+    case AGFR:
+    case SGFR: {
+      DecodeFourByteArithmetic64Bit(instr);
+      break;
+    }
+    case ARK:
+    case SRK:
+    case NRK:
+    case ORK:
+    case XRK: {
+      // 32-bit Non-clobbering arithmetics / bitwise ops
+      int r1 = rrfInst->R1Value();
+      int r2 = rrfInst->R2Value();
+      int r3 = rrfInst->R3Value();
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      int32_t r3_val = get_low_register<int32_t>(r3);
+      if (ARK == op) {
+        bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int32_t);
+        SetS390ConditionCode<int32_t>(r2_val + r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_low_register(r1, r2_val + r3_val);
+      } else if (SRK == op) {
+        bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int32_t);
+        SetS390ConditionCode<int32_t>(r2_val - r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_low_register(r1, r2_val - r3_val);
+      } else {
+        // Assume bitwise operation here
+        uint32_t bitwise_result = 0;
+        if (NRK == op) {
+          bitwise_result = r2_val & r3_val;
+        } else if (ORK == op) {
+          bitwise_result = r2_val | r3_val;
+        } else if (XRK == op) {
+          bitwise_result = r2_val ^ r3_val;
+        }
+        SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+        set_low_register(r1, bitwise_result);
+      }
+      break;
+    }
+    case ALRK:
+    case SLRK: {
+      // 32-bit Non-clobbering unsigned arithmetics
+      int r1 = rrfInst->R1Value();
+      int r2 = rrfInst->R2Value();
+      int r3 = rrfInst->R3Value();
+      uint32_t r2_val = get_low_register<uint32_t>(r2);
+      uint32_t r3_val = get_low_register<uint32_t>(r3);
+      if (ALRK == op) {
+        bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+        SetS390ConditionCode<uint32_t>(r2_val + r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_low_register(r1, r2_val + r3_val);
+      } else if (SLRK == op) {
+        bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+        SetS390ConditionCode<uint32_t>(r2_val - r3_val, 0);
+        SetS390OverflowCode(isOF);
+        set_low_register(r1, r2_val - r3_val);
+      }
+      break;
+    }
+    case AGRK:
+    case SGRK:
+    case NGRK:
+    case OGRK:
+    case XGRK: {
+      DecodeFourByteArithmetic64Bit(instr);
+      break;
+    }
+    case ALGRK:
+    case SLGRK: {
+      DecodeFourByteArithmetic64Bit(instr);
+      break;
+    }
+    case AHI:
+    case MHI: {
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int32_t r1 = riinst->R1Value();
+      int32_t i = riinst->I2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      bool isOF = false;
+      switch (op) {
+        case AHI:
+          isOF = CheckOverflowForIntAdd(r1_val, i, int32_t);
+          r1_val += i;
+          break;
+        case MHI:
+          isOF = CheckOverflowForMul(r1_val, i);
+          r1_val *= i;
+          break;  // no overflow indication is given
+        default:
+          break;
+      }
+      set_low_register(r1, r1_val);
+      SetS390ConditionCode<int32_t>(r1_val, 0);
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    case AGHI:
+    case MGHI: {
+      DecodeFourByteArithmetic64Bit(instr);
+      break;
+    }
+    case MLR: {
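+      // Multiply Logical Register: the 32-bit value in the odd register of
+      // the even/odd pair (r1 + 1) is multiplied by r2; the 64-bit unsigned
+      // product is stored with its high word in r1 and its low word in r1 + 1.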
+      RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreinst->R1Value();
+      int r2 = rreinst->R2Value();
+      DCHECK(r1 % 2 == 0);
+
+      uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
+      uint32_t r2_val = get_low_register<uint32_t>(r2);
+      uint64_t product =
+          static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(r2_val);
+      int32_t high_bits = product >> 32;
+      int32_t low_bits = product & 0x00000000FFFFFFFF;
+      set_low_register(r1, high_bits);
+      set_low_register(r1 + 1, low_bits);
+      break;
+    }
+    case DLGR: {
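+      // Divide Logical (64 <- 128): the 128-bit dividend held in the even/odd
+      // pair r1/r1+1 is divided by r2; the remainder is placed in r1 and the
+      // quotient in r1 + 1. Only supported on 64-bit targets.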
+#ifdef V8_TARGET_ARCH_S390X
+      RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreinst->R1Value();
+      int r2 = rreinst->R2Value();
+      uint64_t r1_val = get_register(r1);
+      uint64_t r2_val = get_register(r2);
+      DCHECK(r1 % 2 == 0);
+      unsigned __int128 dividend = static_cast<unsigned __int128>(r1_val) << 64;
+      dividend += get_register(r1 + 1);
+      uint64_t remainder = dividend % r2_val;
+      uint64_t quotient = dividend / r2_val;
+      r1_val = remainder;
+      set_register(r1, remainder);
+      set_register(r1 + 1, quotient);
+#else
+      UNREACHABLE();
+#endif
+      break;
+    }
+    case DLR: {
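+      // Divide Logical (32 <- 64): the 64-bit dividend held in the even/odd
+      // pair r1/r1+1 is divided by r2; the remainder is placed in r1 and the
+      // quotient in r1 + 1.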
+      RREInstruction* rreinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreinst->R1Value();
+      int r2 = rreinst->R2Value();
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      uint32_t r2_val = get_low_register<uint32_t>(r2);
+      DCHECK(r1 % 2 == 0);
+      uint64_t dividend = static_cast<uint64_t>(r1_val) << 32;
+      dividend += get_low_register<uint32_t>(r1 + 1);
+      uint32_t remainder = dividend % r2_val;
+      uint32_t quotient = dividend / r2_val;
+      r1_val = remainder;
+      set_low_register(r1, remainder);
+      set_low_register(r1 + 1, quotient);
+      break;
+    }
+    case A:
+    case S:
+    case M:
+    case D:
+    case O:
+    case N:
+    case X: {
+      // 32-bit Reg-Mem instructions
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int r1 = rxinst->R1Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+      int32_t alu_out = 0;
+      bool isOF = false;
+      switch (op) {
+        case A:
+          isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+          alu_out = r1_val + mem_val;
+          SetS390ConditionCode<int32_t>(alu_out, 0);
+          SetS390OverflowCode(isOF);
+          break;
+        case S:
+          isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+          alu_out = r1_val - mem_val;
+          SetS390ConditionCode<int32_t>(alu_out, 0);
+          SetS390OverflowCode(isOF);
+          break;
+        case M:
+        case D:
+          UNIMPLEMENTED();
+          break;
+        case O:
+          alu_out = r1_val | mem_val;
+          SetS390BitWiseConditionCode<uint32_t>(alu_out);
+          break;
+        case N:
+          alu_out = r1_val & mem_val;
+          SetS390BitWiseConditionCode<uint32_t>(alu_out);
+          break;
+        case X:
+          alu_out = r1_val ^ mem_val;
+          SetS390BitWiseConditionCode<uint32_t>(alu_out);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      set_low_register(r1, alu_out);
+      break;
+    }
+    case OILL:
+    case OILH: {
+      RIInstruction* riInst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riInst->R1Value();
+      int i = riInst->I2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      if (OILL == op) {
+        // CC is set based on the 16 bits that are OR'd
+        SetS390BitWiseConditionCode<uint16_t>(r1_val | i);
+      } else if (OILH == op) {
+        // CC is set based on the 16 bits that are OR'd
+        SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) | i);
+        i = i << 16;
+      } else {
+        UNIMPLEMENTED();
+      }
+      set_low_register(r1, r1_val | i);
+      break;
+    }
+    case NILL:
+    case NILH: {
+      RIInstruction* riInst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riInst->R1Value();
+      int i = riInst->I2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      if (NILL == op) {
+        // CC is set based on the 16 bits that are AND'd
+        SetS390BitWiseConditionCode<uint16_t>(r1_val & i);
+        i |= 0xFFFF0000;
+      } else if (NILH == op) {
+        // CC is set based on the 16 bits that are AND'd
+        SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) & i);
+        i = (i << 16) | 0x0000FFFF;
+      } else {
+        UNIMPLEMENTED();
+      }
+      set_low_register(r1, r1_val & i);
+      break;
+    }
+    case AH:
+    case SH:
+    case MH: {
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int r1 = rxinst->R1Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxinst->D2Value();
+      intptr_t addr = b2_val + x2_val + d2_val;
+      int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+      int32_t alu_out = 0;
+      bool isOF = false;
+      if (AH == op) {
+        isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+        alu_out = r1_val + mem_val;
+      } else if (SH == op) {
+        isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+        alu_out = r1_val - mem_val;
+      } else if (MH == op) {
+        alu_out = r1_val * mem_val;
+      } else {
+        UNREACHABLE();
+      }
+      set_low_register(r1, alu_out);
+      if (MH != op) {  // MH does not change condition code
+        SetS390ConditionCode<int32_t>(alu_out, 0);
+        SetS390OverflowCode(isOF);
+      }
+      break;
+    }
+    case DSGR: {
+      RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+
+      DCHECK(r1 % 2 == 0);
+
+      int64_t dividend = get_register(r1 + 1);
+      int64_t divisor = get_register(r2);
+      set_register(r1, dividend % divisor);
+      set_register(r1 + 1, dividend / divisor);
+
+      break;
+    }
+    case FLOGR: {
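+      // Find Leftmost One: r1 receives the bit position (0..63, or 64 if r2
+      // is zero) of the leftmost one bit in r2, and r1 + 1 receives r2 with
+      // that bit cleared.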
+      RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+
+      DCHECK(r1 % 2 == 0);
+
+      int64_t r2_val = get_register(r2);
+
+      int i = 0;
+      for (; i < 64; i++) {
+        if (r2_val < 0) break;
+        r2_val <<= 1;
+      }
+
+      r2_val = get_register(r2);
+
+      // i == 64 only when r2 is zero; use a zero mask then (the AND below
+      // yields zero either way) and avoid the out-of-range shift.
+      uint64_t mask =
+          (i == 64) ? 0 : ~(static_cast<uint64_t>(1) << (63 - i));
+      set_register(r1, i);
+      set_register(r1 + 1, r2_val & mask);
+
+      break;
+    }
+    case MSR:
+    case MSGR: {  // they do not set overflow code
+      RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      if (op == MSR) {
+        int32_t r1_val = get_low_register<int32_t>(r1);
+        int32_t r2_val = get_low_register<int32_t>(r2);
+        set_low_register(r1, r1_val * r2_val);
+      } else if (op == MSGR) {
+        int64_t r1_val = get_register(r1);
+        int64_t r2_val = get_register(r2);
+        set_register(r1, r1_val * r2_val);
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    }
+    case MS: {
+      RXInstruction* rxinst = reinterpret_cast<RXInstruction*>(instr);
+      int r1 = rxinst->R1Value();
+      int b2 = rxinst->B2Value();
+      int x2 = rxinst->X2Value();
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      intptr_t d2_val = rxinst->D2Value();
+      int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      set_low_register(r1, r1_val * mem_val);
+      break;
+    }
+    case LGBR:
+    case LBR: {
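+      // Load Byte Register: sign-extend the low 8 bits of r2 into r1
+      // (into the full 64-bit register on S390X, otherwise into 32 bits).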
+      RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+#ifdef V8_TARGET_ARCH_S390X
+      int64_t r2_val = get_low_register<int64_t>(r2);
+      r2_val <<= 56;
+      r2_val >>= 56;
+      set_register(r1, r2_val);
+#else
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      r2_val <<= 24;
+      r2_val >>= 24;
+      set_low_register(r1, r2_val);
+#endif
+      break;
+    }
+    case LGHR:
+    case LHR: {
+      RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+#ifdef V8_TARGET_ARCH_S390X
+      int64_t r2_val = get_low_register<int64_t>(r2);
+      r2_val <<= 48;
+      r2_val >>= 48;
+      set_register(r1, r2_val);
+#else
+      int32_t r2_val = get_low_register<int32_t>(r2);
+      r2_val <<= 16;
+      r2_val >>= 16;
+      set_low_register(r1, r2_val);
+#endif
+      break;
+    }
+    case ALCR: {
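+      // Add Logical with Carry: r1 = r1 + r2, plus an extra 1 when the
+      // condition code left by the previous operation indicates a carry.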
+      RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      uint32_t r2_val = get_low_register<uint32_t>(r2);
+      uint32_t alu_out = 0;
+      bool isOF = false;
+
+      alu_out = r1_val + r2_val;
+      bool isOF_original = CheckOverflowForUIntAdd(r1_val, r2_val);
+      if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+        alu_out = alu_out + 1;
+        isOF = isOF_original || CheckOverflowForUIntAdd(alu_out, 1);
+      } else {
+        isOF = isOF_original;
+      }
+      set_low_register(r1, alu_out);
+      SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+      break;
+    }
+    case SLBR: {
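+      // Subtract Logical with Borrow: r1 = r1 - r2, with an extra 1
+      // subtracted depending on the condition code left by the previous
+      // operation.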
+      RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rrinst->R1Value();
+      int r2 = rrinst->R2Value();
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      uint32_t r2_val = get_low_register<uint32_t>(r2);
+      uint32_t alu_out = 0;
+      bool isOF = false;
+
+      alu_out = r1_val - r2_val;
+      bool isOF_original = CheckOverflowForUIntSub(r1_val, r2_val);
+      if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+        alu_out = alu_out - 1;
+        isOF = isOF_original || CheckOverflowForUIntSub(alu_out, 1);
+      } else {
+        isOF = isOF_original;
+      }
+      set_low_register(r1, alu_out);
+      SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+      break;
+    }
+    default: { return DecodeFourByteFloatingPoint(instr); }
+  }
+  return true;
+}
+
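+/**
+ * Decodes and simulates the unsigned integer <-> floating point conversion
+ * instructions (CDLFBR, CDLGBR, CELFBR, CELGBR, CLFDBR, CLFEBR, CLGDBR,
+ * CLGEBR).
+ */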
+void Simulator::DecodeFourByteFloatingPointIntConversion(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+  switch (op) {
+    case CDLFBR:
+    case CDLGBR:
+    case CELGBR:
+    case CLFDBR:
+    case CLGDBR:
+    case CELFBR:
+    case CLGEBR:
+    case CLFEBR: {
+      RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInstr->R1Value();
+      int r2 = rreInstr->R2Value();
+      if (op == CDLFBR) {
+        uint32_t r2_val = get_low_register<uint32_t>(r2);
+        double r1_val = static_cast<double>(r2_val);
+        set_d_register_from_double(r1, r1_val);
+      } else if (op == CELFBR) {
+        uint32_t r2_val = get_low_register<uint32_t>(r2);
+        float r1_val = static_cast<float>(r2_val);
+        set_d_register_from_float32(r1, r1_val);
+      } else if (op == CDLGBR) {
+        uint64_t r2_val = get_register(r2);
+        double r1_val = static_cast<double>(r2_val);
+        set_d_register_from_double(r1, r1_val);
+      } else if (op == CELGBR) {
+        uint64_t r2_val = get_register(r2);
+        float r1_val = static_cast<float>(r2_val);
+        set_d_register_from_float32(r1, r1_val);
+      } else if (op == CLFDBR) {
+        double r2_val = get_double_from_d_register(r2);
+        uint32_t r1_val = static_cast<uint32_t>(r2_val);
+        set_low_register(r1, r1_val);
+        SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+      } else if (op == CLFEBR) {
+        float r2_val = get_float32_from_d_register(r2);
+        uint32_t r1_val = static_cast<uint32_t>(r2_val);
+        set_low_register(r1, r1_val);
+        SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+      } else if (op == CLGDBR) {
+        double r2_val = get_double_from_d_register(r2);
+        uint64_t r1_val = static_cast<uint64_t>(r2_val);
+        set_register(r1, r1_val);
+        SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+      } else if (op == CLGEBR) {
+        float r2_val = get_float32_from_d_register(r2);
+        uint64_t r1_val = static_cast<uint64_t>(r2_val);
+        set_register(r1, r1_val);
+        SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
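+/**
+ * Decodes and simulates the floating point to signed integer conversion
+ * instructions (CFDBR, CFEBR, CGDBR, CGEBR), applying the rounding mode
+ * encoded in the M3 field.
+ */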
+void Simulator::DecodeFourByteFloatingPointRound(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+  RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+  int r1 = rreInstr->R1Value();
+  int r2 = rreInstr->R2Value();
+  double r2_val = get_double_from_d_register(r2);
+  float r2_fval = get_float32_from_d_register(r2);
+
+  switch (op) {
+    case CFDBR: {
+      int mask_val = rreInstr->M3Value();
+      int32_t r1_val = 0;
+
+      SetS390RoundConditionCode(r2_val, INT32_MAX, INT32_MIN);
+
+      switch (mask_val) {
+        case CURRENT_ROUNDING_MODE:
+        case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+          r1_val = static_cast<int32_t>(r2_val);
+          break;
+        }
+        case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+          double ceil_val = std::ceil(r2_val);
+          double floor_val = std::floor(r2_val);
+          double sub_val1 = std::fabs(r2_val - floor_val);
+          double sub_val2 = std::fabs(r2_val - ceil_val);
+          if (sub_val1 > sub_val2) {
+            r1_val = static_cast<int32_t>(ceil_val);
+          } else if (sub_val1 < sub_val2) {
+            r1_val = static_cast<int32_t>(floor_val);
+          } else {  // round away from zero:
+            if (r2_val > 0.0) {
+              r1_val = static_cast<int32_t>(ceil_val);
+            } else {
+              r1_val = static_cast<int32_t>(floor_val);
+            }
+          }
+          break;
+        }
+        case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+          double ceil_val = std::ceil(r2_val);
+          double floor_val = std::floor(r2_val);
+          double sub_val1 = std::fabs(r2_val - floor_val);
+          double sub_val2 = std::fabs(r2_val - ceil_val);
+          if (sub_val1 > sub_val2) {
+            r1_val = static_cast<int32_t>(ceil_val);
+          } else if (sub_val1 < sub_val2) {
+            r1_val = static_cast<int32_t>(floor_val);
+          } else {  // check which one is even:
+            int32_t c_v = static_cast<int32_t>(ceil_val);
+            int32_t f_v = static_cast<int32_t>(floor_val);
+            if (f_v % 2 == 0)
+              r1_val = f_v;
+            else
+              r1_val = c_v;
+          }
+          break;
+        }
+        case ROUND_TOWARD_0: {
+          // check for overflow, cast r2_val to 64bit integer
+          // then check value within the range of INT_MIN and INT_MAX
+          // and set condition code accordingly
+          int64_t temp = static_cast<int64_t>(r2_val);
+          if (temp < INT_MIN || temp > INT_MAX) {
+            condition_reg_ = CC_OF;
+          }
+          r1_val = static_cast<int32_t>(r2_val);
+          break;
+        }
+        case ROUND_TOWARD_PLUS_INFINITE: {
+          r1_val = static_cast<int32_t>(std::ceil(r2_val));
+          break;
+        }
+        case ROUND_TOWARD_MINUS_INFINITE: {
+          // check for overflow, cast r2_val to 64bit integer
+          // then check value within the range of INT_MIN and INT_MAX
+          // and set condition code accordingly
+          int64_t temp = static_cast<int64_t>(std::floor(r2_val));
+          if (temp < INT_MIN || temp > INT_MAX) {
+            condition_reg_ = CC_OF;
+          }
+          r1_val = static_cast<int32_t>(std::floor(r2_val));
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
+      set_low_register(r1, r1_val);
+      break;
+    }
+    case CGDBR: {
+      int mask_val = rreInstr->M3Value();
+      int64_t r1_val = 0;
+
+      SetS390RoundConditionCode(r2_val, INT64_MAX, INT64_MIN);
+
+      switch (mask_val) {
+        case CURRENT_ROUNDING_MODE:
+        case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+        case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+          UNIMPLEMENTED();
+          break;
+        }
+        case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+          double ceil_val = std::ceil(r2_val);
+          double floor_val = std::floor(r2_val);
+          if (std::abs(r2_val - floor_val) > std::abs(r2_val - ceil_val)) {
+            r1_val = static_cast<int64_t>(ceil_val);
+          } else if (std::abs(r2_val - floor_val) <
+                     std::abs(r2_val - ceil_val)) {
+            r1_val = static_cast<int64_t>(floor_val);
+          } else {  // check which one is even:
+            int64_t c_v = static_cast<int64_t>(ceil_val);
+            int64_t f_v = static_cast<int64_t>(floor_val);
+            if (f_v % 2 == 0)
+              r1_val = f_v;
+            else
+              r1_val = c_v;
+          }
+          break;
+        }
+        case ROUND_TOWARD_0: {
+          r1_val = static_cast<int64_t>(r2_val);
+          break;
+        }
+        case ROUND_TOWARD_PLUS_INFINITE: {
+          r1_val = static_cast<int64_t>(std::ceil(r2_val));
+          break;
+        }
+        case ROUND_TOWARD_MINUS_INFINITE: {
+          r1_val = static_cast<int64_t>(std::floor(r2_val));
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
+      set_register(r1, r1_val);
+      break;
+    }
+    case CGEBR: {
+      int mask_val = rreInstr->M3Value();
+      int64_t r1_val = 0;
+
+      SetS390RoundConditionCode(r2_fval, INT64_MAX, INT64_MIN);
+
+      switch (mask_val) {
+        case CURRENT_ROUNDING_MODE:
+        case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+        case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+          UNIMPLEMENTED();
+          break;
+        }
+        case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+          float ceil_val = std::ceil(r2_fval);
+          float floor_val = std::floor(r2_fval);
+          if (std::abs(r2_fval - floor_val) > std::abs(r2_fval - ceil_val)) {
+            r1_val = static_cast<int64_t>(ceil_val);
+          } else if (std::abs(r2_fval - floor_val) <
+                     std::abs(r2_fval - ceil_val)) {
+            r1_val = static_cast<int64_t>(floor_val);
+          } else {  // check which one is even:
+            int64_t c_v = static_cast<int64_t>(ceil_val);
+            int64_t f_v = static_cast<int64_t>(floor_val);
+            if (f_v % 2 == 0)
+              r1_val = f_v;
+            else
+              r1_val = c_v;
+          }
+          break;
+        }
+        case ROUND_TOWARD_0: {
+          r1_val = static_cast<int64_t>(r2_fval);
+          break;
+        }
+        case ROUND_TOWARD_PLUS_INFINITE: {
+          r1_val = static_cast<int64_t>(std::ceil(r2_fval));
+          break;
+        }
+        case ROUND_TOWARD_MINUS_INFINITE: {
+          r1_val = static_cast<int64_t>(std::floor(r2_fval));
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
+      set_register(r1, r1_val);
+      break;
+    }
+    case CFEBR: {
+      int mask_val = rreInstr->M3Value();
+      int32_t r1_val = 0;
+
+      SetS390RoundConditionCode(r2_fval, INT32_MAX, INT32_MIN);
+
+      switch (mask_val) {
+        case CURRENT_ROUNDING_MODE:
+        case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+          r1_val = static_cast<int32_t>(r2_fval);
+          break;
+        }
+        case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+          float ceil_val = std::ceil(r2_fval);
+          float floor_val = std::floor(r2_fval);
+          float sub_val1 = std::fabs(r2_fval - floor_val);
+          float sub_val2 = std::fabs(r2_fval - ceil_val);
+          if (sub_val1 > sub_val2) {
+            r1_val = static_cast<int32_t>(ceil_val);
+          } else if (sub_val1 < sub_val2) {
+            r1_val = static_cast<int32_t>(floor_val);
+          } else {  // round away from zero:
+            if (r2_fval > 0.0) {
+              r1_val = static_cast<int32_t>(ceil_val);
+            } else {
+              r1_val = static_cast<int32_t>(floor_val);
+            }
+          }
+          break;
+        }
+        case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+          float ceil_val = std::ceil(r2_fval);
+          float floor_val = std::floor(r2_fval);
+          float sub_val1 = std::fabs(r2_fval - floor_val);
+          float sub_val2 = std::fabs(r2_fval - ceil_val);
+          if (sub_val1 > sub_val2) {
+            r1_val = static_cast<int32_t>(ceil_val);
+          } else if (sub_val1 < sub_val2) {
+            r1_val = static_cast<int32_t>(floor_val);
+          } else {  // check which one is even:
+            int32_t c_v = static_cast<int32_t>(ceil_val);
+            int32_t f_v = static_cast<int32_t>(floor_val);
+            if (f_v % 2 == 0)
+              r1_val = f_v;
+            else
+              r1_val = c_v;
+          }
+          break;
+        }
+        case ROUND_TOWARD_0: {
+          // check for overflow, cast r2_fval to 64bit integer
+          // then check value within the range of INT_MIN and INT_MAX
+          // and set condition code accordingly
+          int64_t temp = static_cast<int64_t>(r2_fval);
+          if (temp < INT_MIN || temp > INT_MAX) {
+            condition_reg_ = CC_OF;
+          }
+          r1_val = static_cast<int32_t>(r2_fval);
+          break;
+        }
+        case ROUND_TOWARD_PLUS_INFINITE: {
+          r1_val = static_cast<int32_t>(std::ceil(r2_fval));
+          break;
+        }
+        case ROUND_TOWARD_MINUS_INFINITE: {
+          // check for overflow, cast r2_fval to 64bit integer
+          // then check value within the range of INT_MIN and INT_MAX
+          // and set condition code accordingly
+          int64_t temp = static_cast<int64_t>(std::floor(r2_fval));
+          if (temp < INT_MIN || temp > INT_MAX) {
+            condition_reg_ = CC_OF;
+          }
+          r1_val = static_cast<int32_t>(std::floor(r2_fval));
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
+      set_low_register(r1, r1_val);
+
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+/**
+ * Decodes and simulates four byte floating point instructions
+ */
+bool Simulator::DecodeFourByteFloatingPoint(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  switch (op) {
+    case ADBR:
+    case AEBR:
+    case SDBR:
+    case SEBR:
+    case MDBR:
+    case MEEBR:
+    case MADBR:
+    case DDBR:
+    case DEBR:
+    case CDBR:
+    case CEBR:
+    case CDFBR:
+    case CDGBR:
+    case CEGBR:
+    case CGEBR:
+    case CFDBR:
+    case CGDBR:
+    case SQDBR:
+    case SQEBR:
+    case CFEBR:
+    case CEFBR:
+    case LCDBR:
+    case LPDBR:
+    case LPEBR: {
+      RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInstr->R1Value();
+      int r2 = rreInstr->R2Value();
+      double r1_val = get_double_from_d_register(r1);
+      double r2_val = get_double_from_d_register(r2);
+      float fr1_val = get_float32_from_d_register(r1);
+      float fr2_val = get_float32_from_d_register(r2);
+      if (op == ADBR) {
+        r1_val += r2_val;
+        set_d_register_from_double(r1, r1_val);
+        SetS390ConditionCode<double>(r1_val, 0);
+      } else if (op == AEBR) {
+        fr1_val += fr2_val;
+        set_d_register_from_float32(r1, fr1_val);
+        SetS390ConditionCode<float>(fr1_val, 0);
+      } else if (op == SDBR) {
+        r1_val -= r2_val;
+        set_d_register_from_double(r1, r1_val);
+        SetS390ConditionCode<double>(r1_val, 0);
+      } else if (op == SEBR) {
+        fr1_val -= fr2_val;
+        set_d_register_from_float32(r1, fr1_val);
+        SetS390ConditionCode<float>(fr1_val, 0);
+      } else if (op == MDBR) {
+        r1_val *= r2_val;
+        set_d_register_from_double(r1, r1_val);
+        SetS390ConditionCode<double>(r1_val, 0);
+      } else if (op == MEEBR) {
+        fr1_val *= fr2_val;
+        set_d_register_from_float32(r1, fr1_val);
+        SetS390ConditionCode<float>(fr1_val, 0);
+      } else if (op == MADBR) {
+        RRDInstruction* rrdInstr = reinterpret_cast<RRDInstruction*>(instr);
+        int r1 = rrdInstr->R1Value();
+        int r2 = rrdInstr->R2Value();
+        int r3 = rrdInstr->R3Value();
+        double r1_val = get_double_from_d_register(r1);
+        double r2_val = get_double_from_d_register(r2);
+        double r3_val = get_double_from_d_register(r3);
+        r1_val += r2_val * r3_val;
+        set_d_register_from_double(r1, r1_val);
+        SetS390ConditionCode<double>(r1_val, 0);
+      } else if (op == DDBR) {
+        r1_val /= r2_val;
+        set_d_register_from_double(r1, r1_val);
+        SetS390ConditionCode<double>(r1_val, 0);
+      } else if (op == DEBR) {
+        fr1_val /= fr2_val;
+        set_d_register_from_float32(r1, fr1_val);
+        SetS390ConditionCode<float>(fr1_val, 0);
+      } else if (op == CDBR) {
+        if (isNaN(r1_val) || isNaN(r2_val)) {
+          condition_reg_ = CC_OF;
+        } else {
+          SetS390ConditionCode<double>(r1_val, r2_val);
+        }
+      } else if (op == CEBR) {
+        if (isNaN(fr1_val) || isNaN(fr2_val)) {
+          condition_reg_ = CC_OF;
+        } else {
+          SetS390ConditionCode<float>(fr1_val, fr2_val);
+        }
+      } else if (op == CDGBR) {
+        int64_t r2_val = get_register(r2);
+        double r1_val = static_cast<double>(r2_val);
+        set_d_register_from_double(r1, r1_val);
+      } else if (op == CEGBR) {
+        int64_t fr2_val = get_register(r2);
+        float fr1_val = static_cast<float>(fr2_val);
+        set_d_register_from_float32(r1, fr1_val);
+      } else if (op == CDFBR) {
+        int32_t r2_val = get_low_register<int32_t>(r2);
+        double r1_val = static_cast<double>(r2_val);
+        set_d_register_from_double(r1, r1_val);
+      } else if (op == CEFBR) {
+        int32_t fr2_val = get_low_register<int32_t>(r2);
+        float fr1_val = static_cast<float>(fr2_val);
+        set_d_register_from_float32(r1, fr1_val);
+      } else if (op == CFDBR) {
+        DecodeFourByteFloatingPointRound(instr);
+      } else if (op == CGDBR) {
+        DecodeFourByteFloatingPointRound(instr);
+      } else if (op == CGEBR) {
+        DecodeFourByteFloatingPointRound(instr);
+      } else if (op == SQDBR) {
+        r1_val = std::sqrt(r2_val);
+        set_d_register_from_double(r1, r1_val);
+      } else if (op == SQEBR) {
+        fr1_val = std::sqrt(fr2_val);
+        set_d_register_from_float32(r1, fr1_val);
+      } else if (op == CFEBR) {
+        DecodeFourByteFloatingPointRound(instr);
+      } else if (op == LCDBR) {
+        r1_val = -r2_val;
+        set_d_register_from_double(r1, r1_val);
+        if (r2_val != r2_val) {  // input is NaN
+          condition_reg_ = CC_OF;
+        } else if (r2_val == 0) {
+          condition_reg_ = CC_EQ;
+        } else if (r2_val < 0) {
+          condition_reg_ = CC_LT;
+        } else if (r2_val > 0) {
+          condition_reg_ = CC_GT;
+        }
+      } else if (op == LPDBR) {
+        r1_val = std::fabs(r2_val);
+        set_d_register_from_double(r1, r1_val);
+        if (r2_val != r2_val) {  // input is NaN
+          condition_reg_ = CC_OF;
+        } else if (r2_val == 0) {
+          condition_reg_ = CC_EQ;
+        } else {
+          condition_reg_ = CC_GT;
+        }
+      } else if (op == LPEBR) {
+        fr1_val = std::fabs(fr2_val);
+        set_d_register_from_float32(r1, fr1_val);
+        if (fr2_val != fr2_val) {  // input is NaN
+          condition_reg_ = CC_OF;
+        } else if (fr2_val == 0) {
+          condition_reg_ = CC_EQ;
+        } else {
+          condition_reg_ = CC_GT;
+        }
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    }
+    case CDLFBR:
+    case CDLGBR:
+    case CELGBR:
+    case CLFDBR:
+    case CELFBR:
+    case CLGDBR:
+    case CLGEBR:
+    case CLFEBR: {
+      DecodeFourByteFloatingPointIntConversion(instr);
+      break;
+    }
+    case TMLL: {
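+      // Test under Mask Low Low: test the low 16 bits of r1 under the 16-bit
+      // immediate mask. The condition code reflects whether the selected
+      // bits are all zeros, all ones, or mixed (in the mixed case the
+      // leftmost selected bit determines the code).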
+      RIInstruction* riinst = reinterpret_cast<RIInstruction*>(instr);
+      int r1 = riinst->R1Value();
+      int mask = riinst->I2Value() & 0x0000FFFF;
+      if (mask == 0) {
+        condition_reg_ = 0x0;
+        break;
+      }
+      uint32_t r1_val = get_low_register<uint32_t>(r1);
+      r1_val = r1_val & 0x0000FFFF;  // uses only the low 16 bits
+
+      // Test if all selected bits are Zero
+      bool allSelectedBitsAreZeros = true;
+      for (int i = 0; i < 16; i++) {
+        if (mask & (1 << i)) {
+          if (r1_val & (1 << i)) {
+            allSelectedBitsAreZeros = false;
+            break;
+          }
+        }
+      }
+      if (allSelectedBitsAreZeros) {
+        condition_reg_ = 0x8;
+        break;  // Done!
+      }
+
+      // Test if all selected bits are one
+      bool allSelectedBitsAreOnes = true;
+      for (int i = 0; i < 16; i++) {
+        if (mask & (1 << i)) {
+          if (!(r1_val & (1 << i))) {
+            allSelectedBitsAreOnes = false;
+            break;
+          }
+        }
+      }
+      if (allSelectedBitsAreOnes) {
+        condition_reg_ = 0x1;
+        break;  // Done!
+      }
+
+      // Now we know selected bits mixed zeros and ones
+      // Test if the leftmost bit is zero or one
+      for (int i = 15; i >= 0; i--) {
+        if (mask & (1 << i)) {
+          if (r1_val & (1 << i)) {
+            // leftmost bit is one
+            condition_reg_ = 0x2;
+          } else {
+            // leftmost bit is zero
+            condition_reg_ = 0x4;
+          }
+          break;  // Done!
+        }
+      }
+      break;
+    }
+    case LEDBR: {
+      RREInstruction* rreInst = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInst->R1Value();
+      int r2 = rreInst->R2Value();
+      double r2_val = get_double_from_d_register(r2);
+      set_d_register_from_float32(r1, static_cast<float>(r2_val));
+      break;
+    }
+    case FIDBRA: {
+      RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+      int r1 = rrfInst->R1Value();
+      int r2 = rrfInst->R2Value();
+      int m3 = rrfInst->M3Value();
+      double r2_val = get_double_from_d_register(r2);
+      DCHECK(rrfInst->M4Value() == 0);
+      switch (m3) {
+        case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+          set_d_register_from_double(r1, round(r2_val));
+          break;
+        case Assembler::FIDBRA_ROUND_TOWARD_0:
+          set_d_register_from_double(r1, trunc(r2_val));
+          break;
+        case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+          set_d_register_from_double(r1, std::ceil(r2_val));
+          break;
+        case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+          set_d_register_from_double(r1, std::floor(r2_val));
+          break;
+        default:
+          UNIMPLEMENTED();
+          break;
+      }
+      break;
+    }
+    case FIEBRA: {
+      RRFInstruction* rrfInst = reinterpret_cast<RRFInstruction*>(instr);
+      int r1 = rrfInst->R1Value();
+      int r2 = rrfInst->R2Value();
+      int m3 = rrfInst->M3Value();
+      float r2_val = get_float32_from_d_register(r2);
+      DCHECK(rrfInst->M4Value() == 0);
+      switch (m3) {
+        case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+          set_d_register_from_float32(r1, round(r2_val));
+          break;
+        case Assembler::FIDBRA_ROUND_TOWARD_0:
+          set_d_register_from_float32(r1, trunc(r2_val));
+          break;
+        case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+          set_d_register_from_float32(r1, std::ceil(r2_val));
+          break;
+        case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+          set_d_register_from_float32(r1, std::floor(r2_val));
+          break;
+        default:
+          UNIMPLEMENTED();
+          break;
+      }
+      break;
+    }
+    case MSDBR: {
+      UNIMPLEMENTED();
+      break;
+    }
+    case LDEBR: {
+      RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
+      int r1 = rreInstr->R1Value();
+      int r2 = rreInstr->R2Value();
+      float fp_val = get_float32_from_d_register(r2);
+      double db_val = static_cast<double>(fp_val);
+      set_d_register_from_double(r1, db_val);
+      break;
+    }
+    default: {
+      UNREACHABLE();
+      return false;
+    }
+  }
+  return true;
+}
+
+// Decode routine for six-byte instructions
+bool Simulator::DecodeSixByte(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  // Pre-cast instruction to various types
+  RIEInstruction* rieInstr = reinterpret_cast<RIEInstruction*>(instr);
+  RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+  RSYInstruction* rsyInstr = reinterpret_cast<RSYInstruction*>(instr);
+  RXEInstruction* rxeInstr = reinterpret_cast<RXEInstruction*>(instr);
+  RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+  SIYInstruction* siyInstr = reinterpret_cast<SIYInstruction*>(instr);
+  SILInstruction* silInstr = reinterpret_cast<SILInstruction*>(instr);
+  SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+
+  switch (op) {
+    case CLIY: {
+      // Compare Logical Immediate (Mem - Imm) (8)
+      int b1 = siyInstr->B1Value();
+      int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+      intptr_t d1_val = siyInstr->D1Value();
+      intptr_t addr = b1_val + d1_val;
+      uint8_t mem_val = ReadB(addr);
+      uint8_t imm_val = siyInstr->I2Value();
+      SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+      break;
+    }
+    case TMY: {
+      // Test Under Mask (Mem - Imm) (8)
+      int b1 = siyInstr->B1Value();
+      int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+      intptr_t d1_val = siyInstr->D1Value();
+      intptr_t addr = b1_val + d1_val;
+      uint8_t mem_val = ReadB(addr);
+      uint8_t imm_val = siyInstr->I2Value();
+      uint8_t selected_bits = mem_val & imm_val;
+      // CC0: Selected bits are zero
+      // CC1: Selected bits mixed zeros and ones
+      // CC3: Selected bits all ones
+      if (0 == selected_bits) {
+        condition_reg_ = CC_EQ;  // CC0
+      } else if (selected_bits == imm_val) {
+        condition_reg_ = 0x1;  // CC3
+      } else {
+        condition_reg_ = 0x4;  // CC1
+      }
+      break;
+    }
+    case LDEB: {
+      // Load Lengthened: load a 32-bit float from memory and widen to double
+      int r1 = rxeInstr->R1Value();
+      int rb = rxeInstr->B2Value();
+      int rx = rxeInstr->X2Value();
+      int offset = rxeInstr->D2Value();
+      int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+      int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+      double ret = static_cast<double>(
+          *reinterpret_cast<float*>(rx_val + rb_val + offset));
+      set_d_register_from_double(r1, ret);
+      break;
+    }
+    case LAY: {
+      // Load Address
+      int r1 = rxyInstr->R1Value();
+      int rb = rxyInstr->B2Value();
+      int rx = rxyInstr->X2Value();
+      int offset = rxyInstr->D2Value();
+      int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+      int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+      set_register(r1, rx_val + rb_val + offset);
+      break;
+    }
+    case LARL: {
+      // Load Address Relative Long
+      int r1 = rilInstr->R1Value();
+      intptr_t offset = rilInstr->I2Value() * 2;
+      set_register(r1, get_pc() + offset);
+      break;
+    }
+    case LLILF: {
+      // Load Logical into lower 32-bits (zero extend upper 32-bits)
+      int r1 = rilInstr->R1Value();
+      uint64_t imm = static_cast<uint64_t>(rilInstr->I2UnsignedValue());
+      set_register(r1, imm);
+      break;
+    }
+    case LLIHF: {
+      // Load Logical Immediate into high word
+      int r1 = rilInstr->R1Value();
+      uint64_t imm = static_cast<uint64_t>(rilInstr->I2UnsignedValue());
+      set_register(r1, imm << 32);
+      break;
+    }
+    case OILF:
+    case NILF:
+    case IILF: {
+      // Bitwise Op on lower 32-bits
+      int r1 = rilInstr->R1Value();
+      uint32_t imm = rilInstr->I2UnsignedValue();
+      uint32_t alu_out = get_low_register<uint32_t>(r1);
+      if (NILF == op) {
+        alu_out &= imm;
+        SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      } else if (OILF == op) {
+        alu_out |= imm;
+        SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      } else if (op == IILF) {
+        alu_out = imm;
+      } else {
+        DCHECK(false);
+      }
+      set_low_register(r1, alu_out);
+      break;
+    }
+    case OIHF:
+    case NIHF:
+    case IIHF: {
+      // Bitwise Op on upper 32-bits
+      int r1 = rilInstr->R1Value();
+      uint32_t imm = rilInstr->I2Value();
+      uint32_t alu_out = get_high_register<uint32_t>(r1);
+      if (op == NIHF) {
+        alu_out &= imm;
+        SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      } else if (op == OIHF) {
+        alu_out |= imm;
+        SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      } else if (op == IIHF) {
+        alu_out = imm;
+      } else {
+        DCHECK(false);
+      }
+      set_high_register(r1, alu_out);
+      break;
+    }
+    case CLFI: {
+      // Compare Logical with Immediate (32)
+      int r1 = rilInstr->R1Value();
+      uint32_t imm = rilInstr->I2UnsignedValue();
+      SetS390ConditionCode<uint32_t>(get_low_register<uint32_t>(r1), imm);
+      break;
+    }
+    case CFI: {
+      // Compare with Immediate (32)
+      int r1 = rilInstr->R1Value();
+      int32_t imm = rilInstr->I2Value();
+      SetS390ConditionCode<int32_t>(get_low_register<int32_t>(r1), imm);
+      break;
+    }
+    case CLGFI: {
+      // Compare Logical with Immediate (64)
+      int r1 = rilInstr->R1Value();
+      uint64_t imm = static_cast<uint64_t>(rilInstr->I2UnsignedValue());
+      SetS390ConditionCode<uint64_t>(get_register(r1), imm);
+      break;
+    }
+    case CGFI: {
+      // Compare with Immediate (64)
+      int r1 = rilInstr->R1Value();
+      int64_t imm = static_cast<int64_t>(rilInstr->I2Value());
+      SetS390ConditionCode<int64_t>(get_register(r1), imm);
+      break;
+    }
+    case BRASL: {
+      // Branch and Save Relative Long
+      int r1 = rilInstr->R1Value();
+      intptr_t d2 = rilInstr->I2Value();
+      intptr_t pc = get_pc();
+      set_register(r1, pc + 6);  // save next instruction to register
+      set_pc(pc + d2 * 2);       // branch to the relative target
+      break;
+    }
+    case BRCL: {
+      // Branch on Condition Relative Long
+      Condition m1 = (Condition)rilInstr->R1Value();
+      if (TestConditionCode((Condition)m1)) {
+        intptr_t offset = rilInstr->I2Value() * 2;
+        set_pc(get_pc() + offset);
+      }
+      break;
+    }
+    case LMG:
+    case STMG: {
+      // Load/Store Multiple (64).
+      int r1 = rsyInstr->R1Value();
+      int r3 = rsyInstr->R3Value();
+      int rb = rsyInstr->B2Value();
+      int offset = rsyInstr->D2Value();
+
+      // Regs roll around if r3 is less than r1.
+      // Artificially increase r3 by 16 so we can calculate
+      // the number of regs stored properly.
+      if (r3 < r1) r3 += 16;
+
+      int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+
+      // Load or store each register in ascending order.
+      for (int i = 0; i <= r3 - r1; i++) {
+        if (op == LMG) {
+          int64_t value = ReadDW(rb_val + offset + 8 * i);
+          set_register((r1 + i) % 16, value);
+        } else if (op == STMG) {
+          int64_t value = get_register((r1 + i) % 16);
+          WriteDW(rb_val + offset + 8 * i, value);
+        } else {
+          DCHECK(false);
+        }
+      }
+      break;
+    }
+    case SLLK:
+    case RLL:
+    case SRLK:
+    case SLLG:
+    case RLLG:
+    case SRLG: {
+      DecodeSixByteBitShift(instr);
+      break;
+    }
+    case SLAK:
+    case SRAK: {
+      // 32-bit non-clobbering shift-left/right arithmetic
+      int r1 = rsyInstr->R1Value();
+      int r3 = rsyInstr->R3Value();
+      int b2 = rsyInstr->B2Value();
+      intptr_t d2 = rsyInstr->D2Value();
+      // only takes rightmost 6 bits
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      int32_t r3_val = get_low_register<int32_t>(r3);
+      int32_t alu_out = 0;
+      bool isOF = false;
+      if (op == SLAK) {
+        isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+        alu_out = r3_val << shiftBits;
+      } else if (op == SRAK) {
+        alu_out = r3_val >> shiftBits;
+      }
+      set_low_register(r1, alu_out);
+      SetS390ConditionCode<int32_t>(alu_out, 0);
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    case SLAG:
+    case SRAG: {
+      // 64-bit non-clobbering shift-left/right arithmetic
+      int r1 = rsyInstr->R1Value();
+      int r3 = rsyInstr->R3Value();
+      int b2 = rsyInstr->B2Value();
+      intptr_t d2 = rsyInstr->D2Value();
+      // only takes rightmost 6 bits
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      int64_t r3_val = get_register(r3);
+      intptr_t alu_out = 0;
+      bool isOF = false;
+      if (op == SLAG) {
+        isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+        alu_out = r3_val << shiftBits;
+      } else if (op == SRAG) {
+        alu_out = r3_val >> shiftBits;
+      }
+      set_register(r1, alu_out);
+      SetS390ConditionCode<intptr_t>(alu_out, 0);
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    case LMY:
+    case STMY: {
+      RSYInstruction* rsyInstr = reinterpret_cast<RSYInstruction*>(instr);
+      // Load/Store Multiple (32)
+      int r1 = rsyInstr->R1Value();
+      int r3 = rsyInstr->R3Value();
+      int b2 = rsyInstr->B2Value();
+      int offset = rsyInstr->D2Value();
+
+      // Regs roll around if r3 is less than r1.
+      // Artificially increase r3 by 16 so we can calculate
+      // the number of regs stored properly.
+      if (r3 < r1) r3 += 16;
+
+      int32_t b2_val = (b2 == 0) ? 0 : get_low_register<int32_t>(b2);
+
+      // Load or store each register in ascending order.
+      for (int i = 0; i <= r3 - r1; i++) {
+        if (op == LMY) {
+          int32_t value = ReadW(b2_val + offset + 4 * i, instr);
+          set_low_register((r1 + i) % 16, value);
+        } else {
+          int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+          WriteW(b2_val + offset + 4 * i, value, instr);
+        }
+      }
+      break;
+    }
+    case LT:
+    case LTG: {
+      // Load and Test (32/64)
+      int r1 = rxyInstr->R1Value();
+      int x2 = rxyInstr->X2Value();
+      int b2 = rxyInstr->B2Value();
+      int d2 = rxyInstr->D2Value();
+
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      intptr_t addr = x2_val + b2_val + d2;
+
+      if (op == LT) {
+        int32_t value = ReadW(addr, instr);
+        set_low_register(r1, value);
+        SetS390ConditionCode<int32_t>(value, 0);
+      } else if (op == LTG) {
+        int64_t value = ReadDW(addr);
+        set_register(r1, value);
+        SetS390ConditionCode<int64_t>(value, 0);
+      }
+      break;
+    }
+    case LY:
+    case LB:
+    case LGB:
+    case LG:
+    case LGF:
+    case LGH:
+    case LLGF:
+    case STG:
+    case STY:
+    case STCY:
+    case STHY:
+    case STEY:
+    case LDY:
+    case LHY:
+    case STDY:
+    case LEY: {
+      // Miscellaneous Loads and Stores
+      int r1 = rxyInstr->R1Value();
+      int x2 = rxyInstr->X2Value();
+      int b2 = rxyInstr->B2Value();
+      int d2 = rxyInstr->D2Value();
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      intptr_t addr = x2_val + b2_val + d2;
+      if (op == LY) {
+        uint32_t mem_val = ReadWU(addr, instr);
+        set_low_register(r1, mem_val);
+      } else if (op == LB) {
+        int32_t mem_val = ReadB(addr);
+        set_low_register(r1, mem_val);
+      } else if (op == LGB) {
+        int64_t mem_val = ReadB(addr);
+        set_register(r1, mem_val);
+      } else if (op == LG) {
+        int64_t mem_val = ReadDW(addr);
+        set_register(r1, mem_val);
+      } else if (op == LGF) {
+        int64_t mem_val = static_cast<int64_t>(ReadW(addr, instr));
+        set_register(r1, mem_val);
+      } else if (op == LGH) {
+        int64_t mem_val = static_cast<int64_t>(ReadH(addr, instr));
+        set_register(r1, mem_val);
+      } else if (op == LLGF) {
+        uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr, instr));
+        set_register(r1, mem_val);
+      } else if (op == LDY) {
+        uint64_t dbl_val = *reinterpret_cast<uint64_t*>(addr);
+        set_d_register(r1, dbl_val);
+      } else if (op == STEY) {
+        int64_t frs_val = get_d_register(r1) >> 32;
+        WriteW(addr, static_cast<int32_t>(frs_val), instr);
+      } else if (op == LEY) {
+        float float_val = *reinterpret_cast<float*>(addr);
+        set_d_register_from_float32(r1, float_val);
+      } else if (op == STY) {
+        uint32_t value = get_low_register<uint32_t>(r1);
+        WriteW(addr, value, instr);
+      } else if (op == STG) {
+        uint64_t value = get_register(r1);
+        WriteDW(addr, value);
+      } else if (op == STDY) {
+        int64_t frs_val = get_d_register(r1);
+        WriteDW(addr, frs_val);
+      } else if (op == STCY) {
+        uint8_t value = get_low_register<uint32_t>(r1);
+        WriteB(addr, value);
+      } else if (op == STHY) {
+        uint16_t value = get_low_register<uint32_t>(r1);
+        WriteH(addr, value, instr);
+      } else if (op == LHY) {
+        int32_t result = static_cast<int32_t>(ReadH(addr, instr));
+        set_low_register(r1, result);
+      }
+      break;
+    }
+    case MVC: {
+      // Move Character
+      int b1 = ssInstr->B1Value();
+      intptr_t d1 = ssInstr->D1Value();
+      int b2 = ssInstr->B2Value();
+      intptr_t d2 = ssInstr->D2Value();
+      int length = ssInstr->Length();
+      int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      intptr_t src_addr = b2_val + d2;
+      intptr_t dst_addr = b1_val + d1;
+      // remember that the length is the actual length - 1
+      for (int i = 0; i < length + 1; ++i) {
+        WriteB(dst_addr++, ReadB(src_addr++));
+      }
+      break;
+    }
+    case MVHI: {
+      // Move Integer (32)
+      int b1 = silInstr->B1Value();
+      intptr_t d1 = silInstr->D1Value();
+      int16_t i2 = silInstr->I2Value();
+      int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+      intptr_t src_addr = b1_val + d1;
+      WriteW(src_addr, i2, instr);
+      break;
+    }
+    case MVGHI: {
+      // Move Integer (64)
+      int b1 = silInstr->B1Value();
+      intptr_t d1 = silInstr->D1Value();
+      int16_t i2 = silInstr->I2Value();
+      int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+      intptr_t src_addr = b1_val + d1;
+      WriteDW(src_addr, i2);
+      break;
+    }
+    case LLH:
+    case LLGH: {
+      // Load Logical Halfword
+      int r1 = rxyInstr->R1Value();
+      int b2 = rxyInstr->B2Value();
+      int x2 = rxyInstr->X2Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxyInstr->D2Value();
+      uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+      if (op == LLH) {
+        set_low_register(r1, mem_val);
+      } else if (op == LLGH) {
+        set_register(r1, mem_val);
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    }
+    case LLC:
+    case LLGC: {
+      // Load Logical Character - loads a byte and zero extends.
+      int r1 = rxyInstr->R1Value();
+      int b2 = rxyInstr->B2Value();
+      int x2 = rxyInstr->X2Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxyInstr->D2Value();
+      uint8_t mem_val = ReadBU(b2_val + d2_val + x2_val);
+      if (op == LLC) {
+        set_low_register(r1, static_cast<uint32_t>(mem_val));
+      } else if (op == LLGC) {
+        set_register(r1, static_cast<uint64_t>(mem_val));
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    }
+    case XIHF:
+    case XILF: {
+      int r1 = rilInstr->R1Value();
+      uint32_t imm = rilInstr->I2UnsignedValue();
+      uint32_t alu_out = 0;
+      if (op == XILF) {
+        alu_out = get_low_register<uint32_t>(r1);
+        alu_out = alu_out ^ imm;
+        set_low_register(r1, alu_out);
+      } else if (op == XIHF) {
+        alu_out = get_high_register<uint32_t>(r1);
+        alu_out = alu_out ^ imm;
+        set_high_register(r1, alu_out);
+      } else {
+        UNREACHABLE();
+      }
+      SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      break;
+    }
+    case RISBG: {
+      // Rotate then insert selected bits
+      int r1 = rieInstr->R1Value();
+      int r2 = rieInstr->R2Value();
+      // Starting Bit Position is Bits 2-7 of I3 field
+      uint32_t start_bit = rieInstr->I3Value() & 0x3F;
+      // Ending Bit Position is Bits 2-7 of I4 field
+      uint32_t end_bit = rieInstr->I4Value() & 0x3F;
+      // Shift Amount is Bits 2-7 of I5 field
+      uint32_t shift_amount = rieInstr->I5Value() & 0x3F;
+      // Zero out remaining (unselected) bits if Bit 0 of I4 is 1.
+      bool zero_remaining = (0 != (rieInstr->I4Value() & 0x80));
+
+      uint64_t src_val = get_register(r2);
+
+      // Rotate left by the shift amount first. A shift amount of zero leaves
+      // the value unchanged (and must not trigger an undefined 64-bit shift).
+      uint64_t rotated_val =
+          (shift_amount == 0)
+              ? src_val
+              : (src_val << shift_amount) | (src_val >> (64 - shift_amount));
+      int32_t width = end_bit - start_bit + 1;
+
+      uint64_t selection_mask = 0;
+      if (width < 64) {
+        selection_mask = (static_cast<uint64_t>(1) << width) - 1;
+      } else {
+        selection_mask = static_cast<uint64_t>(static_cast<int64_t>(-1));
+      }
+      selection_mask = selection_mask << (63 - end_bit);
+
+      uint64_t selected_val = rotated_val & selection_mask;
+
+      if (!zero_remaining) {
+        // Merge in the unselected bits from the original value
+        selected_val = (src_val & ~selection_mask) | selected_val;
+      }
+
+      // Condition code is set by treating result as 64-bit signed int
+      SetS390ConditionCode<int64_t>(selected_val, 0);
+      set_register(r1, selected_val);
+      break;
+    }
+    default:
+      return DecodeSixByteArithmetic(instr);
+  }
+  return true;
+}
+
+void Simulator::DecodeSixByteBitShift(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  // Pre-cast instruction to various types
+
+  RSYInstruction* rsyInstr = reinterpret_cast<RSYInstruction*>(instr);
+
+  switch (op) {
+    case SLLK:
+    case RLL:
+    case SRLK: {
+      // For SLLK/SRLK, the 32-bit third operand is shifted the number
+      // of bits specified by the second-operand address, and the result is
+      // placed at the first-operand location. Except for when the R1 and R3
+      // fields designate the same register, the third operand remains
+      // unchanged in general register R3.
+      int r1 = rsyInstr->R1Value();
+      int r3 = rsyInstr->R3Value();
+      int b2 = rsyInstr->B2Value();
+      intptr_t d2 = rsyInstr->D2Value();
+      // only takes rightmost 6 bits
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      // unsigned
+      uint32_t r3_val = get_low_register<uint32_t>(r3);
+      uint32_t alu_out = 0;
+      if (SLLK == op) {
+        alu_out = r3_val << shiftBits;
+      } else if (SRLK == op) {
+        alu_out = r3_val >> shiftBits;
+      } else if (RLL == op) {
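+        // e.g. rotating r3_val = 0x12345678 left by 8 bits yields 0x34567812.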
+        uint32_t rotateBits = r3_val >> (32 - shiftBits);
+        alu_out = (r3_val << shiftBits) | (rotateBits);
+      } else {
+        UNREACHABLE();
+      }
+      set_low_register(r1, alu_out);
+      break;
+    }
+    case SLLG:
+    case RLLG:
+    case SRLG: {
+      // For SLLG/SRLG, the 64-bit third operand is shifted the number
+      // of bits specified by the second-operand address, and the result is
+      // placed at the first-operand location. Except for when the R1 and R3
+      // fields designate the same register, the third operand remains
+      // unchanged in general register R3.
+      int r1 = rsyInstr->R1Value();
+      int r3 = rsyInstr->R3Value();
+      int b2 = rsyInstr->B2Value();
+      intptr_t d2 = rsyInstr->D2Value();
+      // only takes rightmost 6 bits
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int shiftBits = (b2_val + d2) & 0x3F;
+      // unsigned
+      uint64_t r3_val = get_register(r3);
+      uint64_t alu_out = 0;
+      if (op == SLLG) {
+        alu_out = r3_val << shiftBits;
+      } else if (op == SRLG) {
+        alu_out = r3_val >> shiftBits;
+      } else if (op == RLLG) {
+        uint64_t rotateBits = r3_val >> (64 - shiftBits);
+        alu_out = (r3_val << shiftBits) | (rotateBits);
+      } else {
+        UNREACHABLE();
+      }
+      set_register(r1, alu_out);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+/**
+ * Decodes and simulates six byte arithmetic instructions
+ */
+bool Simulator::DecodeSixByteArithmetic(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+
+  // Pre-cast instruction to various types
+  SIYInstruction* siyInstr = reinterpret_cast<SIYInstruction*>(instr);
+
+  switch (op) {
+    case CDB:
+    case ADB:
+    case SDB:
+    case MDB:
+    case DDB:
+    case SQDB: {
+      RXEInstruction* rxeInstr = reinterpret_cast<RXEInstruction*>(instr);
+      int b2 = rxeInstr->B2Value();
+      int x2 = rxeInstr->X2Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxeInstr->D2Value();
+      int r1 = rxeInstr->R1Value();
+      double r1_val = get_double_from_d_register(r1);
+      double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+
+      switch (op) {
+        case CDB:
+          SetS390ConditionCode<double>(r1_val, dbl_val);
+          break;
+        case ADB:
+          r1_val += dbl_val;
+          set_d_register_from_double(r1, r1_val);
+          SetS390ConditionCode<double>(r1_val, 0);
+          break;
+        case SDB:
+          r1_val -= dbl_val;
+          set_d_register_from_double(r1, r1_val);
+          SetS390ConditionCode<double>(r1_val, 0);
+          break;
+        case MDB:
+          r1_val *= dbl_val;
+          set_d_register_from_double(r1, r1_val);
+          SetS390ConditionCode<double>(r1_val, 0);
+          break;
+        case DDB:
+          r1_val /= dbl_val;
+          set_d_register_from_double(r1, r1_val);
+          SetS390ConditionCode<double>(r1_val, 0);
+          break;
+        case SQDB:
+          r1_val = std::sqrt(dbl_val);
+          set_d_register_from_double(r1, r1_val);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      break;
+    }
+    case LRV:
+    case LRVH:
+    case STRV:
+    case STRVH: {
+      RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+      int r1 = rxyInstr->R1Value();
+      int x2 = rxyInstr->X2Value();
+      int b2 = rxyInstr->B2Value();
+      int d2 = rxyInstr->D2Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      intptr_t mem_addr = b2_val + x2_val + d2;
+
+      if (op == LRVH) {
+        int16_t mem_val = ReadH(mem_addr, instr);
+        int32_t result = ByteReverse(mem_val) & 0x0000ffff;
+        result |= r1_val & 0xffff0000;
+        set_low_register(r1, result);
+      } else if (op == LRV) {
+        int32_t mem_val = ReadW(mem_addr, instr);
+        set_low_register(r1, ByteReverse(mem_val));
+      } else if (op == STRVH) {
+        int16_t result = static_cast<int16_t>(r1_val >> 16);
+        WriteH(mem_addr, ByteReverse(result), instr);
+      } else if (op == STRV) {
+        WriteW(mem_addr, ByteReverse(r1_val), instr);
+      }
+
+      break;
+    }
+    case AHIK:
+    case AGHIK: {
+      // Non-clobbering Add Halfword Immediate
+      RIEInstruction* rieInst = reinterpret_cast<RIEInstruction*>(instr);
+      int r1 = rieInst->R1Value();
+      int r2 = rieInst->R2Value();
+      bool isOF = false;
+      if (AHIK == op) {
+        // 32-bit Add
+        int32_t r2_val = get_low_register<int32_t>(r2);
+        int32_t imm = rieInst->I6Value();
+        isOF = CheckOverflowForIntAdd(r2_val, imm, int32_t);
+        set_low_register(r1, r2_val + imm);
+        SetS390ConditionCode<int32_t>(r2_val + imm, 0);
+      } else if (AGHIK == op) {
+        // 64-bit Add
+        int64_t r2_val = get_register(r2);
+        int64_t imm = static_cast<int64_t>(rieInst->I6Value());
+        isOF = CheckOverflowForIntAdd(r2_val, imm, int64_t);
+        set_register(r1, r2_val + imm);
+        SetS390ConditionCode<int64_t>(r2_val + imm, 0);
+      }
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    case ALFI:
+    case SLFI: {
+      RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+      int r1 = rilInstr->R1Value();
+      uint32_t imm = rilInstr->I2UnsignedValue();
+      uint32_t alu_out = get_low_register<uint32_t>(r1);
+      if (op == ALFI) {
+        alu_out += imm;
+      } else if (op == SLFI) {
+        alu_out -= imm;
+      }
+      SetS390ConditionCode<uint32_t>(alu_out, 0);
+      set_low_register(r1, alu_out);
+      break;
+    }
+    case ML: {
+      UNIMPLEMENTED();
+      break;
+    }
+    case AY:
+    case SY:
+    case NY:
+    case OY:
+    case XY:
+    case CY: {
+      RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+      int r1 = rxyInstr->R1Value();
+      int x2 = rxyInstr->X2Value();
+      int b2 = rxyInstr->B2Value();
+      int d2 = rxyInstr->D2Value();
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int32_t alu_out = get_low_register<int32_t>(r1);
+      int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+      bool isOF = false;
+      if (op == AY) {
+        isOF = CheckOverflowForIntAdd(alu_out, mem_val, int32_t);
+        alu_out += mem_val;
+        SetS390ConditionCode<int32_t>(alu_out, 0);
+        SetS390OverflowCode(isOF);
+      } else if (op == SY) {
+        isOF = CheckOverflowForIntSub(alu_out, mem_val, int32_t);
+        alu_out -= mem_val;
+        SetS390ConditionCode<int32_t>(alu_out, 0);
+        SetS390OverflowCode(isOF);
+      } else if (op == NY) {
+        alu_out &= mem_val;
+        SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      } else if (op == OY) {
+        alu_out |= mem_val;
+        SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      } else if (op == XY) {
+        alu_out ^= mem_val;
+        SetS390BitWiseConditionCode<uint32_t>(alu_out);
+      } else if (op == CY) {
+        SetS390ConditionCode<int32_t>(alu_out, mem_val);
+      }
+      if (op != CY) {
+        set_low_register(r1, alu_out);
+      }
+      break;
+    }
+    case AHY:
+    case SHY: {
+      RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+      int r1 = rxyInstr->R1Value();
+      int32_t r1_val = get_low_register<int32_t>(r1);
+      int b2 = rxyInstr->B2Value();
+      int x2 = rxyInstr->X2Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxyInstr->D2Value();
+      int32_t mem_val =
+          static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+      int32_t alu_out = 0;
+      bool isOF = false;
+      switch (op) {
+        case AHY:
+          alu_out = r1_val + mem_val;
+          isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+          break;
+        case SHY:
+          alu_out = r1_val - mem_val;
+          isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      set_low_register(r1, alu_out);
+      SetS390ConditionCode<int32_t>(alu_out, 0);
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    case AG:
+    case SG:
+    case NG:
+    case OG:
+    case XG:
+    case CG:
+    case CLG: {
+      RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+      int r1 = rxyInstr->R1Value();
+      int x2 = rxyInstr->X2Value();
+      int b2 = rxyInstr->B2Value();
+      int d2 = rxyInstr->D2Value();
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t alu_out = get_register(r1);
+      int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+
+      switch (op) {
+        case AG: {
+          alu_out += mem_val;
+          SetS390ConditionCode<int32_t>(alu_out, 0);
+          break;
+        }
+        case SG: {
+          alu_out -= mem_val;
+          SetS390ConditionCode<int32_t>(alu_out, 0);
+          break;
+        }
+        case NG: {
+          alu_out &= mem_val;
+          SetS390BitWiseConditionCode<uint32_t>(alu_out);
+          break;
+        }
+        case OG: {
+          alu_out |= mem_val;
+          SetS390BitWiseConditionCode<uint32_t>(alu_out);
+          break;
+        }
+        case XG: {
+          alu_out ^= mem_val;
+          SetS390BitWiseConditionCode<uint32_t>(alu_out);
+          break;
+        }
+        case CG: {
+          SetS390ConditionCode<int64_t>(alu_out, mem_val);
+          break;
+        }
+        case CLG: {
+          SetS390ConditionCode<uint64_t>(alu_out, mem_val);
+          break;
+        }
+        default: {
+          DCHECK(false);
+          break;
+        }
+      }
+
+      if (op != CG) {
+        set_register(r1, alu_out);
+      }
+      break;
+    }
+    case ALY:
+    case SLY:
+    case CLY: {
+      RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+      int r1 = rxyInstr->R1Value();
+      int x2 = rxyInstr->X2Value();
+      int b2 = rxyInstr->B2Value();
+      int d2 = rxyInstr->D2Value();
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      uint32_t alu_out = get_low_register<uint32_t>(r1);
+      uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+
+      if (op == ALY) {
+        alu_out += mem_val;
+        set_low_register(r1, alu_out);
+        SetS390ConditionCode<uint32_t>(alu_out, 0);
+      } else if (op == SLY) {
+        alu_out -= mem_val;
+        set_low_register(r1, alu_out);
+        SetS390ConditionCode<uint32_t>(alu_out, 0);
+      } else if (op == CLY) {
+        SetS390ConditionCode<uint32_t>(alu_out, mem_val);
+      }
+      break;
+    }
+    case AGFI:
+    case AFI: {
+      // Clobbering Add Word Immediate
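+      // i.e. r1 <- r1 + i2: unlike AHIK/AGHIK above, the first operand is
+      // both source and destination.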
+      RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+      int32_t r1 = rilInstr->R1Value();
+      bool isOF = false;
+      if (AFI == op) {
+        // 32-bit Add (Register + 32-bit Immediate)
+        int32_t r1_val = get_low_register<int32_t>(r1);
+        int32_t i2 = rilInstr->I2Value();
+        isOF = CheckOverflowForIntAdd(r1_val, i2, int32_t);
+        int32_t alu_out = r1_val + i2;
+        set_low_register(r1, alu_out);
+        SetS390ConditionCode<int32_t>(alu_out, 0);
+      } else if (AGFI == op) {
+        // 64-bit Add (Register + 32-bit Imm)
+        int64_t r1_val = get_register(r1);
+        int64_t i2 = static_cast<int64_t>(rilInstr->I2Value());
+        isOF = CheckOverflowForIntAdd(r1_val, i2, int64_t);
+        int64_t alu_out = r1_val + i2;
+        set_register(r1, alu_out);
+        SetS390ConditionCode<int64_t>(alu_out, 0);
+      }
+      SetS390OverflowCode(isOF);
+      break;
+    }
+    case ASI: {
+      // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+      // The below static cast to 8 bit and then to 32 bit is necessary
+      // because siyInstr->I2Value() returns a uint8_t, which a direct
+      // cast to int32_t could incorrectly interpret.
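+      // For example, an I2 field of 0xFF must be interpreted as -1, not 255.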
+      int8_t i2_8bit = static_cast<int8_t>(siyInstr->I2Value());
+      int32_t i2 = static_cast<int32_t>(i2_8bit);
+      int b1 = siyInstr->B1Value();
+      intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+
+      int d1_val = siyInstr->D1Value();
+      intptr_t addr = b1_val + d1_val;
+
+      int32_t mem_val = ReadW(addr, instr);
+      bool isOF = CheckOverflowForIntAdd(mem_val, i2, int32_t);
+      int32_t alu_out = mem_val + i2;
+      SetS390ConditionCode<int32_t>(alu_out, 0);
+      SetS390OverflowCode(isOF);
+      WriteW(addr, alu_out, instr);
+      break;
+    }
+    case AGSI: {
+      // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+      // The below static cast to 8 bit and then to 32 bit is necessary
+      // because siyInstr->I2Value() returns a uint8_t, which a direct
+      // cast to int32_t could incorrectly interpret.
+      int8_t i2_8bit = static_cast<int8_t>(siyInstr->I2Value());
+      int64_t i2 = static_cast<int64_t>(i2_8bit);
+      int b1 = siyInstr->B1Value();
+      intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+
+      int d1_val = siyInstr->D1Value();
+      intptr_t addr = b1_val + d1_val;
+
+      int64_t mem_val = ReadDW(addr);
+      bool isOF = CheckOverflowForIntAdd(mem_val, i2, int64_t);
+      int64_t alu_out = mem_val + i2;
+      SetS390ConditionCode<uint64_t>(alu_out, 0);
+      SetS390OverflowCode(isOF);
+      WriteDW(addr, alu_out);
+      break;
+    }
+    case AGF:
+    case SGF:
+    case ALG:
+    case SLG: {
+#ifndef V8_TARGET_ARCH_S390X
+      DCHECK(false);
+#endif
+      RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+      int r1 = rxyInstr->R1Value();
+      uint64_t r1_val = get_register(rxyInstr->R1Value());
+      int b2 = rxyInstr->B2Value();
+      int x2 = rxyInstr->X2Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxyInstr->D2Value();
+      uint64_t alu_out = r1_val;
+      if (op == ALG) {
+        uint64_t mem_val =
+            static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+        alu_out += mem_val;
+        SetS390ConditionCode<uint64_t>(alu_out, 0);
+      } else if (op == SLG) {
+        uint64_t mem_val =
+            static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+        alu_out -= mem_val;
+        SetS390ConditionCode<uint64_t>(alu_out, 0);
+      } else if (op == AGF) {
+        uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+        alu_out += mem_val;
+        SetS390ConditionCode<int64_t>(alu_out, 0);
+      } else if (op == SGF) {
+        uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+        alu_out -= mem_val;
+        SetS390ConditionCode<int64_t>(alu_out, 0);
+      } else {
+        DCHECK(false);
+      }
+      set_register(r1, alu_out);
+      break;
+    }
+    case ALGFI:
+    case SLGFI: {
+#ifndef V8_TARGET_ARCH_S390X
+      // should only be called on 64-bit
+      DCHECK(false);
+#endif
+      RILInstruction* rilInstr = reinterpret_cast<RILInstruction*>(instr);
+      int r1 = rilInstr->R1Value();
+      uint32_t i2 = rilInstr->I2UnsignedValue();
+      uint64_t r1_val = (uint64_t)(get_register(r1));
+      uint64_t alu_out;
+      if (op == ALGFI)
+        alu_out = r1_val + i2;
+      else
+        alu_out = r1_val - i2;
+      set_register(r1, (intptr_t)alu_out);
+      SetS390ConditionCode<uint64_t>(alu_out, 0);
+      break;
+    }
+    case MSY:
+    case MSG: {
+      RXYInstruction* rxyInstr = reinterpret_cast<RXYInstruction*>(instr);
+      int r1 = rxyInstr->R1Value();
+      int b2 = rxyInstr->B2Value();
+      int x2 = rxyInstr->X2Value();
+      int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+      int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+      intptr_t d2_val = rxyInstr->D2Value();
+      if (op == MSY) {
+        int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+        int32_t r1_val = get_low_register<int32_t>(r1);
+        set_low_register(r1, mem_val * r1_val);
+      } else if (op == MSG) {
+        int64_t mem_val = ReadDW(b2_val + d2_val + x2_val);
+        int64_t r1_val = get_register(r1);
+        set_register(r1, mem_val * r1_val);
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    }
+    case MSFI:
+    case MSGFI: {
+      RILInstruction* rilinst = reinterpret_cast<RILInstruction*>(instr);
+      int r1 = rilinst->R1Value();
+      int32_t i2 = rilinst->I2Value();
+      if (op == MSFI) {
+        int32_t alu_out = get_low_register<int32_t>(r1);
+        alu_out = alu_out * i2;
+        set_low_register(r1, alu_out);
+      } else if (op == MSGFI) {
+        int64_t alu_out = get_register(r1);
+        alu_out = alu_out * i2;
+        set_register(r1, alu_out);
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      return false;
+  }
+  return true;
+}
+
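+// Byte-swap helpers. For example, ByteReverse(static_cast<int16_t>(0x1234))
+// returns 0x3412 and ByteReverse(0x12345678) returns 0x78563412.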
+int16_t Simulator::ByteReverse(int16_t hword) {
+  return (hword << 8) | ((hword >> 8) & 0x00ff);
+}
+
+int32_t Simulator::ByteReverse(int32_t word) {
+  int32_t result = word << 24;
+  result |= (word << 8) & 0x00ff0000;
+  result |= (word >> 8) & 0x0000ff00;
+  result |= (word >> 24) & 0x000000ff;
+  return result;
+}
+
+// Executes the current instruction.
+void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
+  if (v8::internal::FLAG_check_icache) {
+    CheckICache(isolate_->simulator_i_cache(), instr);
+  }
+  pc_modified_ = false;
+  if (::v8::internal::FLAG_trace_sim) {
+    disasm::NameConverter converter;
+    disasm::Disassembler dasm(converter);
+    // use a reasonably large buffer
+    v8::internal::EmbeddedVector<char, 256> buffer;
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+#ifdef V8_TARGET_ARCH_S390X
+    PrintF("%05ld  %08" V8PRIxPTR "  %s\n", icount_,
+           reinterpret_cast<intptr_t>(instr), buffer.start());
+#else
+    PrintF("%05lld  %08" V8PRIxPTR "  %s\n", icount_,
+           reinterpret_cast<intptr_t>(instr), buffer.start());
+#endif
+    // Flush stdout to prevent incomplete file output during abnormal exits
+    // This is caused by the output being buffered before being written to file
+    fflush(stdout);
+  }
+
+  // Try to simulate as S390 Instruction first.
+  bool processed = true;
+
+  int instrLength = instr->InstructionLength();
+  if (instrLength == 2)
+    processed = DecodeTwoByte(instr);
+  else if (instrLength == 4)
+    processed = DecodeFourByte(instr);
+  else if (instrLength == 6)
+    processed = DecodeSixByte(instr);
+
+  if (processed) {
+    if (!pc_modified_ && auto_incr_pc) {
+      set_pc(reinterpret_cast<intptr_t>(instr) + instrLength);
+    }
+    return;
+  }
+}
+
+void Simulator::DebugStart() {
+  S390Debugger dbg(this);
+  dbg.Debug();
+}
+
+void Simulator::Execute() {
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  intptr_t program_counter = get_pc();
+
+  if (::v8::internal::FLAG_stop_sim_at == 0) {
+    // Fast version of the dispatch loop without checking whether the simulator
+    // should be stopping at a particular executed instruction.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      ExecuteInstruction(instr);
+      program_counter = get_pc();
+    }
+  } else {
+    // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+    // we reach the particular instruction count.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+        S390Debugger dbg(this);
+        dbg.Debug();
+      } else {
+        ExecuteInstruction(instr);
+      }
+      program_counter = get_pc();
+    }
+  }
+}
+
+void Simulator::CallInternal(byte* entry, int reg_arg_count) {
+  // Prepare to execute the code at entry
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
+    // entry is the function descriptor
+    set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+  } else {
+    // entry is the instruction address
+    set_pc(reinterpret_cast<intptr_t>(entry));
+  }
+  // Remember the values of non-volatile registers.
+  int64_t r6_val = get_register(r6);
+  int64_t r7_val = get_register(r7);
+  int64_t r8_val = get_register(r8);
+  int64_t r9_val = get_register(r9);
+  int64_t r10_val = get_register(r10);
+  int64_t r11_val = get_register(r11);
+  int64_t r12_val = get_register(r12);
+  int64_t r13_val = get_register(r13);
+
+  if (ABI_CALL_VIA_IP) {
+    // Put target address in ip (for JS prologue).
+    set_register(ip, get_pc());
+  }
+
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // the LR the simulation stops when returning to this call point.
+  registers_[14] = end_sim_pc;
+
+  // Set up the non-volatile registers with a known value. To be able to check
+  // that they are preserved properly across JS execution.
+  intptr_t callee_saved_value = icount_;
+  if (reg_arg_count < 5) {
+    set_register(r6, callee_saved_value + 6);
+  }
+  set_register(r7, callee_saved_value + 7);
+  set_register(r8, callee_saved_value + 8);
+  set_register(r9, callee_saved_value + 9);
+  set_register(r10, callee_saved_value + 10);
+  set_register(r11, callee_saved_value + 11);
+  set_register(r12, callee_saved_value + 12);
+  set_register(r13, callee_saved_value + 13);
+
+  // Start the simulation
+  Execute();
+
+// Check that the non-volatile registers have been preserved.
+#ifndef V8_TARGET_ARCH_S390X
+  if (reg_arg_count < 5) {
+    DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+  }
+  DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
+  DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
+  DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
+  DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
+  DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
+  DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
+  DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+#else
+  if (reg_arg_count < 5) {
+    DCHECK_EQ(callee_saved_value + 6, get_register(r6));
+  }
+  DCHECK_EQ(callee_saved_value + 7, get_register(r7));
+  DCHECK_EQ(callee_saved_value + 8, get_register(r8));
+  DCHECK_EQ(callee_saved_value + 9, get_register(r9));
+  DCHECK_EQ(callee_saved_value + 10, get_register(r10));
+  DCHECK_EQ(callee_saved_value + 11, get_register(r11));
+  DCHECK_EQ(callee_saved_value + 12, get_register(r12));
+  DCHECK_EQ(callee_saved_value + 13, get_register(r13));
+#endif
+
+  // Restore non-volatile registers with the original value.
+  set_register(r6, r6_val);
+  set_register(r7, r7_val);
+  set_register(r8, r8_val);
+  set_register(r9, r9_val);
+  set_register(r10, r10_val);
+  set_register(r11, r11_val);
+  set_register(r12, r12_val);
+  set_register(r13, r13_val);
+}
+
+intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+  // Remember the values of non-volatile registers.
+  int64_t r6_val = get_register(r6);
+  int64_t r7_val = get_register(r7);
+  int64_t r8_val = get_register(r8);
+  int64_t r9_val = get_register(r9);
+  int64_t r10_val = get_register(r10);
+  int64_t r11_val = get_register(r11);
+  int64_t r12_val = get_register(r12);
+  int64_t r13_val = get_register(r13);
+
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Set up arguments
+
+  // First 5 arguments passed in registers r2-r6.
+  int reg_arg_count = (argument_count > 5) ? 5 : argument_count;
+  int stack_arg_count = argument_count - reg_arg_count;
+  for (int i = 0; i < reg_arg_count; i++) {
+    intptr_t value = va_arg(parameters, intptr_t);
+    set_register(i + 2, value);
+  }
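+  // For illustration: a call with 7 arguments places the first five in
+  // r2..r6 and the remaining two in the stack argument area set up below.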
+
+  // Remaining arguments passed on stack.
+  int64_t original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  intptr_t entry_stack =
+      (original_stack -
+       (kCalleeRegisterSaveAreaSize + stack_arg_count * sizeof(intptr_t)));
+  if (base::OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -base::OS::ActivationFrameAlignment();
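+    // e.g. with an 8-byte activation frame alignment, this clears the low
+    // three bits of entry_stack, rounding it down.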
+  }
+
+  // Store remaining arguments on stack, from low to high memory.
+  intptr_t* stack_argument =
+      reinterpret_cast<intptr_t*>(entry_stack + kCalleeRegisterSaveAreaSize);
+  for (int i = 0; i < stack_arg_count; i++) {
+    intptr_t value = va_arg(parameters, intptr_t);
+    stack_argument[i] = value;
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
+
+// Prepare to execute the code at entry
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  // entry is the function descriptor
+  set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+#else
+  // entry is the instruction address
+  set_pc(reinterpret_cast<intptr_t>(entry));
+#endif
+
+  // Put target address in ip (for JS prologue).
+  set_register(r12, get_pc());
+
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // the LR the simulation stops when returning to this call point.
+  registers_[14] = end_sim_pc;
+
+  // Set up the non-volatile registers with a known value. To be able to check
+  // that they are preserved properly across JS execution.
+  intptr_t callee_saved_value = icount_;
+  if (reg_arg_count < 5) {
+    set_register(r6, callee_saved_value + 6);
+  }
+  set_register(r7, callee_saved_value + 7);
+  set_register(r8, callee_saved_value + 8);
+  set_register(r9, callee_saved_value + 9);
+  set_register(r10, callee_saved_value + 10);
+  set_register(r11, callee_saved_value + 11);
+  set_register(r12, callee_saved_value + 12);
+  set_register(r13, callee_saved_value + 13);
+
+  // Start the simulation
+  Execute();
+
+// Check that the non-volatile registers have been preserved.
+#ifndef V8_TARGET_ARCH_S390X
+  if (reg_arg_count < 5) {
+    DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+  }
+  DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
+  DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
+  DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
+  DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
+  DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
+  DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
+  DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+#else
+  if (reg_arg_count < 5) {
+    DCHECK_EQ(callee_saved_value + 6, get_register(r6));
+  }
+  DCHECK_EQ(callee_saved_value + 7, get_register(r7));
+  DCHECK_EQ(callee_saved_value + 8, get_register(r8));
+  DCHECK_EQ(callee_saved_value + 9, get_register(r9));
+  DCHECK_EQ(callee_saved_value + 10, get_register(r10));
+  DCHECK_EQ(callee_saved_value + 11, get_register(r11));
+  DCHECK_EQ(callee_saved_value + 12, get_register(r12));
+  DCHECK_EQ(callee_saved_value + 13, get_register(r13));
+#endif
+
+  // Restore non-volatile registers with the original value.
+  set_register(r6, r6_val);
+  set_register(r7, r7_val);
+  set_register(r8, r8_val);
+  set_register(r9, r9_val);
+  set_register(r10, r10_val);
+  set_register(r11, r11_val);
+  set_register(r12, r12_val);
+  set_register(r13, r13_val);
+// Pop stack passed arguments.
+
+#ifndef V8_TARGET_ARCH_S390X
+  DCHECK_EQ(entry_stack, get_low_register<int32_t>(sp));
+#else
+  DCHECK_EQ(entry_stack, get_register(sp));
+#endif
+  set_register(sp, original_stack);
+
+  // Return value register
+  intptr_t result = get_register(r2);
+  return result;
+}
+
+void Simulator::CallFP(byte* entry, double d0, double d1) {
+  set_d_register_from_double(0, d0);
+  set_d_register_from_double(1, d1);
+  CallInternal(entry);
+}
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+  CallFP(entry, d0, d1);
+  int32_t result = get_register(r2);
+  return result;
+}
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+  CallFP(entry, d0, d1);
+  return get_double_from_d_register(0);
+}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+  uintptr_t new_sp = get_register(sp) - sizeof(uintptr_t);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+  *stack_slot = address;
+  set_register(sp, new_sp);
+  return new_sp;
+}
+
+uintptr_t Simulator::PopAddress() {
+  uintptr_t current_sp = get_register(sp);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+  uintptr_t address = *stack_slot;
+  set_register(sp, current_sp + sizeof(uintptr_t));
+  return address;
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // USE_SIMULATOR
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/s390/simulator-s390.h b/src/s390/simulator-s390.h
new file mode 100644
index 0000000..ae3dd58
--- /dev/null
+++ b/src/s390/simulator-s390.h
@@ -0,0 +1,552 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Declares a Simulator for S390 instructions if we are not generating a native
+// S390 binary. This Simulator allows us to run and debug S390 code generation
+// on regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forward to the real entry
+// on an S390 hardware platform.
+
+#ifndef V8_S390_SIMULATOR_S390_H_
+#define V8_S390_SIMULATOR_S390_H_
+
+#include "src/allocation.h"
+
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native s390 platform.
+
+namespace v8 {
+namespace internal {
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+  (entry(p0, p1, p2, p3, p4))
+
+typedef int (*s390_regexp_matcher)(String*, int, const byte*, const byte*, int*,
+                                   int, Address, int, void*, Isolate*);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type s390_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+                                   p7, p8)                                     \
+  (FUNCTION_CAST<s390_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7,   \
+                                             NULL, p8))
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on s390 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    USE(isolate);
+    return c_limit;
+  }
+
+  static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+                                            uintptr_t try_catch_address) {
+    USE(isolate);
+    return try_catch_address;
+  }
+
+  static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+    USE(isolate);
+  }
+};
+}  // namespace internal
+}  // namespace v8
+
+#else  // !defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/assembler.h"
+#include "src/hashmap.h"
+#include "src/s390/constants-s390.h"
+
+namespace v8 {
+namespace internal {
+
+class CachePage {
+ public:
+  static const int LINE_VALID = 0;
+  static const int LINE_INVALID = 1;
+
+  static const int kPageShift = 12;
+  static const int kPageSize = 1 << kPageShift;
+  static const int kPageMask = kPageSize - 1;
+  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
+  static const int kLineLength = 1 << kLineShift;
+  static const int kLineMask = kLineLength - 1;
+
+  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+  char* ValidityByte(int offset) {
+    return &validity_map_[offset >> kLineShift];
+  }
+
+  char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+  char data_[kPageSize];  // The cached data.
+  static const int kValidityMapSize = kPageSize >> kLineShift;
+  char validity_map_[kValidityMapSize];  // One byte per line.
+};
+
+class Simulator {
+ public:
+  friend class S390Debugger;
+  enum Register {
+    no_reg = -1,
+    r0 = 0,
+    r1 = 1,
+    r2 = 2,
+    r3 = 3,
+    r4 = 4,
+    r5 = 5,
+    r6 = 6,
+    r7 = 7,
+    r8 = 8,
+    r9 = 9,
+    r10 = 10,
+    r11 = 11,
+    r12 = 12,
+    r13 = 13,
+    r14 = 14,
+    r15 = 15,
+    fp = r11,
+    ip = r12,
+    cp = r13,
+    ra = r14,
+    sp = r15,  // name aliases
+    kNumGPRs = 16,
+    d0 = 0,
+    d1,
+    d2,
+    d3,
+    d4,
+    d5,
+    d6,
+    d7,
+    d8,
+    d9,
+    d10,
+    d11,
+    d12,
+    d13,
+    d14,
+    d15,
+    kNumFPRs = 16
+  };
+
+  explicit Simulator(Isolate* isolate);
+  ~Simulator();
+
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* current(v8::internal::Isolate* isolate);
+
+  // Accessors for register state.
+  void set_register(int reg, uint64_t value);
+  uint64_t get_register(int reg) const;
+  template <typename T>
+  T get_low_register(int reg) const;
+  template <typename T>
+  T get_high_register(int reg) const;
+  void set_low_register(int reg, uint32_t value);
+  void set_high_register(int reg, uint32_t value);
+
+  double get_double_from_register_pair(int reg);
+  void set_d_register_from_double(int dreg, const double dbl) {
+    DCHECK(dreg >= 0 && dreg < kNumFPRs);
+    *bit_cast<double*>(&fp_registers_[dreg]) = dbl;
+  }
+
+  double get_double_from_d_register(int dreg) {
+    DCHECK(dreg >= 0 && dreg < kNumFPRs);
+    return *bit_cast<double*>(&fp_registers_[dreg]);
+  }
+  void set_d_register(int dreg, int64_t value) {
+    DCHECK(dreg >= 0 && dreg < kNumFPRs);
+    fp_registers_[dreg] = value;
+  }
+  int64_t get_d_register(int dreg) {
+    DCHECK(dreg >= 0 && dreg < kNumFPRs);
+    return fp_registers_[dreg];
+  }
+
+  void set_d_register_from_float32(int dreg, const float f) {
+    DCHECK(dreg >= 0 && dreg < kNumFPRs);
+
+    int32_t f_int = *bit_cast<int32_t*>(&f);
+    int64_t finalval = static_cast<int64_t>(f_int) << 32;
+    set_d_register(dreg, finalval);
+  }
+
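+  // The float is kept in the upper word of the 64-bit d-register:
+  // set_d_register_from_float32(dreg, 1.0f) stores 0x3F800000 in the high
+  // 32 bits, and this accessor shifts it back down before reinterpreting
+  // the bits as a float.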
+  float get_float32_from_d_register(int dreg) {
+    DCHECK(dreg >= 0 && dreg < kNumFPRs);
+
+    int64_t regval = get_d_register(dreg) >> 32;
+    int32_t regval32 = static_cast<int32_t>(regval);
+    return *bit_cast<float*>(&regval32);
+  }
+
+  // Special case of set_register and get_register to access the raw PC value.
+  void set_pc(intptr_t value);
+  intptr_t get_pc() const;
+
+  Address get_sp() const {
+    return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+  }
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t StackLimit(uintptr_t c_limit) const;
+
+  // Executes S390 instructions until the PC reaches end_sim_pc.
+  void Execute();
+
+  // Call on program start.
+  static void Initialize(Isolate* isolate);
+
+  static void TearDown(HashMap* i_cache, Redirection* first);
+
+  // V8 generally calls into generated JS code with 5 parameters and into
+  // generated RegExp code with 7 parameters. This is a convenience function,
+  // which sets up the simulator state and grabs the result on return.
+  intptr_t Call(byte* entry, int argument_count, ...);
+  // Alternative: call a 2-argument double function.
+  void CallFP(byte* entry, double d0, double d1);
+  int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+  double CallFPReturnsDouble(byte* entry, double d0, double d1);
+
+  // Push an address onto the JS stack.
+  uintptr_t PushAddress(uintptr_t address);
+
+  // Pop an address from the JS stack.
+  uintptr_t PopAddress();
+
+  // Debugger input.
+  void set_last_debugger_input(char* input);
+  char* last_debugger_input() { return last_debugger_input_; }
+
+  // ICache checking.
+  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+                          size_t size);
+
+  // Returns true if pc register contains one of the 'special_values' defined
+  // below (bad_lr, end_sim_pc).
+  bool has_bad_pc() const;
+
+ private:
+  enum special_values {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly setup.
+    bad_lr = -1,
+    // A pc value used to signal the simulator to stop execution.  Generally
+    // the lr is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2
+  };
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void Format(Instruction* instr, const char* format);
+
+  // Helper functions to set the conditional flags in the architecture state.
+  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
+  bool BorrowFrom(int32_t left, int32_t right);
+  template <typename T1>
+  inline bool OverflowFromSigned(T1 alu_out, T1 left, T1 right, bool addition);
+
+  // Helper functions to decode common "addressing" modes
+  int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+  int32_t GetImm(Instruction* instr, bool* carry_out);
+  void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
+                  intptr_t* start_address, intptr_t* end_address);
+  void HandleRList(Instruction* instr, bool load);
+  void HandleVList(Instruction* inst);
+  void SoftwareInterrupt(Instruction* instr);
+
+  // Stop helper functions.
+  inline bool isStopInstruction(Instruction* instr);
+  inline bool isWatchedStop(uint32_t bkpt_code);
+  inline bool isEnabledStop(uint32_t bkpt_code);
+  inline void EnableStop(uint32_t bkpt_code);
+  inline void DisableStop(uint32_t bkpt_code);
+  inline void IncreaseStopCounter(uint32_t bkpt_code);
+  void PrintStopInfo(uint32_t code);
+
+  // Byte Reverse
+  inline int16_t ByteReverse(int16_t hword);
+  inline int32_t ByteReverse(int32_t word);
+
+  // Read and write memory.
+  inline uint8_t ReadBU(intptr_t addr);
+  inline int8_t ReadB(intptr_t addr);
+  inline void WriteB(intptr_t addr, uint8_t value);
+  inline void WriteB(intptr_t addr, int8_t value);
+
+  inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
+  inline int16_t ReadH(intptr_t addr, Instruction* instr);
+  // Note: Overloaded on the sign of the value.
+  inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
+  inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
+
+  inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
+  inline int32_t ReadW(intptr_t addr, Instruction* instr);
+  inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
+  inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
+
+  inline int64_t ReadDW(intptr_t addr);
+  inline double ReadDouble(intptr_t addr);
+  inline void WriteDW(intptr_t addr, int64_t value);
+
+  // S390
+  void Trace(Instruction* instr);
+  bool DecodeTwoByte(Instruction* instr);
+  bool DecodeFourByte(Instruction* instr);
+  bool DecodeFourByteArithmetic(Instruction* instr);
+  bool DecodeFourByteArithmetic64Bit(Instruction* instr);
+  bool DecodeFourByteFloatingPoint(Instruction* instr);
+  void DecodeFourByteFloatingPointIntConversion(Instruction* instr);
+  void DecodeFourByteFloatingPointRound(Instruction* instr);
+
+  bool DecodeSixByte(Instruction* instr);
+  bool DecodeSixByteArithmetic(Instruction* instr);
+  bool S390InstructionDecode(Instruction* instr);
+  void DecodeSixByteBitShift(Instruction* instr);
+
+  // Used by the CL**BR instructions.
+  template <typename T1, typename T2>
+  void SetS390RoundConditionCode(T1 r2_val, T2 max, T2 min) {
+    condition_reg_ = 0;
+    double r2_dval = static_cast<double>(r2_val);
+    double dbl_min = static_cast<double>(min);
+    double dbl_max = static_cast<double>(max);
+
+    if (r2_dval == 0.0)
+      condition_reg_ = 8;
+    else if (r2_dval < 0.0 && r2_dval >= dbl_min && std::isfinite(r2_dval))
+      condition_reg_ = 4;
+    else if (r2_dval > 0.0 && r2_dval <= dbl_max && std::isfinite(r2_dval))
+      condition_reg_ = 2;
+    else
+      condition_reg_ = 1;
+  }
+
+  template <typename T1>
+  void SetS390RoundConditionCode(T1 r2_val, int64_t max, int64_t min) {
+    condition_reg_ = 0;
+    double r2_dval = static_cast<double>(r2_val);
+    double dbl_min = static_cast<double>(min);
+    double dbl_max = static_cast<double>(max);
+
+    // Note that the IEEE 754 floating-point representations (both 32 and
+    // 64 bit) cannot exactly represent INT64_MAX. The closest they can get
+    // is INT64_MAX + 1. IEEE 754 FP can, though, represent INT64_MIN
+    // exactly.
+
+    // This is not an issue for INT32, as IEEE 754 64-bit can represent
+    // INT32_MAX and INT32_MIN with exact precision.
+
+    if (r2_dval == 0.0)
+      condition_reg_ = 8;
+    else if (r2_dval < 0.0 && r2_dval >= dbl_min && std::isfinite(r2_dval))
+      condition_reg_ = 4;
+    else if (r2_dval > 0.0 && r2_dval < dbl_max && std::isfinite(r2_dval))
+      condition_reg_ = 2;
+    else
+      condition_reg_ = 1;
+  }
+
+  // Used by the CL**BR instructions.
+  template <typename T1, typename T2, typename T3>
+  void SetS390ConvertConditionCode(T1 src, T2 dst, T3 max) {
+    condition_reg_ = 0;
+    if (src == static_cast<T1>(0.0)) {
+      condition_reg_ |= 8;
+    } else if (src < static_cast<T1>(0.0) && static_cast<T2>(src) == 0 &&
+               std::isfinite(src)) {
+      condition_reg_ |= 4;
+    } else if (src > static_cast<T1>(0.0) && std::isfinite(src) &&
+               src < static_cast<T1>(max)) {
+      condition_reg_ |= 2;
+    } else {
+      condition_reg_ |= 1;
+    }
+  }
+
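+  // For example, SetS390ConditionCode<int32_t>(5, 7) sets CC_LT, while an
+  // unordered floating-point comparison (e.g. against NaN) sets the code to
+  // unordered.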
+  template <typename T>
+  void SetS390ConditionCode(T lhs, T rhs) {
+    condition_reg_ = 0;
+    if (lhs == rhs) {
+      condition_reg_ |= CC_EQ;
+    } else if (lhs < rhs) {
+      condition_reg_ |= CC_LT;
+    } else if (lhs > rhs) {
+      condition_reg_ |= CC_GT;
+    }
+
+    // We get down here only for floating point
+    // comparisons and the values are unordered
+    // i.e. NaN
+    if (condition_reg_ == 0) condition_reg_ = unordered;
+  }
+
+  // Used by arithmetic operations that use carry.
+  template <typename T>
+  void SetS390ConditionCodeCarry(T result, bool overflow) {
+    condition_reg_ = 0;
+    bool zero_result = (result == static_cast<T>(0));
+    if (zero_result && !overflow) {
+      condition_reg_ |= 8;
+    } else if (!zero_result && !overflow) {
+      condition_reg_ |= 4;
+    } else if (zero_result && overflow) {
+      condition_reg_ |= 2;
+    } else if (!zero_result && overflow) {
+      condition_reg_ |= 1;
+    }
+    if (condition_reg_ == 0) UNREACHABLE();
+  }
+
+  bool isNaN(double value) { return (value != value); }
+
+  // Set the condition code for bitwise operations
+  // CC0 is set if value == 0.
+  // CC1 is set if value != 0.
+  // CC2/CC3 are not set.
+  template <typename T>
+  void SetS390BitWiseConditionCode(T value) {
+    condition_reg_ = 0;
+
+    if (value == 0)
+      condition_reg_ |= CC_EQ;
+    else
+      condition_reg_ |= CC_LT;
+  }
+
+  void SetS390OverflowCode(bool isOF) {
+    if (isOF) condition_reg_ = CC_OF;
+  }
+
+  bool TestConditionCode(Condition mask) {
+    // Check for unconditional branch
+    if (mask == 0xf) return true;
+
+    return (condition_reg_ & mask) != 0;
+  }
+
+  // Executes one instruction.
+  void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
+
+  // ICache.
+  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+  // Runtime call support.
+  static void* RedirectExternalReference(
+      Isolate* isolate, void* external_function,
+      v8::internal::ExternalReference::Type type);
+
+  // Handle arguments and return value for runtime FP functions.
+  void GetFpArgs(double* x, double* y, intptr_t* z);
+  void SetFpResult(const double& result);
+  void TrashCallerSaveRegisters();
+
+  void CallInternal(byte* entry, int reg_arg_count = 3);
+
+  // Architecture state.
+  // On z9 and higher and supported Linux on z Systems platforms, all registers
+  // are 64-bit, even in 31-bit mode.
+  uint64_t registers_[kNumGPRs];
+  int64_t fp_registers_[kNumFPRs];
+
+  // Condition Code register. In S390, the last 4 bits are used.
+  int32_t condition_reg_;
+  // Special register to track PC.
+  intptr_t special_reg_pc_;
+
+  // Simulator support.
+  char* stack_;
+  static const size_t stack_protection_size_ = 256 * kPointerSize;
+  bool pc_modified_;
+  int64_t icount_;
+
+  // Debugger input.
+  char* last_debugger_input_;
+
+  // Icache simulation
+  v8::internal::HashMap* i_cache_;
+
+  // Registered breakpoints.
+  Instruction* break_pc_;
+  Instr break_instr_;
+
+  v8::internal::Isolate* isolate_;
+
+  // A stop is watched if its code is less than kNumOfWatchedStops.
+  // Only watched stops support enabling/disabling and the counter feature.
+  static const uint32_t kNumOfWatchedStops = 256;
+
+  // Breakpoint is disabled if bit 31 is set.
+  static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled, meaning the simulator will stop when meeting the
+  // instruction, if bit 31 of watched_stops_[code].count is unset.
+  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint was hit or gone through.
+  struct StopCountAndDesc {
+    uint32_t count;
+    char* desc;
+  };
+  StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+  void DebugStart();
+};
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4)          \
+  reinterpret_cast<Object*>(Simulator::current(isolate)->Call(           \
+      FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
+      (intptr_t)p3, (intptr_t)p4))
+
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+                                   p7, p8)                                     \
+  Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1,     \
+                                    (intptr_t)p2, (intptr_t)p3, (intptr_t)p4,  \
+                                    (intptr_t)p5, (intptr_t)p6, (intptr_t)p7,  \
+                                    (intptr_t)NULL, (intptr_t)p8)
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.  The JS-based limit normally points near the end of
+// the simulator stack.  When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit(c_limit);
+  }
+
+  static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+                                            uintptr_t try_catch_address) {
+    Simulator* sim = Simulator::current(isolate);
+    return sim->PushAddress(try_catch_address);
+  }
+
+  static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+    Simulator::current(isolate)->PopAddress();
+  }
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // !defined(USE_SIMULATOR)
+#endif  // V8_S390_SIMULATOR_S390_H_
diff --git a/src/simulator.h b/src/simulator.h
index d198291..ca23889 100644
--- a/src/simulator.h
+++ b/src/simulator.h
@@ -19,6 +19,8 @@
 #include "src/mips/simulator-mips.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/mips64/simulator-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/simulator-s390.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/simulator-x87.h"
 #else
diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc
new file mode 100644
index 0000000..84a08c1
--- /dev/null
+++ b/src/snapshot/code-serializer.cc
@@ -0,0 +1,421 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/code-serializer.h"
+
+#include "src/code-stubs.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/snapshot/deserializer.h"
+#include "src/version.h"
+
+namespace v8 {
+namespace internal {
+
+ScriptData* CodeSerializer::Serialize(Isolate* isolate,
+                                      Handle<SharedFunctionInfo> info,
+                                      Handle<String> source) {
+  base::ElapsedTimer timer;
+  if (FLAG_profile_deserialization) timer.Start();
+  if (FLAG_trace_serializer) {
+    PrintF("[Serializing from");
+    Object* script = info->script();
+    if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
+    PrintF("]\n");
+  }
+
+  // Serialize code object.
+  SnapshotByteSink sink(info->code()->CodeSize() * 2);
+  CodeSerializer cs(isolate, &sink, *source);
+  DisallowHeapAllocation no_gc;
+  Object** location = Handle<Object>::cast(info).location();
+  cs.VisitPointer(location);
+  cs.SerializeDeferredObjects();
+  cs.Pad();
+
+  SerializedCodeData data(sink.data(), cs);
+  ScriptData* script_data = data.GetScriptData();
+
+  if (FLAG_profile_deserialization) {
+    double ms = timer.Elapsed().InMillisecondsF();
+    int length = script_data->length();
+    PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
+  }
+
+  return script_data;
+}
+
+void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+                                     WhereToPoint where_to_point, int skip) {
+  int root_index = root_index_map_.Lookup(obj);
+  if (root_index != RootIndexMap::kInvalidRootIndex) {
+    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+    return;
+  }
+
+  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+
+  FlushSkip(skip);
+
+  if (obj->IsCode()) {
+    Code* code_object = Code::cast(obj);
+    switch (code_object->kind()) {
+      case Code::OPTIMIZED_FUNCTION:  // No optimized code compiled yet.
+      case Code::HANDLER:             // No handlers patched in yet.
+      case Code::REGEXP:              // No regexp literals initialized yet.
+      case Code::NUMBER_OF_KINDS:     // Pseudo enum value.
+      case Code::BYTECODE_HANDLER:    // No direct references to handlers.
+        CHECK(false);
+      case Code::BUILTIN:
+        SerializeBuiltin(code_object->builtin_index(), how_to_code,
+                         where_to_point);
+        return;
+      case Code::STUB:
+        SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
+        return;
+#define IC_KIND_CASE(KIND) case Code::KIND:
+        IC_KIND_LIST(IC_KIND_CASE)
+#undef IC_KIND_CASE
+        SerializeIC(code_object, how_to_code, where_to_point);
+        return;
+      case Code::FUNCTION:
+        DCHECK(code_object->has_reloc_info_for_serialization());
+        SerializeGeneric(code_object, how_to_code, where_to_point);
+        return;
+      case Code::WASM_FUNCTION:
+      case Code::WASM_TO_JS_FUNCTION:
+      case Code::JS_TO_WASM_FUNCTION:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+  }
+
+  // Past this point we should not see any (context-specific) maps anymore.
+  CHECK(!obj->IsMap());
+  // There should be no references to the global object embedded.
+  CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
+  // There should be no hash table embedded. They would require rehashing.
+  CHECK(!obj->IsHashTable());
+  // We expect no instantiated function objects or contexts.
+  CHECK(!obj->IsJSFunction() && !obj->IsContext());
+
+  SerializeGeneric(obj, how_to_code, where_to_point);
+}
+
+void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
+                                      HowToCode how_to_code,
+                                      WhereToPoint where_to_point) {
+  // Object has not yet been serialized.  Serialize it here.
+  ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
+                              where_to_point);
+  serializer.Serialize();
+}
+
+void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
+                                      WhereToPoint where_to_point) {
+  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+         (how_to_code == kFromCode && where_to_point == kInnerPointer));
+  DCHECK_LT(builtin_index, Builtins::builtin_count);
+  DCHECK_LE(0, builtin_index);
+
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding builtin: %s\n",
+           isolate()->builtins()->name(builtin_index));
+  }
+
+  sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
+  sink_->PutInt(builtin_index, "builtin_index");
+}
+
+void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+                                       WhereToPoint where_to_point) {
+  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+         (how_to_code == kFromCode && where_to_point == kInnerPointer));
+  DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
+  DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
+
+  int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
+
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding code stub %s as %d\n",
+           CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)), index);
+  }
+
+  sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
+  sink_->PutInt(index, "CodeStub key");
+}
+
+void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
+                                 WhereToPoint where_to_point) {
+  // The IC may be implemented as a stub.
+  uint32_t stub_key = ic->stub_key();
+  if (stub_key != CodeStub::NoCacheKey()) {
+    if (FLAG_trace_serializer) {
+      PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
+    }
+    SerializeCodeStub(stub_key, how_to_code, where_to_point);
+    return;
+  }
+  // The IC may be implemented as a builtin. Only real builtins have an
+  // actual builtin_index value attached (otherwise it's just garbage).
+  // Compare to make sure we are really dealing with a builtin.
+  int builtin_index = ic->builtin_index();
+  if (builtin_index < Builtins::builtin_count) {
+    Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
+    Code* builtin = isolate()->builtins()->builtin(name);
+    if (builtin == ic) {
+      if (FLAG_trace_serializer) {
+        PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
+      }
+      DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
+             ic->kind() == Code::KEYED_STORE_IC);
+      SerializeBuiltin(builtin_index, how_to_code, where_to_point);
+      return;
+    }
+  }
+  // The IC may also just be a piece of code kept in the non_monomorphic_cache.
+  // In that case, just serialize as a normal code object.
+  if (FLAG_trace_serializer) {
+    PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
+  }
+  DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
+  SerializeGeneric(ic, how_to_code, where_to_point);
+}
+
+int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
+  // TODO(yangguo) Maybe we need a hash table; the linear scan below makes
+  // adding all stub keys O(n^2) overall.
+  int index = 0;
+  while (index < stub_keys_.length()) {
+    if (stub_keys_[index] == stub_key) return index;
+    index++;
+  }
+  stub_keys_.Add(stub_key);
+  return index;
+}
+
+MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
+    Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
+  base::ElapsedTimer timer;
+  if (FLAG_profile_deserialization) timer.Start();
+
+  HandleScope scope(isolate);
+
+  base::SmartPointer<SerializedCodeData> scd(
+      SerializedCodeData::FromCachedData(isolate, cached_data, *source));
+  if (scd.is_empty()) {
+    if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
+    DCHECK(cached_data->rejected());
+    return MaybeHandle<SharedFunctionInfo>();
+  }
+
+  // Prepare and register list of attached objects.
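+  // Slot kSourceObjectIndex holds the source string; the code stubs follow
+  // from kCodeStubsBaseIndex onwards, in the order their keys were recorded.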
+  Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
+  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
+      code_stub_keys.length() + kCodeStubsBaseIndex);
+  attached_objects[kSourceObjectIndex] = source;
+  for (int i = 0; i < code_stub_keys.length(); i++) {
+    attached_objects[i + kCodeStubsBaseIndex] =
+        CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
+  }
+
+  Deserializer deserializer(scd.get());
+  deserializer.SetAttachedObjects(attached_objects);
+
+  // Deserialize.
+  Handle<SharedFunctionInfo> result;
+  if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
+    // Deserializing may fail if the reservations cannot be fulfilled.
+    if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
+    return MaybeHandle<SharedFunctionInfo>();
+  }
+
+  if (FLAG_profile_deserialization) {
+    double ms = timer.Elapsed().InMillisecondsF();
+    int length = cached_data->length();
+    PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
+  }
+  result->set_deserialized(true);
+
+  if (isolate->logger()->is_logging_code_events() ||
+      isolate->cpu_profiler()->is_profiling()) {
+    String* name = isolate->heap()->empty_string();
+    if (result->script()->IsScript()) {
+      Script* script = Script::cast(result->script());
+      if (script->name()->IsString()) name = String::cast(script->name());
+    }
+    isolate->logger()->CodeCreateEvent(
+        Logger::SCRIPT_TAG, result->abstract_code(), *result, NULL, name);
+  }
+  return scope.CloseAndEscape(result);
+}
+
+class Checksum {
+ public:
+  explicit Checksum(Vector<const byte> payload) {
+#ifdef MEMORY_SANITIZER
+    // Computing the checksum includes padding bytes for objects like strings.
+    // Mark every object as initialized in the code serializer.
+    MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
+#endif  // MEMORY_SANITIZER
+    // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
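+    // Two running sums: 'a' accumulates the data words and 'b' accumulates
+    // the intermediate values of 'a', making the result order-sensitive.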
+    uintptr_t a = 1;
+    uintptr_t b = 0;
+    const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
+    DCHECK(IsAligned(payload.length(), kIntptrSize));
+    const uintptr_t* end = cur + payload.length() / kIntptrSize;
+    while (cur < end) {
+      // Unsigned overflow expected and intended.
+      a += *cur++;
+      b += a;
+    }
+#if V8_HOST_ARCH_64_BIT
+    a ^= a >> 32;
+    b ^= b >> 32;
+#endif  // V8_HOST_ARCH_64_BIT
+    a_ = static_cast<uint32_t>(a);
+    b_ = static_cast<uint32_t>(b);
+  }
+
+  bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
+
+  uint32_t a() const { return a_; }
+  uint32_t b() const { return b_; }
+
+ private:
+  uint32_t a_;
+  uint32_t b_;
+
+  DISALLOW_COPY_AND_ASSIGN(Checksum);
+};
+
+SerializedCodeData::SerializedCodeData(const List<byte>& payload,
+                                       const CodeSerializer& cs) {
+  DisallowHeapAllocation no_gc;
+  const List<uint32_t>* stub_keys = cs.stub_keys();
+
+  List<Reservation> reservations;
+  cs.EncodeReservations(&reservations);
+
+  // Calculate sizes.
+  int reservation_size = reservations.length() * kInt32Size;
+  int num_stub_keys = stub_keys->length();
+  int stub_keys_size = stub_keys->length() * kInt32Size;
+  int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
+  int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
+  int size = padded_payload_offset + payload.length();
+
+  // Allocate backing store and create result data.
+  AllocateData(size);
+
+  // Set header values.
+  SetMagicNumber(cs.isolate());
+  SetHeaderValue(kVersionHashOffset, Version::Hash());
+  SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
+  SetHeaderValue(kCpuFeaturesOffset,
+                 static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
+  SetHeaderValue(kFlagHashOffset, FlagList::Hash());
+  SetHeaderValue(kNumReservationsOffset, reservations.length());
+  SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
+  SetHeaderValue(kPayloadLengthOffset, payload.length());
+
+  Checksum checksum(payload.ToConstVector());
+  SetHeaderValue(kChecksum1Offset, checksum.a());
+  SetHeaderValue(kChecksum2Offset, checksum.b());
+
+  // Copy reservation chunk sizes.
+  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+            reservation_size);
+
+  // Copy code stub keys.
+  CopyBytes(data_ + kHeaderSize + reservation_size,
+            reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
+
+  memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
+
+  // Copy serialized data.
+  CopyBytes(data_ + padded_payload_offset, payload.begin(),
+            static_cast<size_t>(payload.length()));
+}
+
+SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
+    Isolate* isolate, String* source) const {
+  uint32_t magic_number = GetMagicNumber();
+  if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
+  uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
+  uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
+  uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
+  uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
+  uint32_t c1 = GetHeaderValue(kChecksum1Offset);
+  uint32_t c2 = GetHeaderValue(kChecksum2Offset);
+  if (version_hash != Version::Hash()) return VERSION_MISMATCH;
+  if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
+  if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
+    return CPU_FEATURES_MISMATCH;
+  }
+  if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
+  if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
+  return CHECK_SUCCESS;
+}
+
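+// Note: the source "hash" below is simply the source length, i.e. a cheap
+// length comparison rather than a content hash.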
+uint32_t SerializedCodeData::SourceHash(String* source) const {
+  return source->length();
+}
+
+// Return ScriptData object and relinquish ownership over it to the caller.
+ScriptData* SerializedCodeData::GetScriptData() {
+  DCHECK(owns_data_);
+  ScriptData* result = new ScriptData(data_, size_);
+  result->AcquireDataOwnership();
+  owns_data_ = false;
+  data_ = NULL;
+  return result;
+}
+
+Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
+    const {
+  return Vector<const Reservation>(
+      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
+      GetHeaderValue(kNumReservationsOffset));
+}
+
+Vector<const byte> SerializedCodeData::Payload() const {
+  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
+  int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
+  int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
+  int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
+  const byte* payload = data_ + padded_payload_offset;
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
+  int length = GetHeaderValue(kPayloadLengthOffset);
+  DCHECK_EQ(data_ + size_, payload + length);
+  return Vector<const byte>(payload, length);
+}
+
+Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
+  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
+  const byte* start = data_ + kHeaderSize + reservations_size;
+  return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
+                                GetHeaderValue(kNumCodeStubKeysOffset));
+}
+
+SerializedCodeData::SerializedCodeData(ScriptData* data)
+    : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
+
+SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
+                                                       ScriptData* cached_data,
+                                                       String* source) {
+  DisallowHeapAllocation no_gc;
+  SerializedCodeData* scd = new SerializedCodeData(cached_data);
+  SanityCheckResult r = scd->SanityCheck(isolate, source);
+  if (r == CHECK_SUCCESS) return scd;
+  cached_data->Reject();
+  source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
+  delete scd;
+  return NULL;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/snapshot/code-serializer.h b/src/snapshot/code-serializer.h
new file mode 100644
index 0000000..b217fff
--- /dev/null
+++ b/src/snapshot/code-serializer.h
@@ -0,0 +1,127 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_CODE_SERIALIZER_H_
+#define V8_SNAPSHOT_CODE_SERIALIZER_H_
+
+#include "src/parsing/preparse-data.h"
+#include "src/snapshot/serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeSerializer : public Serializer {
+ public:
+  static ScriptData* Serialize(Isolate* isolate,
+                               Handle<SharedFunctionInfo> info,
+                               Handle<String> source);
+
+  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
+      Isolate* isolate, ScriptData* cached_data, Handle<String> source);
+
+  static const int kSourceObjectIndex = 0;
+  STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
+
+  static const int kCodeStubsBaseIndex = 1;
+
+  String* source() const {
+    DCHECK(!AllowHeapAllocation::IsAllowed());
+    return source_;
+  }
+
+  const List<uint32_t>* stub_keys() const { return &stub_keys_; }
+
+ private:
+  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
+      : Serializer(isolate, sink), source_(source) {
+    back_reference_map_.AddSourceString(source);
+  }
+
+  ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
+
+  void SerializeObject(HeapObject* o, HowToCode how_to_code,
+                       WhereToPoint where_to_point, int skip) override;
+
+  void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
+                        WhereToPoint where_to_point);
+  void SerializeIC(Code* ic, HowToCode how_to_code,
+                   WhereToPoint where_to_point);
+  void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+                         WhereToPoint where_to_point);
+  void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
+                        WhereToPoint where_to_point);
+  int AddCodeStubKey(uint32_t stub_key);
+
+  DisallowHeapAllocation no_gc_;
+  String* source_;
+  List<uint32_t> stub_keys_;
+  DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
+};
+
+// Wrapper around ScriptData to provide code-serializer-specific functionality.
+class SerializedCodeData : public SerializedData {
+ public:
+  // Used when consuming.
+  static SerializedCodeData* FromCachedData(Isolate* isolate,
+                                            ScriptData* cached_data,
+                                            String* source);
+
+  // Used when producing.
+  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
+
+  // Return ScriptData object and relinquish ownership over it to the caller.
+  ScriptData* GetScriptData();
+
+  Vector<const Reservation> Reservations() const;
+  Vector<const byte> Payload() const;
+
+  Vector<const uint32_t> CodeStubKeys() const;
+
+ private:
+  explicit SerializedCodeData(ScriptData* data);
+
+  enum SanityCheckResult {
+    CHECK_SUCCESS = 0,
+    MAGIC_NUMBER_MISMATCH = 1,
+    VERSION_MISMATCH = 2,
+    SOURCE_MISMATCH = 3,
+    CPU_FEATURES_MISMATCH = 4,
+    FLAGS_MISMATCH = 5,
+    CHECKSUM_MISMATCH = 6
+  };
+
+  SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
+
+  uint32_t SourceHash(String* source) const;
+
+  // The data header consists of uint32_t-sized entries:
+  // [0] magic number and external reference count
+  // [1] version hash
+  // [2] source hash
+  // [3] cpu features
+  // [4] flag hash
+  // [5] number of reservation size entries
+  // [6] number of code stub keys
+  // [7] payload length
+  // [8] payload checksum part 1
+  // [9] payload checksum part 2
+  // ...  reservations
+  // ...  code stub keys
+  // ...  serialized payload
+  static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
+  static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
+  static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
+  static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
+  static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
+  static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
+  static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
+  static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
+  static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
+  static const int kHeaderSize = kChecksum2Offset + kInt32Size;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SNAPSHOT_CODE_SERIALIZER_H_
diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc
new file mode 100644
index 0000000..0a21fef
--- /dev/null
+++ b/src/snapshot/deserializer.cc
@@ -0,0 +1,818 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/deserializer.h"
+
+#include "src/bootstrapper.h"
+#include "src/external-reference-table.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/macro-assembler.h"
+#include "src/snapshot/natives.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+void Deserializer::DecodeReservation(
+    Vector<const SerializedData::Reservation> res) {
+  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
+  STATIC_ASSERT(NEW_SPACE == 0);
+  int current_space = NEW_SPACE;
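+  // Chunks arrive grouped by space, in ascending space order; a chunk with
+  // the is_last() bit set closes the current space.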
+  for (auto& r : res) {
+    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
+    if (r.is_last()) current_space++;
+  }
+  DCHECK_EQ(kNumberOfSpaces, current_space);
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
+}
+
+void Deserializer::FlushICacheForNewIsolate() {
+  DCHECK(!deserializing_user_code_);
+  // The entire isolate is newly deserialized. Simply flush all code pages.
+  PageIterator it(isolate_->heap()->code_space());
+  while (it.has_next()) {
+    Page* p = it.next();
+    Assembler::FlushICache(isolate_, p->area_start(),
+                           p->area_end() - p->area_start());
+  }
+}
+
+void Deserializer::FlushICacheForNewCodeObjects() {
+  DCHECK(deserializing_user_code_);
+  for (Code* code : new_code_objects_) {
+    if (FLAG_serialize_age_code) code->PreAge(isolate_);
+    Assembler::FlushICache(isolate_, code->instruction_start(),
+                           code->instruction_size());
+  }
+}
+
+bool Deserializer::ReserveSpace() {
+#ifdef DEBUG
+  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
+    CHECK(reservations_[i].length() > 0);
+  }
+#endif  // DEBUG
+  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+    high_water_[i] = reservations_[i][0].start;
+  }
+  return true;
+}
+
+void Deserializer::Initialize(Isolate* isolate) {
+  DCHECK_NULL(isolate_);
+  DCHECK_NOT_NULL(isolate);
+  isolate_ = isolate;
+  DCHECK_NULL(external_reference_table_);
+  external_reference_table_ = ExternalReferenceTable::instance(isolate);
+  CHECK_EQ(magic_number_,
+           SerializedData::ComputeMagicNumber(external_reference_table_));
+}
+
+void Deserializer::Deserialize(Isolate* isolate) {
+  Initialize(isolate);
+  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
+  // No active threads.
+  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
+  // No active handles.
+  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
+  // Partial snapshot cache is not yet populated.
+  DCHECK(isolate_->partial_snapshot_cache()->is_empty());
+
+  {
+    DisallowHeapAllocation no_gc;
+    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
+    isolate_->heap()->IterateSmiRoots(this);
+    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+    isolate_->heap()->RepairFreeListsAfterDeserialization();
+    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
+    DeserializeDeferredObjects();
+    FlushICacheForNewIsolate();
+  }
+
+  isolate_->heap()->set_native_contexts_list(
+      isolate_->heap()->undefined_value());
+  // The allocation site list is built during root iteration, but if no sites
+  // were encountered then it needs to be initialized to undefined.
+  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+    isolate_->heap()->set_allocation_sites_list(
+        isolate_->heap()->undefined_value());
+  }
+
+  // Update data pointers to the external strings containing natives sources.
+  Natives::UpdateSourceCache(isolate_->heap());
+  ExtraNatives::UpdateSourceCache(isolate_->heap());
+
+  // Issue code events for newly deserialized code objects.
+  LOG_CODE_EVENT(isolate_, LogCodeObjects());
+  LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
+  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
+}
+
+MaybeHandle<Object> Deserializer::DeserializePartial(
+    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
+  Initialize(isolate);
+  if (!ReserveSpace()) {
+    V8::FatalProcessOutOfMemory("deserialize context");
+    return MaybeHandle<Object>();
+  }
+
+  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
+  attached_objects[kGlobalProxyReference] = global_proxy;
+  SetAttachedObjects(attached_objects);
+
+  DisallowHeapAllocation no_gc;
+  // Remember the code space top so we can check below that no new code
+  // objects were deserialized.
+  OldSpace* code_space = isolate_->heap()->code_space();
+  Address start_address = code_space->top();
+  Object* root;
+  VisitPointer(&root);
+  DeserializeDeferredObjects();
+
+  isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
+
+  // There's no code deserialized here. If this assert fires then that's
+  // changed and logging should be added to notify the profiler et al of the
+  // new code, which also has to be flushed from instruction cache.
+  CHECK_EQ(start_address, code_space->top());
+  return Handle<Object>(root, isolate);
+}
+
+MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
+    Isolate* isolate) {
+  Initialize(isolate);
+  if (!ReserveSpace()) {
+    return Handle<SharedFunctionInfo>();
+  } else {
+    deserializing_user_code_ = true;
+    HandleScope scope(isolate);
+    Handle<SharedFunctionInfo> result;
+    {
+      DisallowHeapAllocation no_gc;
+      Object* root;
+      VisitPointer(&root);
+      DeserializeDeferredObjects();
+      FlushICacheForNewCodeObjects();
+      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+      isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
+    }
+    CommitPostProcessedObjects(isolate);
+    return scope.CloseAndEscape(result);
+  }
+}
+
+Deserializer::~Deserializer() {
+  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
+  // DCHECK(source_.AtEOF());
+  attached_objects_.Dispose();
+}
+
+// This is called on the roots.  It is the driver of the deserialization
+// process.  It is also called on the body of each function.
+void Deserializer::VisitPointers(Object** start, Object** end) {
+  // The space must be new space.  Any other space would cause ReadData to try
+  // to update the remembered set using NULL as the address.
+  ReadData(start, end, NEW_SPACE, NULL);
+}
+
+void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+  static const byte expected = kSynchronize;
+  CHECK_EQ(expected, source_.Get());
+}
+
+void Deserializer::DeserializeDeferredObjects() {
+  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
+    switch (code) {
+      case kAlignmentPrefix:
+      case kAlignmentPrefix + 1:
+      case kAlignmentPrefix + 2:
+        SetAlignment(code);
+        break;
+      default: {
+        int space = code & kSpaceMask;
+        DCHECK(space <= kNumberOfSpaces);
+        DCHECK(code - space == kNewObject);
+        HeapObject* object = GetBackReferencedObject(space);
+        int size = source_.GetInt() << kPointerSizeLog2;
+        Address obj_address = object->address();
+        Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
+        Object** end = reinterpret_cast<Object**>(obj_address + size);
+        bool filled = ReadData(start, end, space, obj_address);
+        CHECK(filled);
+        DCHECK(CanBeDeferred(object));
+        PostProcessNewObject(object, space);
+      }
+    }
+  }
+}
+
+// Used to insert a deserialized internalized string into the string table.
+class StringTableInsertionKey : public HashTableKey {
+ public:
+  explicit StringTableInsertionKey(String* string)
+      : string_(string), hash_(HashForObject(string)) {
+    DCHECK(string->IsInternalizedString());
+  }
+
+  bool IsMatch(Object* string) override {
+    // All entries in the string table have their hash computed, so compare
+    // hashes first for a fast negative check.
+    if (hash_ != HashForObject(string)) return false;
+    // We want to compare the content of two internalized strings here.
+    return string_->SlowEquals(String::cast(string));
+  }
+
+  uint32_t Hash() override { return hash_; }
+
+  uint32_t HashForObject(Object* key) override {
+    return String::cast(key)->Hash();
+  }
+
+  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
+    return handle(string_, isolate);
+  }
+
+ private:
+  String* string_;
+  uint32_t hash_;
+  DisallowHeapAllocation no_gc;
+};
+
+HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
+  if (deserializing_user_code()) {
+    if (obj->IsString()) {
+      String* string = String::cast(obj);
+      // Uninitialize hash field as the hash seed may have changed.
+      string->set_hash_field(String::kEmptyHashField);
+      if (string->IsInternalizedString()) {
+        // Canonicalize the internalized string. If it already exists in the
+        // string table, set it to forward to the existing one.
+        StringTableInsertionKey key(string);
+        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
+        if (canonical == NULL) {
+          new_internalized_strings_.Add(handle(string));
+          return string;
+        } else {
+          string->SetForwardedInternalizedString(canonical);
+          return canonical;
+        }
+      }
+    } else if (obj->IsScript()) {
+      new_scripts_.Add(handle(Script::cast(obj)));
+    } else {
+      DCHECK(CanBeDeferred(obj));
+    }
+  }
+  if (obj->IsAllocationSite()) {
+    DCHECK(obj->IsAllocationSite());
+    // Allocation sites are present in the snapshot, and must be linked into
+    // a list at deserialization time.
+    AllocationSite* site = AllocationSite::cast(obj);
+    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+    // as a (weak) root. If this root is relocated correctly, this becomes
+    // unnecessary.
+    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+      site->set_weak_next(isolate_->heap()->undefined_value());
+    } else {
+      site->set_weak_next(isolate_->heap()->allocation_sites_list());
+    }
+    isolate_->heap()->set_allocation_sites_list(site);
+  } else if (obj->IsCode()) {
+    // We flush all code pages after deserializing the startup snapshot. In that
+    // case, we only need to remember code objects in the large object space.
+    // When deserializing user code, remember each individual code object.
+    if (deserializing_user_code() || space == LO_SPACE) {
+      new_code_objects_.Add(Code::cast(obj));
+    }
+  }
+  // Check alignment.
+  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
+  return obj;
+}
+
+void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
+  StringTable::EnsureCapacityForDeserialization(
+      isolate, new_internalized_strings_.length());
+  for (Handle<String> string : new_internalized_strings_) {
+    StringTableInsertionKey key(*string);
+    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
+    StringTable::LookupKey(isolate, &key);
+  }
+
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  for (Handle<Script> script : new_scripts_) {
+    // Assign a new script id to avoid collision.
+    script->set_id(isolate_->heap()->NextScriptId());
+    // Add script to list.
+    Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
+    heap->SetRootScriptList(*list);
+  }
+}
+
+HeapObject* Deserializer::GetBackReferencedObject(int space) {
+  HeapObject* obj;
+  BackReference back_reference(source_.GetInt());
+  if (space == LO_SPACE) {
+    CHECK(back_reference.chunk_index() == 0);
+    uint32_t index = back_reference.large_object_index();
+    obj = deserialized_large_objects_[index];
+  } else {
+    DCHECK(space < kNumberOfPreallocatedSpaces);
+    uint32_t chunk_index = back_reference.chunk_index();
+    DCHECK_LE(chunk_index, current_chunk_[space]);
+    uint32_t chunk_offset = back_reference.chunk_offset();
+    Address address = reservations_[space][chunk_index].start + chunk_offset;
+    if (next_alignment_ != kWordAligned) {
+      int padding = Heap::GetFillToAlign(address, next_alignment_);
+      next_alignment_ = kWordAligned;
+      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
+      address += padding;
+    }
+    obj = HeapObject::FromAddress(address);
+  }
+  if (deserializing_user_code() && obj->IsInternalizedString()) {
+    obj = String::cast(obj)->GetForwardedInternalizedString();
+  }
+  hot_objects_.Add(obj);
+  return obj;
+}
+
+// This routine writes the new object through the pointer provided. The
+// object is written out as soon as it is allocated rather than at the end
+// of deserialization, because otherwise the FreeSpace map would not be set
+// up by the time we need it to mark the space at the end of a page free.
+void Deserializer::ReadObject(int space_number, Object** write_back) {
+  Address address;
+  HeapObject* obj;
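+  // Object sizes are streamed in object-alignment units, hence the shift.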
+  int size = source_.GetInt() << kObjectAlignmentBits;
+
+  if (next_alignment_ != kWordAligned) {
+    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
+    address = Allocate(space_number, reserved);
+    obj = HeapObject::FromAddress(address);
+    // If one of the following assertions fails, then we are deserializing an
+    // aligned object when the filler maps have not been deserialized yet.
+    // We require filler maps as padding to align the object.
+    Heap* heap = isolate_->heap();
+    DCHECK(heap->free_space_map()->IsMap());
+    DCHECK(heap->one_pointer_filler_map()->IsMap());
+    DCHECK(heap->two_pointer_filler_map()->IsMap());
+    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
+    address = obj->address();
+    next_alignment_ = kWordAligned;
+  } else {
+    address = Allocate(space_number, size);
+    obj = HeapObject::FromAddress(address);
+  }
+
+  isolate_->heap()->OnAllocationEvent(obj, size);
+  Object** current = reinterpret_cast<Object**>(address);
+  Object** limit = current + (size >> kPointerSizeLog2);
+
+  if (ReadData(current, limit, space_number, address)) {
+    // Only post process if object content has not been deferred.
+    obj = PostProcessNewObject(obj, space_number);
+  }
+
+  Object* write_back_obj = obj;
+  UnalignedCopy(write_back, &write_back_obj);
+#ifdef DEBUG
+  if (obj->IsCode()) {
+    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
+  } else {
+    DCHECK(space_number != CODE_SPACE);
+  }
+#endif  // DEBUG
+}
+
+// We know the space requirements before deserialization and can
+// pre-allocate that reserved space. During deserialization, all we need
+// to do is to bump up the pointer for each space in the reserved
+// space. This is also used for fixing back references.
+// We may have to split up the pre-allocation into several chunks
+// because it would not fit onto a single page. We do not have to keep
+// track of when to move to the next chunk. An opcode will signal this.
+// Since multiple large objects cannot be folded into one large object
+// space allocation, we have to do an actual allocation when deserializing
+// each large object. Instead of tracking offset for back references, we
+// reference large objects by index.
+Address Deserializer::Allocate(int space_index, int size) {
+  if (space_index == LO_SPACE) {
+    AlwaysAllocateScope scope(isolate_);
+    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
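+    // Large objects carry an extra executability byte in the stream.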
+    Executability exec = static_cast<Executability>(source_.Get());
+    AllocationResult result = lo_space->AllocateRaw(size, exec);
+    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
+    deserialized_large_objects_.Add(obj);
+    return obj->address();
+  } else {
+    DCHECK(space_index < kNumberOfPreallocatedSpaces);
+    Address address = high_water_[space_index];
+    DCHECK_NOT_NULL(address);
+    high_water_[space_index] += size;
+#ifdef DEBUG
+    // Assert that the current reserved chunk is still big enough.
+    const Heap::Reservation& reservation = reservations_[space_index];
+    int chunk_index = current_chunk_[space_index];
+    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
+#endif
+    if (space_index == CODE_SPACE) SkipList::Update(address, size);
+    return address;
+  }
+}
+
+Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
+                                           Object** current) {
+  DCHECK(!isolate_->heap()->deserialization_complete());
+  NativesExternalStringResource* resource = new NativesExternalStringResource(
+      source_vector.start(), source_vector.length());
+  Object* resource_obj = reinterpret_cast<Object*>(resource);
+  UnalignedCopy(current++, &resource_obj);
+  return current;
+}
+
+bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
+                            Address current_object_address) {
+  Isolate* const isolate = isolate_;
+  // Write barrier support costs around 1% in startup time.  In fact there
+  // are no new space objects in current boot snapshots, so it's not needed,
+  // but that may change.
+  bool write_barrier_needed =
+      (current_object_address != NULL && source_space != NEW_SPACE &&
+       source_space != CODE_SPACE);
+  while (current < limit) {
+    byte data = source_.Get();
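+    // Most opcode bytes pack four fields: where the object comes from, how
+    // the reference is encoded (kPlain/kFromCode), where it points
+    // (kStartOfObject/kInnerPointer), and the target space number.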
+    switch (data) {
+#define CASE_STATEMENT(where, how, within, space_number) \
+  case where + how + within + space_number:              \
+    STATIC_ASSERT((where & ~kWhereMask) == 0);           \
+    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
+    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
+    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
+
+#define CASE_BODY(where, how, within, space_number_if_any)                     \
+  {                                                                            \
+    bool emit_write_barrier = false;                                           \
+    bool current_was_incremented = false;                                      \
+    int space_number = space_number_if_any == kAnyOldSpace                     \
+                           ? (data & kSpaceMask)                               \
+                           : space_number_if_any;                              \
+    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
+      ReadObject(space_number, current);                                       \
+      emit_write_barrier = (space_number == NEW_SPACE);                        \
+    } else {                                                                   \
+      Object* new_object = NULL; /* May not be a real Object pointer. */       \
+      if (where == kNewObject) {                                               \
+        ReadObject(space_number, &new_object);                                 \
+      } else if (where == kBackref) {                                          \
+        emit_write_barrier = (space_number == NEW_SPACE);                      \
+        new_object = GetBackReferencedObject(data & kSpaceMask);               \
+      } else if (where == kBackrefWithSkip) {                                  \
+        int skip = source_.GetInt();                                           \
+        current = reinterpret_cast<Object**>(                                  \
+            reinterpret_cast<Address>(current) + skip);                        \
+        emit_write_barrier = (space_number == NEW_SPACE);                      \
+        new_object = GetBackReferencedObject(data & kSpaceMask);               \
+      } else if (where == kRootArray) {                                        \
+        int id = source_.GetInt();                                             \
+        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
+        new_object = isolate->heap()->root(root_index);                        \
+        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+      } else if (where == kPartialSnapshotCache) {                             \
+        int cache_index = source_.GetInt();                                    \
+        new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
+        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+      } else if (where == kExternalReference) {                                \
+        int skip = source_.GetInt();                                           \
+        current = reinterpret_cast<Object**>(                                  \
+            reinterpret_cast<Address>(current) + skip);                        \
+        int reference_id = source_.GetInt();                                   \
+        Address address = external_reference_table_->address(reference_id);    \
+        new_object = reinterpret_cast<Object*>(address);                       \
+      } else if (where == kAttachedReference) {                                \
+        int index = source_.GetInt();                                          \
+        DCHECK(deserializing_user_code() || index == kGlobalProxyReference);   \
+        new_object = *attached_objects_[index];                                \
+        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+      } else {                                                                 \
+        DCHECK(where == kBuiltin);                                             \
+        DCHECK(deserializing_user_code());                                     \
+        int builtin_id = source_.GetInt();                                     \
+        DCHECK_LE(0, builtin_id);                                              \
+        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
+        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
+        new_object = isolate->builtins()->builtin(name);                       \
+        emit_write_barrier = false;                                            \
+      }                                                                        \
+      if (within == kInnerPointer) {                                           \
+        if (space_number != CODE_SPACE || new_object->IsCode()) {              \
+          Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
+          new_object =                                                         \
+              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
+        } else {                                                               \
+          DCHECK(space_number == CODE_SPACE);                                  \
+          Cell* cell = Cell::cast(new_object);                                 \
+          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
+        }                                                                      \
+      }                                                                        \
+      if (how == kFromCode) {                                                  \
+        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
+        Assembler::deserialization_set_special_target_at(                      \
+            isolate, location_of_branch_data,                                  \
+            Code::cast(HeapObject::FromAddress(current_object_address)),       \
+            reinterpret_cast<Address>(new_object));                            \
+        location_of_branch_data += Assembler::kSpecialTargetSize;              \
+        current = reinterpret_cast<Object**>(location_of_branch_data);         \
+        current_was_incremented = true;                                        \
+      } else {                                                                 \
+        UnalignedCopy(current, &new_object);                                   \
+      }                                                                        \
+    }                                                                          \
+    if (emit_write_barrier && write_barrier_needed) {                          \
+      Address current_address = reinterpret_cast<Address>(current);            \
+      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));      \
+      isolate->heap()->RecordWrite(                                            \
+          HeapObject::FromAddress(current_object_address),                     \
+          static_cast<int>(current_address - current_object_address),          \
+          *reinterpret_cast<Object**>(current_address));                       \
+    }                                                                          \
+    if (!current_was_incremented) {                                            \
+      current++;                                                               \
+    }                                                                          \
+    break;                                                                     \
+  }
+
+// This generates a case and a body for the new space (which has to do extra
+// write barrier handling) and handles the other spaces with fall-through cases
+// and one body.
+#define ALL_SPACES(where, how, within)           \
+  CASE_STATEMENT(where, how, within, NEW_SPACE)  \
+  CASE_BODY(where, how, within, NEW_SPACE)       \
+  CASE_STATEMENT(where, how, within, OLD_SPACE)  \
+  CASE_STATEMENT(where, how, within, CODE_SPACE) \
+  CASE_STATEMENT(where, how, within, MAP_SPACE)  \
+  CASE_STATEMENT(where, how, within, LO_SPACE)   \
+  CASE_BODY(where, how, within, kAnyOldSpace)
+
+#define FOUR_CASES(byte_code) \
+  case byte_code:             \
+  case byte_code + 1:         \
+  case byte_code + 2:         \
+  case byte_code + 3:
+
+#define SIXTEEN_CASES(byte_code) \
+  FOUR_CASES(byte_code)          \
+  FOUR_CASES(byte_code + 4)      \
+  FOUR_CASES(byte_code + 8)      \
+  FOUR_CASES(byte_code + 12)
+
+#define SINGLE_CASE(where, how, within, space) \
+  CASE_STATEMENT(where, how, within, space)    \
+  CASE_BODY(where, how, within, space)
+
+      // Deserialize a new object and write a pointer to it to the current
+      // object.
+      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
+      // Support for direct instruction pointers in functions.  It's an inner
+      // pointer because it points at the entry point, not at the start of the
+      // code object.
+      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
+      // Deserialize a new code object and write a pointer to its first
+      // instruction to the current code object.
+      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
+      // Find a recently deserialized object using its offset from the current
+      // allocation point and write a pointer to it to the current object.
+      ALL_SPACES(kBackref, kPlain, kStartOfObject)
+      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
+#if V8_CODE_EMBEDS_OBJECT_POINTER
+      // Deserialize a new object from pointer found in code and write
+      // a pointer to it to the current object. Required only for MIPS, PPC, ARM
+      // or S390 with embedded constant pool, and omitted on the other
+      // architectures because it is fully unrolled and would cause bloat.
+      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
+      // Find a recently deserialized code object using its offset from the
+      // current allocation point and write a pointer to it to the current
+      // object. Required only for MIPS, PPC, ARM or S390 with embedded
+      // constant pool.
+      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
+      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
+#endif
+      // Find a recently deserialized code object using its offset from the
+      // current allocation point and write a pointer to its first instruction
+      // to the current code object or the instruction pointer in a function
+      // object.
+      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
+      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
+      ALL_SPACES(kBackref, kPlain, kInnerPointer)
+      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
+      // Find an object in the roots array and write a pointer to it to the
+      // current object.
+      SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
+#if V8_CODE_EMBEDS_OBJECT_POINTER
+      // Find an object in the roots array and write a pointer to it in code.
+      SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
+#endif
+      // Find an object in the partial snapshots cache and write a pointer to it
+      // to the current object.
+      SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
+      // Find a code entry in the partial snapshots cache and
+      // write a pointer to it to the current object.
+      SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
+      // Find an external reference and write a pointer to it to the current
+      // object.
+      SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
+      // Find an external reference and write a pointer to it in the current
+      // code object.
+      SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
+      // Find an object in the attached references and write a pointer to it to
+      // the current object.
+      SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
+      SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
+      SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
+      // Find a builtin and write a pointer to it to the current object.
+      SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
+      SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
+      SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
+
+#undef CASE_STATEMENT
+#undef CASE_BODY
+#undef ALL_SPACES
+
+      case kSkip: {
+        int size = source_.GetInt();
+        current = reinterpret_cast<Object**>(
+            reinterpret_cast<intptr_t>(current) + size);
+        break;
+      }
+
+      case kInternalReferenceEncoded:
+      case kInternalReference: {
+        // Internal reference address is not encoded via skip, but by offset
+        // from code entry.
+        int pc_offset = source_.GetInt();
+        int target_offset = source_.GetInt();
+        Code* code =
+            Code::cast(HeapObject::FromAddress(current_object_address));
+        DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
+        DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
+        Address pc = code->entry() + pc_offset;
+        Address target = code->entry() + target_offset;
+        Assembler::deserialization_set_target_internal_reference_at(
+            isolate, pc, target, data == kInternalReference
+                                     ? RelocInfo::INTERNAL_REFERENCE
+                                     : RelocInfo::INTERNAL_REFERENCE_ENCODED);
+        break;
+      }
+
+      case kNop:
+        break;
+
+      case kNextChunk: {
+        int space = source_.Get();
+        DCHECK(space < kNumberOfPreallocatedSpaces);
+        int chunk_index = current_chunk_[space];
+        const Heap::Reservation& reservation = reservations_[space];
+        // Make sure the current chunk is indeed exhausted.
+        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+        // Move to next reserved chunk.
+        chunk_index = ++current_chunk_[space];
+        CHECK_LT(chunk_index, reservation.length());
+        high_water_[space] = reservation[chunk_index].start;
+        break;
+      }
+
+      case kDeferred: {
+        // Deferred can only occur right after the heap object header.
+        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
+                                                     kPointerSize));
+        HeapObject* obj = HeapObject::FromAddress(current_object_address);
+        // If the deferred object is a map, its instance type may be used
+        // during deserialization. Initialize it with a temporary value.
+        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
+        current = limit;
+        return false;
+      }
+
+      case kSynchronize:
+        // If we get here, the number of GC roots does not match between
+        // serialization and deserialization.
+        CHECK(false);
+        break;
+
+      case kNativesStringResource:
+        current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
+                                      current);
+        break;
+
+      case kExtraNativesStringResource:
+        current = CopyInNativesSource(
+            ExtraNatives::GetScriptSource(source_.Get()), current);
+        break;
+
+      // Deserialize raw data of variable length.
+      case kVariableRawData: {
+        int size_in_bytes = source_.GetInt();
+        byte* raw_data_out = reinterpret_cast<byte*>(current);
+        source_.CopyRaw(raw_data_out, size_in_bytes);
+        break;
+      }
+
+      case kVariableRepeat: {
+        int repeats = source_.GetInt();
+        Object* object = current[-1];
+        DCHECK(!isolate->heap()->InNewSpace(object));
+        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+        break;
+      }
+
+      case kAlignmentPrefix:
+      case kAlignmentPrefix + 1:
+      case kAlignmentPrefix + 2:
+        SetAlignment(data);
+        break;
+
+      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
+      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
+      SIXTEEN_CASES(kRootArrayConstantsWithSkip)
+      SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
+        int skip = source_.GetInt();
+        current = reinterpret_cast<Object**>(
+            reinterpret_cast<intptr_t>(current) + skip);
+        // Fall through.
+      }
+
+      SIXTEEN_CASES(kRootArrayConstants)
+      SIXTEEN_CASES(kRootArrayConstants + 16) {
+        int id = data & kRootArrayConstantsMask;
+        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
+        Object* object = isolate->heap()->root(root_index);
+        DCHECK(!isolate->heap()->InNewSpace(object));
+        UnalignedCopy(current++, &object);
+        break;
+      }
+
+      STATIC_ASSERT(kNumberOfHotObjects == 8);
+      FOUR_CASES(kHotObjectWithSkip)
+      FOUR_CASES(kHotObjectWithSkip + 4) {
+        int skip = source_.GetInt();
+        current = reinterpret_cast<Object**>(
+            reinterpret_cast<Address>(current) + skip);
+        // Fall through.
+      }
+
+      FOUR_CASES(kHotObject)
+      FOUR_CASES(kHotObject + 4) {
+        int index = data & kHotObjectMask;
+        Object* hot_object = hot_objects_.Get(index);
+        UnalignedCopy(current, &hot_object);
+        if (write_barrier_needed) {
+          Address current_address = reinterpret_cast<Address>(current);
+          SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
+          isolate->heap()->RecordWrite(
+              HeapObject::FromAddress(current_object_address),
+              static_cast<int>(current_address - current_object_address),
+              hot_object);
+        }
+        current++;
+        break;
+      }
+
+      // Deserialize raw data of fixed length from 1 to 32 words.
+      STATIC_ASSERT(kNumberOfFixedRawData == 32);
+      SIXTEEN_CASES(kFixedRawData)
+      SIXTEEN_CASES(kFixedRawData + 16) {
+        byte* raw_data_out = reinterpret_cast<byte*>(current);
+        int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
+        source_.CopyRaw(raw_data_out, size_in_bytes);
+        current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
+        break;
+      }
+
+      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
+      SIXTEEN_CASES(kFixedRepeat) {
+        int repeats = data - kFixedRepeatStart;
+        Object* object;
+        UnalignedCopy(&object, current - 1);
+        DCHECK(!isolate->heap()->InNewSpace(object));
+        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+        break;
+      }
+
+#undef SIXTEEN_CASES
+#undef FOUR_CASES
+#undef SINGLE_CASE
+
+      default:
+        CHECK(false);
+    }
+  }
+  CHECK_EQ(limit, current);
+  return true;
+}
+}  // namespace internal
+}  // namespace v8
diff --git a/src/snapshot/deserializer.h b/src/snapshot/deserializer.h
new file mode 100644
index 0000000..58c481c
--- /dev/null
+++ b/src/snapshot/deserializer.h
@@ -0,0 +1,150 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_DESERIALIZER_H_
+#define V8_SNAPSHOT_DESERIALIZER_H_
+
+#include "src/heap/heap.h"
+#include "src/objects.h"
+#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot-source-sink.h"
+
+namespace v8 {
+namespace internal {
+
+// Used for platforms with embedded constant pools to trigger deserialization
+// of objects found in code.
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+    defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) ||    \
+    V8_EMBEDDED_CONSTANT_POOL
+#define V8_CODE_EMBEDS_OBJECT_POINTER 1
+#else
+#define V8_CODE_EMBEDS_OBJECT_POINTER 0
+#endif
+
+class Heap;
+
+// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+class Deserializer : public SerializerDeserializer {
+ public:
+  // Create a deserializer from a snapshot byte source.
+  template <class Data>
+  explicit Deserializer(Data* data)
+      : isolate_(NULL),
+        source_(data->Payload()),
+        magic_number_(data->GetMagicNumber()),
+        external_reference_table_(NULL),
+        deserialized_large_objects_(0),
+        deserializing_user_code_(false),
+        next_alignment_(kWordAligned) {
+    DecodeReservation(data->Reservations());
+  }
+
+  ~Deserializer() override;
+
+  // Deserialize the snapshot into an empty heap.
+  void Deserialize(Isolate* isolate);
+
+  // Deserialize a single object and the objects reachable from it.
+  MaybeHandle<Object> DeserializePartial(Isolate* isolate,
+                                         Handle<JSGlobalProxy> global_proxy);
+
+  // Deserialize a shared function info. Fail gracefully.
+  MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
+
+  // Pass a vector of externally-provided objects referenced by the snapshot.
+  // Ownership of the vector's backing store is handed over as well.
+  void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
+    attached_objects_ = attached_objects;
+  }
+
+ private:
+  void VisitPointers(Object** start, Object** end) override;
+
+  void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
+  void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
+
+  void Initialize(Isolate* isolate);
+
+  bool deserializing_user_code() { return deserializing_user_code_; }
+
+  void DecodeReservation(Vector<const SerializedData::Reservation> res);
+
+  bool ReserveSpace();
+
+  void UnalignedCopy(Object** dest, Object** src) {
+    memcpy(dest, src, sizeof(*src));
+  }
+
+  void SetAlignment(byte data) {
+    DCHECK_EQ(kWordAligned, next_alignment_);
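+    // kAlignmentPrefix, +1 and +2 select the non-word AllocationAlignment
+    // values; word-aligned allocations need no prefix.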
+    int alignment = data - (kAlignmentPrefix - 1);
+    DCHECK_LE(kWordAligned, alignment);
+    DCHECK_LE(alignment, kSimd128Unaligned);
+    next_alignment_ = static_cast<AllocationAlignment>(alignment);
+  }
+
+  void DeserializeDeferredObjects();
+
+  void FlushICacheForNewIsolate();
+  void FlushICacheForNewCodeObjects();
+
+  void CommitPostProcessedObjects(Isolate* isolate);
+
+  // Fills in some heap data in an area from start to end (non-inclusive).  The
+  // space id is used for the write barrier.  The object_address is the address
+  // of the object we are writing into, or NULL if we are not writing into an
+  // object, i.e. if we are writing a series of tagged values that are not on
+  // the heap. Return false if the object content has been deferred.
+  bool ReadData(Object** start, Object** end, int space,
+                Address object_address);
+  void ReadObject(int space_number, Object** write_back);
+  Address Allocate(int space_index, int size);
+
+  // Special handling for serialized code like hooking up internalized strings.
+  HeapObject* PostProcessNewObject(HeapObject* obj, int space);
+
+  // This returns the address of an object that has been described in the
+  // snapshot by chunk index and offset.
+  HeapObject* GetBackReferencedObject(int space);
+
+  Object** CopyInNativesSource(Vector<const char> source_vector,
+                               Object** current);
+
+  // Cached current isolate.
+  Isolate* isolate_;
+
+  // Objects from the attached object descriptions in the serialized user code.
+  Vector<Handle<Object> > attached_objects_;
+
+  SnapshotByteSource source_;
+  uint32_t magic_number_;
+
+  // The address of the next object that will be allocated in each space.
+  // Each space has a number of chunks reserved by the GC, with each chunk
+  // fitting into a page. Deserialized objects are allocated into the
+  // current chunk of the target space by bumping up high water mark.
+  Heap::Reservation reservations_[kNumberOfSpaces];
+  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
+  Address high_water_[kNumberOfPreallocatedSpaces];
+
+  ExternalReferenceTable* external_reference_table_;
+
+  List<HeapObject*> deserialized_large_objects_;
+  List<Code*> new_code_objects_;
+  List<Handle<String> > new_internalized_strings_;
+  List<Handle<Script> > new_scripts_;
+
+  bool deserializing_user_code_;
+
+  AllocationAlignment next_alignment_;
+
+  DISALLOW_COPY_AND_ASSIGN(Deserializer);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SNAPSHOT_DESERIALIZER_H_
diff --git a/src/snapshot/mksnapshot.cc b/src/snapshot/mksnapshot.cc
index c163cae..9fe611a 100644
--- a/src/snapshot/mksnapshot.cc
+++ b/src/snapshot/mksnapshot.cc
@@ -12,8 +12,8 @@
 #include "src/flags.h"
 #include "src/list.h"
 #include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
-
+#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/startup-serializer.h"
 
 using namespace v8;
 
@@ -109,10 +109,9 @@
   FILE* startup_blob_file_;
 };
 
-
-char* GetExtraCode(char* filename) {
+char* GetExtraCode(char* filename, const char* description) {
   if (filename == NULL || strlen(filename) == 0) return NULL;
-  ::printf("Embedding extra script: %s\n", filename);
+  ::printf("Loading script for %s: %s\n", description, filename);
   FILE* file = base::OS::FOpen(filename, "rb");
   if (file == NULL) {
     fprintf(stderr, "Failed to open '%s': errno %d\n", filename, errno);
@@ -137,14 +136,13 @@
 
 
 int main(int argc, char** argv) {
-  // By default, log code create information in the snapshot.
-  i::FLAG_log_code = true;
-  i::FLAG_logfile_per_isolate = false;
+  // Make mksnapshot runs predictable to create reproducible snapshots.
+  i::FLAG_predictable = true;
 
   // Print the usage if an error occurs when parsing the command line
   // flags or if the help flag is set.
   int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
-  if (result > 0 || (argc != 1 && argc != 2) || i::FLAG_help) {
+  if (result > 0 || (argc > 3) || i::FLAG_help) {
     ::printf("Usage: %s --startup_src=... --startup_blob=... [extras]\n",
              argv[0]);
     i::FlagList::PrintHelp();
@@ -161,11 +159,21 @@
     SnapshotWriter writer;
     if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
     if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
-    char* extra_code = GetExtraCode(argc == 2 ? argv[1] : NULL);
-    StartupData blob = v8::V8::CreateSnapshotDataBlob(extra_code);
+
+    char* embed_script = GetExtraCode(argc >= 2 ? argv[1] : NULL, "embedding");
+    StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
+    delete[] embed_script;
+
+    char* warmup_script = GetExtraCode(argc >= 3 ? argv[2] : NULL, "warm up");
+    if (warmup_script) {
+      StartupData cold = blob;
+      blob = v8::V8::WarmUpSnapshotDataBlob(cold, warmup_script);
+      delete[] cold.data;
+      delete[] warmup_script;
+    }
+
     CHECK(blob.data);
     writer.WriteSnapshot(blob);
-    delete[] extra_code;
     delete[] blob.data;
   }
 
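A minimal embedder-side sketch of the two-step flow the updated main() follows: create a cold blob, then optionally warm it up. It is not part of the patch; it only reuses the two v8::V8 entry points already called in the hunk above, and the helper name plus the omission of file loading, flag handling, and error checks are illustrative assumptions.

#include "include/v8.h"

// Illustrative helper: build a startup blob from an optional embedded script,
// then warm it up with an optional second script, as mksnapshot now does.
v8::StartupData MakeSnapshotBlob(const char* embed_script,
                                 const char* warmup_script) {
  // Step 1: create the cold snapshot, embedding embed_script if non-NULL.
  v8::StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
  if (warmup_script != NULL) {
    // Step 2: run the warm-up script against the cold blob; the cold blob's
    // backing store is no longer needed once the warmed blob exists.
    v8::StartupData cold = blob;
    blob = v8::V8::WarmUpSnapshotDataBlob(cold, warmup_script);
    delete[] cold.data;
  }
  return blob;  // The caller owns blob.data and must delete[] it.
}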
diff --git a/src/snapshot/partial-serializer.cc b/src/snapshot/partial-serializer.cc
new file mode 100644
index 0000000..0f1f133
--- /dev/null
+++ b/src/snapshot/partial-serializer.cc
@@ -0,0 +1,123 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/partial-serializer.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+PartialSerializer::PartialSerializer(Isolate* isolate,
+                                     Serializer* startup_snapshot_serializer,
+                                     SnapshotByteSink* sink)
+    : Serializer(isolate, sink),
+      startup_serializer_(startup_snapshot_serializer),
+      global_object_(NULL),
+      next_partial_cache_index_(0) {
+  InitializeCodeAddressMap();
+}
+
+PartialSerializer::~PartialSerializer() {
+  OutputStatistics("PartialSerializer");
+}
+
+void PartialSerializer::Serialize(Object** o) {
+  if ((*o)->IsContext()) {
+    Context* context = Context::cast(*o);
+    global_object_ = context->global_object();
+    back_reference_map()->AddGlobalProxy(context->global_proxy());
+    // The bootstrap snapshot has a code-stub context. When serializing the
+    // partial snapshot, the native context is chained into the isolate's
+    // weak context list, so its next-context pointer may point to the
+    // code-stub context. Clear it before serializing; it is re-added to the
+    // context list explicitly when the snapshot is loaded.
+    if (context->IsNativeContext()) {
+      context->set(Context::NEXT_CONTEXT_LINK,
+                   isolate_->heap()->undefined_value());
+      DCHECK(!context->global_object()->IsUndefined());
+    }
+  }
+  VisitPointer(o);
+  SerializeDeferredObjects();
+  Pad();
+}
+
+void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+                                        WhereToPoint where_to_point, int skip) {
+  if (obj->IsMap()) {
+    // The code caches link to context-specific code objects, which
+    // the startup and context serializers cannot currently handle.
+    DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
+  }
+
+  // Replace typed arrays by undefined.
+  if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
+
+  int root_index = root_index_map_.Lookup(obj);
+  if (root_index != RootIndexMap::kInvalidRootIndex) {
+    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+    return;
+  }
+
+  if (ShouldBeInThePartialSnapshotCache(obj)) {
+    FlushSkip(skip);
+
+    int cache_index = PartialSnapshotCacheIndex(obj);
+    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
+               "PartialSnapshotCache");
+    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+    return;
+  }
+
+  // Pointers from the partial snapshot to the objects in the startup snapshot
+  // should go through the root array or through the partial snapshot cache.
+  // If this is not the case you may have to add something to the root array.
+  DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
+  // All the internalized strings that the partial snapshot needs should be
+  // either in the root table or in the partial snapshot cache.
+  DCHECK(!obj->IsInternalizedString());
+
+  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+
+  FlushSkip(skip);
+
+  // Clear literal boilerplates.
+  if (obj->IsJSFunction()) {
+    FixedArray* literals = JSFunction::cast(obj)->literals();
+    for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
+  }
+
+  // Object has not yet been serialized.  Serialize it here.
+  ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
+  serializer.Serialize();
+}
+
+int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+  int index = partial_cache_index_map_.LookupOrInsert(
+      heap_object, next_partial_cache_index_);
+  if (index == PartialCacheIndexMap::kInvalidIndex) {
+    // This object is not part of the partial snapshot cache yet. Add it to the
+    // startup snapshot so we can refer to it via partial snapshot index from
+    // the partial snapshot.
+    startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
+    return next_partial_cache_index_++;
+  }
+  return index;
+}
+
+bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+  // Scripts should be referred to only through shared function infos. We
+  // can't allow them to be part of the partial snapshot because they contain
+  // a unique ID, and deserializing several partial snapshots containing
+  // scripts would create duplicates.
+  DCHECK(!o->IsScript());
+  return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
+         o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
+         o->map() ==
+             startup_serializer_->isolate()->heap()->fixed_cow_array_map();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/snapshot/partial-serializer.h b/src/snapshot/partial-serializer.h
new file mode 100644
index 0000000..0bf61dd
--- /dev/null
+++ b/src/snapshot/partial-serializer.h
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
+#define V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
+
+#include "src/address-map.h"
+#include "src/snapshot/serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class PartialSerializer : public Serializer {
+ public:
+  PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
+                    SnapshotByteSink* sink);
+
+  ~PartialSerializer() override;
+
+  // Serialize the objects reachable from a single object pointer.
+  void Serialize(Object** o);
+
+ private:
+  class PartialCacheIndexMap : public AddressMapBase {
+   public:
+    PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
+
+    static const int kInvalidIndex = -1;
+
+    // Look up the object in the map. Return its index if found; otherwise,
+    // create a new entry with new_index as value and return kInvalidIndex.
+    int LookupOrInsert(HeapObject* obj, int new_index) {
+      HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+      if (entry != NULL) return GetValue(entry);
+      SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
+      return kInvalidIndex;
+    }
+
+   private:
+    HashMap map_;
+
+    DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
+  };
+
+  void SerializeObject(HeapObject* o, HowToCode how_to_code,
+                       WhereToPoint where_to_point, int skip) override;
+
+  int PartialSnapshotCacheIndex(HeapObject* o);
+  bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
+
+  Serializer* startup_serializer_;
+  Object* global_object_;
+  PartialCacheIndexMap partial_cache_index_map_;
+  int next_partial_cache_index_;
+  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
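A sketch (under assumptions, not part of the patch) of how the interface declared above is driven: a PartialSerializer is constructed against an existing startup serializer and byte sink, and Serialize() is handed a single root pointer. The surrounding snapshot-creation code is assumed to provide the startup serializer and sink; the free-function name here is illustrative.

#include "src/snapshot/partial-serializer.h"

namespace i = v8::internal;

// Illustrative driver: serialize everything reachable from one root object.
// Objects that belong to the startup snapshot are emitted as partial snapshot
// cache references; the rest is written directly to the sink.
void SerializePartialSnapshot(i::Isolate* isolate,
                              i::Serializer* startup_serializer,
                              i::SnapshotByteSink* sink, i::Object** root) {
  i::PartialSerializer partial(isolate, startup_serializer, sink);
  partial.Serialize(root);
}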
diff --git a/src/snapshot/serialize.cc b/src/snapshot/serialize.cc
deleted file mode 100644
index 4868abd..0000000
--- a/src/snapshot/serialize.cc
+++ /dev/null
@@ -1,2877 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/serialize.h"
-
-#include "src/accessors.h"
-#include "src/api.h"
-#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
-#include "src/code-stubs.h"
-#include "src/deoptimizer.h"
-#include "src/execution.h"
-#include "src/global-handles.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/objects.h"
-#include "src/parsing/parser.h"
-#include "src/profiler/cpu-profiler.h"
-#include "src/runtime/runtime.h"
-#include "src/snapshot/natives.h"
-#include "src/snapshot/snapshot.h"
-#include "src/snapshot/snapshot-source-sink.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
-#include "src/version.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Coding of external references.
-
-
-ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
-  ExternalReferenceTable* external_reference_table =
-      isolate->external_reference_table();
-  if (external_reference_table == NULL) {
-    external_reference_table = new ExternalReferenceTable(isolate);
-    isolate->set_external_reference_table(external_reference_table);
-  }
-  return external_reference_table;
-}
-
-
-ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
-  // Miscellaneous
-  Add(ExternalReference::roots_array_start(isolate).address(),
-      "Heap::roots_array_start()");
-  Add(ExternalReference::address_of_stack_limit(isolate).address(),
-      "StackGuard::address_of_jslimit()");
-  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
-      "StackGuard::address_of_real_jslimit()");
-  Add(ExternalReference::new_space_start(isolate).address(),
-      "Heap::NewSpaceStart()");
-  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
-      "Heap::NewSpaceAllocationLimitAddress()");
-  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
-      "Heap::NewSpaceAllocationTopAddress()");
-  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
-      "mod_two_doubles");
-  // Keyed lookup cache.
-  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
-      "KeyedLookupCache::keys()");
-  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
-      "KeyedLookupCache::field_offsets()");
-  Add(ExternalReference::handle_scope_next_address(isolate).address(),
-      "HandleScope::next");
-  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
-      "HandleScope::limit");
-  Add(ExternalReference::handle_scope_level_address(isolate).address(),
-      "HandleScope::level");
-  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
-      "Deoptimizer::New()");
-  Add(ExternalReference::compute_output_frames_function(isolate).address(),
-      "Deoptimizer::ComputeOutputFrames()");
-  Add(ExternalReference::address_of_min_int().address(),
-      "LDoubleConstant::min_int");
-  Add(ExternalReference::address_of_one_half().address(),
-      "LDoubleConstant::one_half");
-  Add(ExternalReference::isolate_address(isolate).address(), "isolate");
-  Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
-      "Interpreter::dispatch_table_address");
-  Add(ExternalReference::address_of_negative_infinity().address(),
-      "LDoubleConstant::negative_infinity");
-  Add(ExternalReference::power_double_double_function(isolate).address(),
-      "power_double_double_function");
-  Add(ExternalReference::power_double_int_function(isolate).address(),
-      "power_double_int_function");
-  Add(ExternalReference::math_log_double_function(isolate).address(),
-      "std::log");
-  Add(ExternalReference::store_buffer_top(isolate).address(),
-      "store_buffer_top");
-  Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
-  Add(ExternalReference::get_date_field_function(isolate).address(),
-      "JSDate::GetField");
-  Add(ExternalReference::date_cache_stamp(isolate).address(),
-      "date_cache_stamp");
-  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
-      "address_of_pending_message_obj");
-  Add(ExternalReference::get_make_code_young_function(isolate).address(),
-      "Code::MakeCodeYoung");
-  Add(ExternalReference::cpu_features().address(), "cpu_features");
-  Add(ExternalReference::old_space_allocation_top_address(isolate).address(),
-      "Heap::OldSpaceAllocationTopAddress");
-  Add(ExternalReference::old_space_allocation_limit_address(isolate).address(),
-      "Heap::OldSpaceAllocationLimitAddress");
-  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
-      "Heap::allocation_sites_list_address()");
-  Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
-  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
-      "Code::MarkCodeAsExecuted");
-  Add(ExternalReference::is_profiling_address(isolate).address(),
-      "CpuProfiler::is_profiling");
-  Add(ExternalReference::scheduled_exception_address(isolate).address(),
-      "Isolate::scheduled_exception");
-  Add(ExternalReference::invoke_function_callback(isolate).address(),
-      "InvokeFunctionCallback");
-  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
-      "InvokeAccessorGetterCallback");
-  Add(ExternalReference::f32_trunc_wrapper_function(isolate).address(),
-      "f32_trunc_wrapper");
-  Add(ExternalReference::f32_floor_wrapper_function(isolate).address(),
-      "f32_floor_wrapper");
-  Add(ExternalReference::f32_ceil_wrapper_function(isolate).address(),
-      "f32_ceil_wrapper");
-  Add(ExternalReference::f32_nearest_int_wrapper_function(isolate).address(),
-      "f32_nearest_int_wrapper");
-  Add(ExternalReference::f64_trunc_wrapper_function(isolate).address(),
-      "f64_trunc_wrapper");
-  Add(ExternalReference::f64_floor_wrapper_function(isolate).address(),
-      "f64_floor_wrapper");
-  Add(ExternalReference::f64_ceil_wrapper_function(isolate).address(),
-      "f64_ceil_wrapper");
-  Add(ExternalReference::f64_nearest_int_wrapper_function(isolate).address(),
-      "f64_nearest_int_wrapper");
-  Add(ExternalReference::log_enter_external_function(isolate).address(),
-      "Logger::EnterExternal");
-  Add(ExternalReference::log_leave_external_function(isolate).address(),
-      "Logger::LeaveExternal");
-  Add(ExternalReference::address_of_minus_one_half().address(),
-      "double_constants.minus_one_half");
-  Add(ExternalReference::stress_deopt_count(isolate).address(),
-      "Isolate::stress_deopt_count_address()");
-  Add(ExternalReference::virtual_handler_register(isolate).address(),
-      "Isolate::virtual_handler_register()");
-  Add(ExternalReference::virtual_slot_register(isolate).address(),
-      "Isolate::virtual_slot_register()");
-  Add(ExternalReference::runtime_function_table_address(isolate).address(),
-      "Runtime::runtime_function_table_address()");
-
-  // Debug addresses
-  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
-      "Debug::after_break_target_address()");
-  Add(ExternalReference::debug_is_active_address(isolate).address(),
-      "Debug::is_active_address()");
-  Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
-      "Debug::step_in_enabled_address()");
-
-#ifndef V8_INTERPRETED_REGEXP
-  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
-      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
-  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
-      "RegExpMacroAssembler*::CheckStackGuardState()");
-  Add(ExternalReference::re_grow_stack(isolate).address(),
-      "NativeRegExpMacroAssembler::GrowStack()");
-  Add(ExternalReference::re_word_character_map().address(),
-      "NativeRegExpMacroAssembler::word_character_map");
-  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
-      "RegExpStack::limit_address()");
-  Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
-          .address(),
-      "RegExpStack::memory_address()");
-  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
-      "RegExpStack::memory_size()");
-  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
-      "OffsetsVector::static_offsets_vector");
-#endif  // V8_INTERPRETED_REGEXP
-
-  // The following populates all of the different type of external references
-  // into the ExternalReferenceTable.
-  //
-  // NOTE: This function was originally 100k of code.  It has since been
-  // rewritten to be mostly table driven, as the callback macro style tends to
-  // very easily cause code bloat.  Please be careful in the future when adding
-  // new references.
-
-  struct RefTableEntry {
-    uint16_t id;
-    const char* name;
-  };
-
-  static const RefTableEntry c_builtins[] = {
-#define DEF_ENTRY_C(name, ignored)           \
-  { Builtins::c_##name, "Builtins::" #name } \
-  ,
-      BUILTIN_LIST_C(DEF_ENTRY_C)
-#undef DEF_ENTRY_C
-  };
-
-  for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
-    ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
-                          isolate);
-    Add(ref.address(), c_builtins[i].name);
-  }
-
-  static const RefTableEntry builtins[] = {
-#define DEF_ENTRY_C(name, ignored)          \
-  { Builtins::k##name, "Builtins::" #name } \
-  ,
-#define DEF_ENTRY_A(name, i1, i2, i3)       \
-  { Builtins::k##name, "Builtins::" #name } \
-  ,
-      BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
-          BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
-#undef DEF_ENTRY_C
-#undef DEF_ENTRY_A
-  };
-
-  for (unsigned i = 0; i < arraysize(builtins); ++i) {
-    ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
-    Add(ref.address(), builtins[i].name);
-  }
-
-  static const RefTableEntry runtime_functions[] = {
-#define RUNTIME_ENTRY(name, i1, i2)       \
-  { Runtime::k##name, "Runtime::" #name } \
-  ,
-      FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
-#undef RUNTIME_ENTRY
-  };
-
-  for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
-    ExternalReference ref(
-        static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
-    Add(ref.address(), runtime_functions[i].name);
-  }
-
-  // Stat counters
-  struct StatsRefTableEntry {
-    StatsCounter* (Counters::*counter)();
-    const char* name;
-  };
-
-  static const StatsRefTableEntry stats_ref_table[] = {
-#define COUNTER_ENTRY(name, caption)      \
-  { &Counters::name, "Counters::" #name } \
-  ,
-      STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
-#undef COUNTER_ENTRY
-  };
-
-  Counters* counters = isolate->counters();
-  for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
-    // To make sure the indices are not dependent on whether counters are
-    // enabled, use a dummy address as filler.
-    Address address = NotAvailable();
-    StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
-    if (counter->Enabled()) {
-      address = reinterpret_cast<Address>(counter->GetInternalPointer());
-    }
-    Add(address, stats_ref_table[i].name);
-  }
-
-  // Top addresses
-  static const char* address_names[] = {
-#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
-      FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
-#undef BUILD_NAME_LITERAL
-  };
-
-  for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
-    Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
-        address_names[i]);
-  }
-
-  // Accessors
-  struct AccessorRefTable {
-    Address address;
-    const char* name;
-  };
-
-  static const AccessorRefTable accessors[] = {
-#define ACCESSOR_INFO_DECLARATION(name)                                     \
-  { FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \
-  ,
-      ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
-#undef ACCESSOR_INFO_DECLARATION
-#define ACCESSOR_SETTER_DECLARATION(name)                  \
-  { FUNCTION_ADDR(&Accessors::name), "Accessors::" #name } \
-  ,
-          ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
-#undef ACCESSOR_INFO_DECLARATION
-  };
-
-  for (unsigned i = 0; i < arraysize(accessors); ++i) {
-    Add(accessors[i].address, accessors[i].name);
-  }
-
-  StubCache* stub_cache = isolate->stub_cache();
-
-  // Stub cache tables
-  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
-      "StubCache::primary_->key");
-  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
-      "StubCache::primary_->value");
-  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
-      "StubCache::primary_->map");
-  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
-      "StubCache::secondary_->key");
-  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
-      "StubCache::secondary_->value");
-  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
-      "StubCache::secondary_->map");
-
-  // Runtime entries
-  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
-      "HandleScope::DeleteExtensions");
-  Add(ExternalReference::incremental_marking_record_write_function(isolate)
-          .address(),
-      "IncrementalMarking::RecordWrite");
-  Add(ExternalReference::incremental_marking_record_write_code_entry_function(
-          isolate)
-          .address(),
-      "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
-  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
-      "StoreBuffer::StoreBufferOverflow");
-
-  // Add a small set of deopt entry addresses to encoder without generating the
-  // deopt table code, which isn't possible at deserialization time.
-  HandleScope scope(isolate);
-  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
-    Address address = Deoptimizer::GetDeoptimizationEntry(
-        isolate,
-        entry,
-        Deoptimizer::LAZY,
-        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
-    Add(address, "lazy_deopt");
-  }
-}
-
-
-ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
-  map_ = isolate->external_reference_map();
-  if (map_ != NULL) return;
-  map_ = new HashMap(HashMap::PointersMatch);
-  ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
-  for (int i = 0; i < table->size(); ++i) {
-    Address addr = table->address(i);
-    if (addr == ExternalReferenceTable::NotAvailable()) continue;
-    // We expect no duplicate external references entries in the table.
-    DCHECK_NULL(map_->Lookup(addr, Hash(addr)));
-    map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
-  }
-  isolate->set_external_reference_map(map_);
-}
-
-
-uint32_t ExternalReferenceEncoder::Encode(Address address) const {
-  DCHECK_NOT_NULL(address);
-  HashMap::Entry* entry =
-      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
-  DCHECK_NOT_NULL(entry);
-  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
-}
-
-
-const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
-                                                    Address address) const {
-  HashMap::Entry* entry =
-      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
-  if (entry == NULL) return "<unknown>";
-  uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
-  return ExternalReferenceTable::instance(isolate)->name(i);
-}
-
-
-class CodeAddressMap: public CodeEventLogger {
- public:
-  explicit CodeAddressMap(Isolate* isolate)
-      : isolate_(isolate) {
-    isolate->logger()->addCodeEventListener(this);
-  }
-
-  ~CodeAddressMap() override {
-    isolate_->logger()->removeCodeEventListener(this);
-  }
-
-  void CodeMoveEvent(Address from, Address to) override {
-    address_to_name_map_.Move(from, to);
-  }
-
-  void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) override {}
-
-  void CodeDeleteEvent(Address from) override {
-    address_to_name_map_.Remove(from);
-  }
-
-  const char* Lookup(Address address) {
-    return address_to_name_map_.Lookup(address);
-  }
-
- private:
-  class NameMap {
-   public:
-    NameMap() : impl_(HashMap::PointersMatch) {}
-
-    ~NameMap() {
-      for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
-        DeleteArray(static_cast<const char*>(p->value));
-      }
-    }
-
-    void Insert(Address code_address, const char* name, int name_size) {
-      HashMap::Entry* entry = FindOrCreateEntry(code_address);
-      if (entry->value == NULL) {
-        entry->value = CopyName(name, name_size);
-      }
-    }
-
-    const char* Lookup(Address code_address) {
-      HashMap::Entry* entry = FindEntry(code_address);
-      return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
-    }
-
-    void Remove(Address code_address) {
-      HashMap::Entry* entry = FindEntry(code_address);
-      if (entry != NULL) {
-        DeleteArray(static_cast<char*>(entry->value));
-        RemoveEntry(entry);
-      }
-    }
-
-    void Move(Address from, Address to) {
-      if (from == to) return;
-      HashMap::Entry* from_entry = FindEntry(from);
-      DCHECK(from_entry != NULL);
-      void* value = from_entry->value;
-      RemoveEntry(from_entry);
-      HashMap::Entry* to_entry = FindOrCreateEntry(to);
-      DCHECK(to_entry->value == NULL);
-      to_entry->value = value;
-    }
-
-   private:
-    static char* CopyName(const char* name, int name_size) {
-      char* result = NewArray<char>(name_size + 1);
-      for (int i = 0; i < name_size; ++i) {
-        char c = name[i];
-        if (c == '\0') c = ' ';
-        result[i] = c;
-      }
-      result[name_size] = '\0';
-      return result;
-    }
-
-    HashMap::Entry* FindOrCreateEntry(Address code_address) {
-      return impl_.LookupOrInsert(code_address,
-                                  ComputePointerHash(code_address));
-    }
-
-    HashMap::Entry* FindEntry(Address code_address) {
-      return impl_.Lookup(code_address, ComputePointerHash(code_address));
-    }
-
-    void RemoveEntry(HashMap::Entry* entry) {
-      impl_.Remove(entry->key, entry->hash);
-    }
-
-    HashMap impl_;
-
-    DISALLOW_COPY_AND_ASSIGN(NameMap);
-  };
-
-  void LogRecordedBuffer(Code* code, SharedFunctionInfo*, const char* name,
-                         int length) override {
-    address_to_name_map_.Insert(code->address(), name, length);
-  }
-
-  NameMap address_to_name_map_;
-  Isolate* isolate_;
-};
-
-
-void Deserializer::DecodeReservation(
-    Vector<const SerializedData::Reservation> res) {
-  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
-  STATIC_ASSERT(NEW_SPACE == 0);
-  int current_space = NEW_SPACE;
-  for (auto& r : res) {
-    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
-    if (r.is_last()) current_space++;
-  }
-  DCHECK_EQ(kNumberOfSpaces, current_space);
-  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
-}
-
-
-void Deserializer::FlushICacheForNewIsolate() {
-  DCHECK(!deserializing_user_code_);
-  // The entire isolate is newly deserialized. Simply flush all code pages.
-  PageIterator it(isolate_->heap()->code_space());
-  while (it.has_next()) {
-    Page* p = it.next();
-    Assembler::FlushICache(isolate_, p->area_start(),
-                           p->area_end() - p->area_start());
-  }
-}
-
-
-void Deserializer::FlushICacheForNewCodeObjects() {
-  DCHECK(deserializing_user_code_);
-  for (Code* code : new_code_objects_) {
-    Assembler::FlushICache(isolate_, code->instruction_start(),
-                           code->instruction_size());
-  }
-}
-
-
-bool Deserializer::ReserveSpace() {
-#ifdef DEBUG
-  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
-    CHECK(reservations_[i].length() > 0);
-  }
-#endif  // DEBUG
-  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
-  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
-    high_water_[i] = reservations_[i][0].start;
-  }
-  return true;
-}
-
-
-void Deserializer::Initialize(Isolate* isolate) {
-  DCHECK_NULL(isolate_);
-  DCHECK_NOT_NULL(isolate);
-  isolate_ = isolate;
-  DCHECK_NULL(external_reference_table_);
-  external_reference_table_ = ExternalReferenceTable::instance(isolate);
-  CHECK_EQ(magic_number_,
-           SerializedData::ComputeMagicNumber(external_reference_table_));
-}
-
-
-void Deserializer::Deserialize(Isolate* isolate) {
-  Initialize(isolate);
-  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
-  // No active threads.
-  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
-  // No active handles.
-  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
-
-  {
-    DisallowHeapAllocation no_gc;
-    isolate_->heap()->IterateSmiRoots(this);
-    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-    isolate_->heap()->RepairFreeListsAfterDeserialization();
-    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
-    DeserializeDeferredObjects();
-    FlushICacheForNewIsolate();
-  }
-
-  isolate_->heap()->set_native_contexts_list(
-      isolate_->heap()->undefined_value());
-  // The allocation site list is build during root iteration, but if no sites
-  // were encountered then it needs to be initialized to undefined.
-  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
-    isolate_->heap()->set_allocation_sites_list(
-        isolate_->heap()->undefined_value());
-  }
-
-  // Update data pointers to the external strings containing natives sources.
-  Natives::UpdateSourceCache(isolate_->heap());
-  ExtraNatives::UpdateSourceCache(isolate_->heap());
-
-  // Issue code events for newly deserialized code objects.
-  LOG_CODE_EVENT(isolate_, LogCodeObjects());
-  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
-}
-
-
-MaybeHandle<Object> Deserializer::DeserializePartial(
-    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
-  Initialize(isolate);
-  if (!ReserveSpace()) {
-    V8::FatalProcessOutOfMemory("deserialize context");
-    return MaybeHandle<Object>();
-  }
-
-  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
-  attached_objects[kGlobalProxyReference] = global_proxy;
-  SetAttachedObjects(attached_objects);
-
-  DisallowHeapAllocation no_gc;
-  // Keep track of the code space start and end pointers in case new
-  // code objects were unserialized
-  OldSpace* code_space = isolate_->heap()->code_space();
-  Address start_address = code_space->top();
-  Object* root;
-  VisitPointer(&root);
-  DeserializeDeferredObjects();
-
-  // There's no code deserialized here. If this assert fires then that's
-  // changed and logging should be added to notify the profiler et al of the
-  // new code, which also has to be flushed from instruction cache.
-  CHECK_EQ(start_address, code_space->top());
-  return Handle<Object>(root, isolate);
-}
-
-
-MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
-    Isolate* isolate) {
-  Initialize(isolate);
-  if (!ReserveSpace()) {
-    return Handle<SharedFunctionInfo>();
-  } else {
-    deserializing_user_code_ = true;
-    HandleScope scope(isolate);
-    Handle<SharedFunctionInfo> result;
-    {
-      DisallowHeapAllocation no_gc;
-      Object* root;
-      VisitPointer(&root);
-      DeserializeDeferredObjects();
-      FlushICacheForNewCodeObjects();
-      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
-    }
-    CommitPostProcessedObjects(isolate);
-    return scope.CloseAndEscape(result);
-  }
-}
-
-
-Deserializer::~Deserializer() {
-  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
-  // DCHECK(source_.AtEOF());
-  attached_objects_.Dispose();
-}
-
-
-// This is called on the roots.  It is the driver of the deserialization
-// process.  It is also called on the body of each function.
-void Deserializer::VisitPointers(Object** start, Object** end) {
-  // The space must be new space.  Any other space would cause ReadChunk to try
-  // to update the remembered using NULL as the address.
-  ReadData(start, end, NEW_SPACE, NULL);
-}
-
-void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
-  static const byte expected = kSynchronize;
-  CHECK_EQ(expected, source_.Get());
-}
-
-void Deserializer::DeserializeDeferredObjects() {
-  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
-    switch (code) {
-      case kAlignmentPrefix:
-      case kAlignmentPrefix + 1:
-      case kAlignmentPrefix + 2:
-        SetAlignment(code);
-        break;
-      default: {
-        int space = code & kSpaceMask;
-        DCHECK(space <= kNumberOfSpaces);
-        DCHECK(code - space == kNewObject);
-        HeapObject* object = GetBackReferencedObject(space);
-        int size = source_.GetInt() << kPointerSizeLog2;
-        Address obj_address = object->address();
-        Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
-        Object** end = reinterpret_cast<Object**>(obj_address + size);
-        bool filled = ReadData(start, end, space, obj_address);
-        CHECK(filled);
-        DCHECK(CanBeDeferred(object));
-        PostProcessNewObject(object, space);
-      }
-    }
-  }
-}
-
-
-// Used to insert a deserialized internalized string into the string table.
-class StringTableInsertionKey : public HashTableKey {
- public:
-  explicit StringTableInsertionKey(String* string)
-      : string_(string), hash_(HashForObject(string)) {
-    DCHECK(string->IsInternalizedString());
-  }
-
-  bool IsMatch(Object* string) override {
-    // We know that all entries in a hash table had their hash keys created.
-    // Use that knowledge to have fast failure.
-    if (hash_ != HashForObject(string)) return false;
-    // We want to compare the content of two internalized strings here.
-    return string_->SlowEquals(String::cast(string));
-  }
-
-  uint32_t Hash() override { return hash_; }
-
-  uint32_t HashForObject(Object* key) override {
-    return String::cast(key)->Hash();
-  }
-
-  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
-    return handle(string_, isolate);
-  }
-
- private:
-  String* string_;
-  uint32_t hash_;
-  DisallowHeapAllocation no_gc;
-};
-
-
-HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
-  if (deserializing_user_code()) {
-    if (obj->IsString()) {
-      String* string = String::cast(obj);
-      // Uninitialize hash field as the hash seed may have changed.
-      string->set_hash_field(String::kEmptyHashField);
-      if (string->IsInternalizedString()) {
-        // Canonicalize the internalized string. If it already exists in the
-        // string table, set it to forward to the existing one.
-        StringTableInsertionKey key(string);
-        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
-        if (canonical == NULL) {
-          new_internalized_strings_.Add(handle(string));
-          return string;
-        } else {
-          string->SetForwardedInternalizedString(canonical);
-          return canonical;
-        }
-      }
-    } else if (obj->IsScript()) {
-      new_scripts_.Add(handle(Script::cast(obj)));
-    } else {
-      DCHECK(CanBeDeferred(obj));
-    }
-  }
-  if (obj->IsAllocationSite()) {
-    DCHECK(obj->IsAllocationSite());
-    // Allocation sites are present in the snapshot, and must be linked into
-    // a list at deserialization time.
-    AllocationSite* site = AllocationSite::cast(obj);
-    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
-    // as a (weak) root. If this root is relocated correctly, this becomes
-    // unnecessary.
-    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
-      site->set_weak_next(isolate_->heap()->undefined_value());
-    } else {
-      site->set_weak_next(isolate_->heap()->allocation_sites_list());
-    }
-    isolate_->heap()->set_allocation_sites_list(site);
-  } else if (obj->IsCode()) {
-    // We flush all code pages after deserializing the startup snapshot. In that
-    // case, we only need to remember code objects in the large object space.
-    // When deserializing user code, remember each individual code object.
-    if (deserializing_user_code() || space == LO_SPACE) {
-      new_code_objects_.Add(Code::cast(obj));
-    }
-  }
-  // Check alignment.
-  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
-  return obj;
-}
-
-
-void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
-  StringTable::EnsureCapacityForDeserialization(
-      isolate, new_internalized_strings_.length());
-  for (Handle<String> string : new_internalized_strings_) {
-    StringTableInsertionKey key(*string);
-    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
-    StringTable::LookupKey(isolate, &key);
-  }
-
-  Heap* heap = isolate->heap();
-  Factory* factory = isolate->factory();
-  for (Handle<Script> script : new_scripts_) {
-    // Assign a new script id to avoid collision.
-    script->set_id(isolate_->heap()->NextScriptId());
-    // Add script to list.
-    Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
-    heap->SetRootScriptList(*list);
-  }
-}
-
-
-HeapObject* Deserializer::GetBackReferencedObject(int space) {
-  HeapObject* obj;
-  BackReference back_reference(source_.GetInt());
-  if (space == LO_SPACE) {
-    CHECK(back_reference.chunk_index() == 0);
-    uint32_t index = back_reference.large_object_index();
-    obj = deserialized_large_objects_[index];
-  } else {
-    DCHECK(space < kNumberOfPreallocatedSpaces);
-    uint32_t chunk_index = back_reference.chunk_index();
-    DCHECK_LE(chunk_index, current_chunk_[space]);
-    uint32_t chunk_offset = back_reference.chunk_offset();
-    Address address = reservations_[space][chunk_index].start + chunk_offset;
-    if (next_alignment_ != kWordAligned) {
-      int padding = Heap::GetFillToAlign(address, next_alignment_);
-      next_alignment_ = kWordAligned;
-      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
-      address += padding;
-    }
-    obj = HeapObject::FromAddress(address);
-  }
-  if (deserializing_user_code() && obj->IsInternalizedString()) {
-    obj = String::cast(obj)->GetForwardedInternalizedString();
-  }
-  hot_objects_.Add(obj);
-  return obj;
-}
-
-
-// This routine writes the new object into the pointer provided and then
-// returns true if the new object was in young space and false otherwise.
-// The reason for this strange interface is that otherwise the object is
-// written very late, which means the FreeSpace map is not set up by the
-// time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number, Object** write_back) {
-  Address address;
-  HeapObject* obj;
-  int size = source_.GetInt() << kObjectAlignmentBits;
-
-  if (next_alignment_ != kWordAligned) {
-    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
-    address = Allocate(space_number, reserved);
-    obj = HeapObject::FromAddress(address);
-    // If one of the following assertions fails, then we are deserializing an
-    // aligned object when the filler maps have not been deserialized yet.
-    // We require filler maps as padding to align the object.
-    Heap* heap = isolate_->heap();
-    DCHECK(heap->free_space_map()->IsMap());
-    DCHECK(heap->one_pointer_filler_map()->IsMap());
-    DCHECK(heap->two_pointer_filler_map()->IsMap());
-    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
-    address = obj->address();
-    next_alignment_ = kWordAligned;
-  } else {
-    address = Allocate(space_number, size);
-    obj = HeapObject::FromAddress(address);
-  }
-
-  isolate_->heap()->OnAllocationEvent(obj, size);
-  Object** current = reinterpret_cast<Object**>(address);
-  Object** limit = current + (size >> kPointerSizeLog2);
-  if (FLAG_log_snapshot_positions) {
-    LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
-  }
-
-  if (ReadData(current, limit, space_number, address)) {
-    // Only post process if object content has not been deferred.
-    obj = PostProcessNewObject(obj, space_number);
-  }
-
-  Object* write_back_obj = obj;
-  UnalignedCopy(write_back, &write_back_obj);
-#ifdef DEBUG
-  if (obj->IsCode()) {
-    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
-  } else {
-    DCHECK(space_number != CODE_SPACE);
-  }
-#endif  // DEBUG
-}
-
-
-// We know the space requirements before deserialization and can
-// pre-allocate that reserved space. During deserialization, all we need
-// to do is to bump up the pointer for each space in the reserved
-// space. This is also used for fixing back references.
-// We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page. We do not have to keep
-// track of when to move to the next chunk. An opcode will signal this.
-// Since multiple large objects cannot be folded into one large object
-// space allocation, we have to do an actual allocation when deserializing
-// each large object. Instead of tracking offset for back references, we
-// reference large objects by index.
-Address Deserializer::Allocate(int space_index, int size) {
-  if (space_index == LO_SPACE) {
-    AlwaysAllocateScope scope(isolate_);
-    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
-    Executability exec = static_cast<Executability>(source_.Get());
-    AllocationResult result = lo_space->AllocateRaw(size, exec);
-    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
-    deserialized_large_objects_.Add(obj);
-    return obj->address();
-  } else {
-    DCHECK(space_index < kNumberOfPreallocatedSpaces);
-    Address address = high_water_[space_index];
-    DCHECK_NOT_NULL(address);
-    high_water_[space_index] += size;
-#ifdef DEBUG
-    // Assert that the current reserved chunk is still big enough.
-    const Heap::Reservation& reservation = reservations_[space_index];
-    int chunk_index = current_chunk_[space_index];
-    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
-#endif
-    return address;
-  }
-}
-
-
-Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
-                                           Object** current) {
-  DCHECK(!isolate_->heap()->deserialization_complete());
-  NativesExternalStringResource* resource = new NativesExternalStringResource(
-      source_vector.start(), source_vector.length());
-  Object* resource_obj = reinterpret_cast<Object*>(resource);
-  UnalignedCopy(current++, &resource_obj);
-  return current;
-}
-
-
-bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
-                            Address current_object_address) {
-  Isolate* const isolate = isolate_;
-  // Write barrier support costs around 1% in startup time.  In fact there
-  // are no new space objects in current boot snapshots, so it's not needed,
-  // but that may change.
-  bool write_barrier_needed =
-      (current_object_address != NULL && source_space != NEW_SPACE &&
-       source_space != CODE_SPACE);
-  while (current < limit) {
-    byte data = source_.Get();
-    switch (data) {
-#define CASE_STATEMENT(where, how, within, space_number) \
-  case where + how + within + space_number:              \
-    STATIC_ASSERT((where & ~kWhereMask) == 0);           \
-    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
-    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
-    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
-
-#define CASE_BODY(where, how, within, space_number_if_any)                     \
-  {                                                                            \
-    bool emit_write_barrier = false;                                           \
-    bool current_was_incremented = false;                                      \
-    int space_number = space_number_if_any == kAnyOldSpace                     \
-                           ? (data & kSpaceMask)                               \
-                           : space_number_if_any;                              \
-    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
-      ReadObject(space_number, current);                                       \
-      emit_write_barrier = (space_number == NEW_SPACE);                        \
-    } else {                                                                   \
-      Object* new_object = NULL; /* May not be a real Object pointer. */       \
-      if (where == kNewObject) {                                               \
-        ReadObject(space_number, &new_object);                                 \
-      } else if (where == kBackref) {                                          \
-        emit_write_barrier = (space_number == NEW_SPACE);                      \
-        new_object = GetBackReferencedObject(data & kSpaceMask);               \
-      } else if (where == kBackrefWithSkip) {                                  \
-        int skip = source_.GetInt();                                           \
-        current = reinterpret_cast<Object**>(                                  \
-            reinterpret_cast<Address>(current) + skip);                        \
-        emit_write_barrier = (space_number == NEW_SPACE);                      \
-        new_object = GetBackReferencedObject(data & kSpaceMask);               \
-      } else if (where == kRootArray) {                                        \
-        int id = source_.GetInt();                                             \
-        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
-        new_object = isolate->heap()->root(root_index);                        \
-        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
-      } else if (where == kPartialSnapshotCache) {                             \
-        int cache_index = source_.GetInt();                                    \
-        new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
-        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
-      } else if (where == kExternalReference) {                                \
-        int skip = source_.GetInt();                                           \
-        current = reinterpret_cast<Object**>(                                  \
-            reinterpret_cast<Address>(current) + skip);                        \
-        int reference_id = source_.GetInt();                                   \
-        Address address = external_reference_table_->address(reference_id);    \
-        new_object = reinterpret_cast<Object*>(address);                       \
-      } else if (where == kAttachedReference) {                                \
-        int index = source_.GetInt();                                          \
-        DCHECK(deserializing_user_code() || index == kGlobalProxyReference);   \
-        new_object = *attached_objects_[index];                                \
-        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
-      } else {                                                                 \
-        DCHECK(where == kBuiltin);                                             \
-        DCHECK(deserializing_user_code());                                     \
-        int builtin_id = source_.GetInt();                                     \
-        DCHECK_LE(0, builtin_id);                                              \
-        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
-        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
-        new_object = isolate->builtins()->builtin(name);                       \
-        emit_write_barrier = false;                                            \
-      }                                                                        \
-      if (within == kInnerPointer) {                                           \
-        if (space_number != CODE_SPACE || new_object->IsCode()) {              \
-          Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
-          new_object =                                                         \
-              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
-        } else {                                                               \
-          DCHECK(space_number == CODE_SPACE);                                  \
-          Cell* cell = Cell::cast(new_object);                                 \
-          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
-        }                                                                      \
-      }                                                                        \
-      if (how == kFromCode) {                                                  \
-        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
-        Assembler::deserialization_set_special_target_at(                      \
-            isolate, location_of_branch_data,                                  \
-            Code::cast(HeapObject::FromAddress(current_object_address)),       \
-            reinterpret_cast<Address>(new_object));                            \
-        location_of_branch_data += Assembler::kSpecialTargetSize;              \
-        current = reinterpret_cast<Object**>(location_of_branch_data);         \
-        current_was_incremented = true;                                        \
-      } else {                                                                 \
-        UnalignedCopy(current, &new_object);                                   \
-      }                                                                        \
-    }                                                                          \
-    if (emit_write_barrier && write_barrier_needed) {                          \
-      Address current_address = reinterpret_cast<Address>(current);            \
-      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));      \
-      isolate->heap()->RecordWrite(                                            \
-          HeapObject::FromAddress(current_object_address),                     \
-          static_cast<int>(current_address - current_object_address),          \
-          *reinterpret_cast<Object**>(current_address));                       \
-    }                                                                          \
-    if (!current_was_incremented) {                                            \
-      current++;                                                               \
-    }                                                                          \
-    break;                                                                     \
-  }
-
-// This generates a case and a body for the new space (which has to do extra
-// write barrier handling) and handles the other spaces with fall-through cases
-// and one body.
-#define ALL_SPACES(where, how, within)           \
-  CASE_STATEMENT(where, how, within, NEW_SPACE)  \
-  CASE_BODY(where, how, within, NEW_SPACE)       \
-  CASE_STATEMENT(where, how, within, OLD_SPACE)  \
-  CASE_STATEMENT(where, how, within, CODE_SPACE) \
-  CASE_STATEMENT(where, how, within, MAP_SPACE)  \
-  CASE_STATEMENT(where, how, within, LO_SPACE)   \
-  CASE_BODY(where, how, within, kAnyOldSpace)
-
-#define FOUR_CASES(byte_code)             \
-  case byte_code:                         \
-  case byte_code + 1:                     \
-  case byte_code + 2:                     \
-  case byte_code + 3:
-
-#define SIXTEEN_CASES(byte_code)          \
-  FOUR_CASES(byte_code)                   \
-  FOUR_CASES(byte_code + 4)               \
-  FOUR_CASES(byte_code + 8)               \
-  FOUR_CASES(byte_code + 12)
-
-#define SINGLE_CASE(where, how, within, space) \
-  CASE_STATEMENT(where, how, within, space)    \
-  CASE_BODY(where, how, within, space)
-
-      // Deserialize a new object and write a pointer to it to the current
-      // object.
-      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
-      // Support for direct instruction pointers in functions.  It's an inner
-      // pointer because it points at the entry point, not at the start of the
-      // code object.
-      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
-      // Deserialize a new code object and write a pointer to its first
-      // instruction to the current code object.
-      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
-      // Find a recently deserialized object using its offset from the current
-      // allocation point and write a pointer to it to the current object.
-      ALL_SPACES(kBackref, kPlain, kStartOfObject)
-      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
-    defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
-      // Deserialize a new object from pointer found in code and write
-      // a pointer to it to the current object. Required only for MIPS, PPC or
-      // ARM with embedded constant pool, and omitted on the other architectures
-      // because it is fully unrolled and would cause bloat.
-      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
-      // Find a recently deserialized code object using its offset from the
-      // current allocation point and write a pointer to it to the current
-      // object. Required only for MIPS, PPC or ARM with embedded constant pool.
-      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
-      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
-#endif
-      // Find a recently deserialized code object using its offset from the
-      // current allocation point and write a pointer to its first instruction
-      // to the current code object or the instruction pointer in a function
-      // object.
-      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
-      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
-      ALL_SPACES(kBackref, kPlain, kInnerPointer)
-      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
-      // Find an object in the roots array and write a pointer to it to the
-      // current object.
-      SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
-    defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
-      // Find an object in the roots array and write a pointer to it in code.
-      SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
-#endif
-      // Find an object in the partial snapshots cache and write a pointer to it
-      // to the current object.
-      SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
-      // Find a code entry in the partial snapshots cache and
-      // write a pointer to it to the current object.
-      SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
-      // Find an external reference and write a pointer to it to the current
-      // object.
-      SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
-      // Find an external reference and write a pointer to it in the current
-      // code object.
-      SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
-      // Find an object in the attached references and write a pointer to it to
-      // the current object.
-      SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
-      SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
-      SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
-      // Find a builtin and write a pointer to it to the current object.
-      SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
-      SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
-      SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)
-
-#undef CASE_STATEMENT
-#undef CASE_BODY
-#undef ALL_SPACES
-
-      case kSkip: {
-        int size = source_.GetInt();
-        current = reinterpret_cast<Object**>(
-            reinterpret_cast<intptr_t>(current) + size);
-        break;
-      }
-
-      case kInternalReferenceEncoded:
-      case kInternalReference: {
-        // Internal reference address is not encoded via skip, but by offset
-        // from code entry.
-        int pc_offset = source_.GetInt();
-        int target_offset = source_.GetInt();
-        Code* code =
-            Code::cast(HeapObject::FromAddress(current_object_address));
-        DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
-        DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
-        Address pc = code->entry() + pc_offset;
-        Address target = code->entry() + target_offset;
-        Assembler::deserialization_set_target_internal_reference_at(
-            isolate, pc, target, data == kInternalReference
-                                     ? RelocInfo::INTERNAL_REFERENCE
-                                     : RelocInfo::INTERNAL_REFERENCE_ENCODED);
-        break;
-      }
-
-      case kNop:
-        break;
-
-      case kNextChunk: {
-        int space = source_.Get();
-        DCHECK(space < kNumberOfPreallocatedSpaces);
-        int chunk_index = current_chunk_[space];
-        const Heap::Reservation& reservation = reservations_[space];
-        // Make sure the current chunk is indeed exhausted.
-        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
-        // Move to next reserved chunk.
-        chunk_index = ++current_chunk_[space];
-        CHECK_LT(chunk_index, reservation.length());
-        high_water_[space] = reservation[chunk_index].start;
-        break;
-      }
-
-      case kDeferred: {
-        // Deferred can only occur right after the heap object header.
-        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
-                                                     kPointerSize));
-        HeapObject* obj = HeapObject::FromAddress(current_object_address);
-        // If the deferred object is a map, its instance type may be used
-        // during deserialization. Initialize it with a temporary value.
-        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
-        current = limit;
-        return false;
-      }
-
-      case kSynchronize:
-        // If we get here, it indicates a mismatch between the number of GC
-        // roots when serializing and deserializing.
-        CHECK(false);
-        break;
-
-      case kNativesStringResource:
-        current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
-                                      current);
-        break;
-
-      case kExtraNativesStringResource:
-        current = CopyInNativesSource(
-            ExtraNatives::GetScriptSource(source_.Get()), current);
-        break;
-
-      // Deserialize raw data of variable length.
-      case kVariableRawData: {
-        int size_in_bytes = source_.GetInt();
-        byte* raw_data_out = reinterpret_cast<byte*>(current);
-        source_.CopyRaw(raw_data_out, size_in_bytes);
-        break;
-      }
-
-      case kVariableRepeat: {
-        int repeats = source_.GetInt();
-        Object* object = current[-1];
-        DCHECK(!isolate->heap()->InNewSpace(object));
-        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
-        break;
-      }
-
-      case kAlignmentPrefix:
-      case kAlignmentPrefix + 1:
-      case kAlignmentPrefix + 2:
-        SetAlignment(data);
-        break;
-
-      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
-      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
-      SIXTEEN_CASES(kRootArrayConstantsWithSkip)
-      SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
-        int skip = source_.GetInt();
-        current = reinterpret_cast<Object**>(
-            reinterpret_cast<intptr_t>(current) + skip);
-        // Fall through.
-      }
-
-      SIXTEEN_CASES(kRootArrayConstants)
-      SIXTEEN_CASES(kRootArrayConstants + 16) {
-        int id = data & kRootArrayConstantsMask;
-        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
-        Object* object = isolate->heap()->root(root_index);
-        DCHECK(!isolate->heap()->InNewSpace(object));
-        UnalignedCopy(current++, &object);
-        break;
-      }
-
-      STATIC_ASSERT(kNumberOfHotObjects == 8);
-      FOUR_CASES(kHotObjectWithSkip)
-      FOUR_CASES(kHotObjectWithSkip + 4) {
-        int skip = source_.GetInt();
-        current = reinterpret_cast<Object**>(
-            reinterpret_cast<Address>(current) + skip);
-        // Fall through.
-      }
-
-      FOUR_CASES(kHotObject)
-      FOUR_CASES(kHotObject + 4) {
-        int index = data & kHotObjectMask;
-        Object* hot_object = hot_objects_.Get(index);
-        UnalignedCopy(current, &hot_object);
-        if (write_barrier_needed) {
-          Address current_address = reinterpret_cast<Address>(current);
-          SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
-          isolate->heap()->RecordWrite(
-              HeapObject::FromAddress(current_object_address),
-              static_cast<int>(current_address - current_object_address),
-              hot_object);
-        }
-        current++;
-        break;
-      }
-
-      // Deserialize raw data of fixed length from 1 to 32 words.
-      STATIC_ASSERT(kNumberOfFixedRawData == 32);
-      SIXTEEN_CASES(kFixedRawData)
-      SIXTEEN_CASES(kFixedRawData + 16) {
-        byte* raw_data_out = reinterpret_cast<byte*>(current);
-        int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
-        source_.CopyRaw(raw_data_out, size_in_bytes);
-        current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
-        break;
-      }
-
-      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
-      SIXTEEN_CASES(kFixedRepeat) {
-        int repeats = data - kFixedRepeatStart;
-        Object* object;
-        UnalignedCopy(&object, current - 1);
-        DCHECK(!isolate->heap()->InNewSpace(object));
-        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
-        break;
-      }
-
-#undef SIXTEEN_CASES
-#undef FOUR_CASES
-#undef SINGLE_CASE
-
-      default:
-        CHECK(false);
-    }
-  }
-  CHECK_EQ(limit, current);
-  return true;
-}
-
-
-Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
-    : isolate_(isolate),
-      sink_(sink),
-      external_reference_encoder_(isolate),
-      root_index_map_(isolate),
-      recursion_depth_(0),
-      code_address_map_(NULL),
-      large_objects_total_size_(0),
-      seen_large_objects_index_(0) {
-  // The serializer is meant to be used only to generate initial heap images
-  // from a context in which there is only one isolate.
-  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
-    pending_chunk_[i] = 0;
-    max_chunk_size_[i] = static_cast<uint32_t>(
-        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
-  }
-
-#ifdef OBJECT_PRINT
-  if (FLAG_serialization_statistics) {
-    instance_type_count_ = NewArray<int>(kInstanceTypes);
-    instance_type_size_ = NewArray<size_t>(kInstanceTypes);
-    for (int i = 0; i < kInstanceTypes; i++) {
-      instance_type_count_[i] = 0;
-      instance_type_size_[i] = 0;
-    }
-  } else {
-    instance_type_count_ = NULL;
-    instance_type_size_ = NULL;
-  }
-#endif  // OBJECT_PRINT
-}
-
-
-Serializer::~Serializer() {
-  if (code_address_map_ != NULL) delete code_address_map_;
-#ifdef OBJECT_PRINT
-  if (instance_type_count_ != NULL) {
-    DeleteArray(instance_type_count_);
-    DeleteArray(instance_type_size_);
-  }
-#endif  // OBJECT_PRINT
-}
-
-
-#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map* map, int size) {
-  int instance_type = map->instance_type();
-  instance_type_count_[instance_type]++;
-  instance_type_size_[instance_type] += size;
-}
-#endif  // OBJECT_PRINT
-
-
-void Serializer::OutputStatistics(const char* name) {
-  if (!FLAG_serialization_statistics) return;
-  PrintF("%s:\n", name);
-  PrintF("  Spaces (bytes):\n");
-  for (int space = 0; space < kNumberOfSpaces; space++) {
-    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
-  }
-  PrintF("\n");
-  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
-    size_t s = pending_chunk_[space];
-    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
-    PrintF("%16" V8_PTR_PREFIX "d", s);
-  }
-  PrintF("%16d\n", large_objects_total_size_);
-#ifdef OBJECT_PRINT
-  PrintF("  Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name)                                          \
-  if (instance_type_count_[Name]) {                                        \
-    PrintF("%10d %10" V8_PTR_PREFIX "d  %s\n", instance_type_count_[Name], \
-           instance_type_size_[Name], #Name);                              \
-  }
-  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
-#undef PRINT_INSTANCE_TYPE
-  PrintF("\n");
-#endif  // OBJECT_PRINT
-}
-
-
-class Serializer::ObjectSerializer : public ObjectVisitor {
- public:
-  ObjectSerializer(Serializer* serializer, Object* o, SnapshotByteSink* sink,
-                   HowToCode how_to_code, WhereToPoint where_to_point)
-      : serializer_(serializer),
-        object_(HeapObject::cast(o)),
-        sink_(sink),
-        reference_representation_(how_to_code + where_to_point),
-        bytes_processed_so_far_(0),
-        is_code_object_(o->IsCode()),
-        code_has_been_output_(false) {}
-  void Serialize();
-  void SerializeDeferred();
-  void VisitPointers(Object** start, Object** end) override;
-  void VisitEmbeddedPointer(RelocInfo* target) override;
-  void VisitExternalReference(Address* p) override;
-  void VisitExternalReference(RelocInfo* rinfo) override;
-  void VisitInternalReference(RelocInfo* rinfo) override;
-  void VisitCodeTarget(RelocInfo* target) override;
-  void VisitCodeEntry(Address entry_address) override;
-  void VisitCell(RelocInfo* rinfo) override;
-  void VisitRuntimeEntry(RelocInfo* reloc) override;
-  // Used for serializing the external strings that hold the natives source.
-  void VisitExternalOneByteString(
-      v8::String::ExternalOneByteStringResource** resource) override;
-  // We can't serialize a heap with external two byte strings.
-  void VisitExternalTwoByteString(
-      v8::String::ExternalStringResource** resource) override {
-    UNREACHABLE();
-  }
-
- private:
-  void SerializePrologue(AllocationSpace space, int size, Map* map);
-
-  bool SerializeExternalNativeSourceString(
-      int builtin_count,
-      v8::String::ExternalOneByteStringResource** resource_pointer,
-      FixedArray* source_cache, int resource_index);
-
-  enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
-  // This function outputs or skips the raw data between the last pointer and
-  // the current position.  It can optionally just return the number of bytes
-  // to skip instead of emitting a skip instruction, in case the skip can be
-  // merged into the next instruction.
-  int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
-  // External strings are serialized in a way to resemble sequential strings.
-  void SerializeExternalString();
-
-  Address PrepareCode();
-
-  Serializer* serializer_;
-  HeapObject* object_;
-  SnapshotByteSink* sink_;
-  int reference_representation_;
-  int bytes_processed_so_far_;
-  bool is_code_object_;
-  bool code_has_been_output_;
-};
-
-
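-// Drain the queue of objects whose contents were deferred during recursive
-// serialization (see the kDeferred handling in the deserializer above) and
-// terminate the stream with a kSynchronize byte.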
-void Serializer::SerializeDeferredObjects() {
-  while (deferred_objects_.length() > 0) {
-    HeapObject* obj = deferred_objects_.RemoveLast();
-    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
-    obj_serializer.SerializeDeferred();
-  }
-  sink_->Put(kSynchronize, "Finished with deferred objects");
-}
-
-
-void StartupSerializer::SerializeStrongReferences() {
-  Isolate* isolate = this->isolate();
-  // No active threads.
-  CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
-  // No active or weak handles.
-  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
-  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
-  CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
-  // We don't support serializing installed extensions.
-  CHECK(!isolate->has_installed_extensions());
-  isolate->heap()->IterateSmiRoots(this);
-  isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-}
-
-
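-// While iterating the strong roots, record how far into the roots array
-// serialization has progressed.  SerializeObject later uses this wave front
-// to decide whether an object may be encoded as a root index: only roots
-// below the wave front have already been serialized.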
-void StartupSerializer::VisitPointers(Object** start, Object** end) {
-  for (Object** current = start; current < end; current++) {
-    if (start == isolate()->heap()->roots_array_start()) {
-      root_index_wave_front_ =
-          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
-    }
-    if (ShouldBeSkipped(current)) {
-      sink_->Put(kSkip, "Skip");
-      sink_->PutInt(kPointerSize, "SkipOneWord");
-    } else if ((*current)->IsSmi()) {
-      sink_->Put(kOnePointerRawData, "Smi");
-      for (int i = 0; i < kPointerSize; i++) {
-        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
-      }
-    } else {
-      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
-    }
-  }
-}
-
-
-void PartialSerializer::Serialize(Object** o) {
-  if ((*o)->IsContext()) {
-    Context* context = Context::cast(*o);
-    global_object_ = context->global_object();
-    back_reference_map()->AddGlobalProxy(context->global_proxy());
-    // The bootstrap snapshot has a code-stub context. When serializing the
-    // partial snapshot, it is chained into the weak context list on the
-    // isolate and its next context pointer may point to the code-stub
-    // context.  Clear it before serializing; it will get re-added to the
-    // context list explicitly when it is loaded.
-    if (context->IsNativeContext()) {
-      context->set(Context::NEXT_CONTEXT_LINK,
-                   isolate_->heap()->undefined_value());
-      DCHECK(!context->global_object()->IsUndefined());
-    }
-  }
-  VisitPointer(o);
-  SerializeDeferredObjects();
-  Pad();
-}
-
-
-bool Serializer::ShouldBeSkipped(Object** current) {
-  Object** roots = isolate()->heap()->roots_array_start();
-  return current == &roots[Heap::kStoreBufferTopRootIndex]
-      || current == &roots[Heap::kStackLimitRootIndex]
-      || current == &roots[Heap::kRealStackLimitRootIndex];
-}
-
-
-void Serializer::VisitPointers(Object** start, Object** end) {
-  for (Object** current = start; current < end; current++) {
-    if ((*current)->IsSmi()) {
-      sink_->Put(kOnePointerRawData, "Smi");
-      for (int i = 0; i < kPointerSize; i++) {
-        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
-      }
-    } else {
-      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
-    }
-  }
-}
-
-
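-// Emit one Reservation per completed chunk in each preallocated space, plus
-// one for the still-pending chunk, marking the last entry of each space; a
-// final entry carries the total size of the large object space.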
-void Serializer::EncodeReservations(
-    List<SerializedData::Reservation>* out) const {
-  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
-    for (int j = 0; j < completed_chunks_[i].length(); j++) {
-      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
-    }
-
-    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
-      out->Add(SerializedData::Reservation(pending_chunk_[i]));
-    }
-    out->last().mark_as_last();
-  }
-
-  out->Add(SerializedData::Reservation(large_objects_total_size_));
-  out->last().mark_as_last();
-}
-
-
-// This ensures that the partial snapshot cache keeps things alive during GC
-// and tracks their movement.  When it is called during serialization of the
-// startup snapshot, nothing happens.  When the partial (context) snapshot is
-// created, this array is populated with the pointers that the partial
-// snapshot will need. As that happens, we emit serialized objects to the
-// startup snapshot that correspond to the elements of this cache array.  On
-// deserialization we therefore need to visit the cache array.  This fills it
-// up with pointers to deserialized objects.
-void SerializerDeserializer::Iterate(Isolate* isolate,
-                                     ObjectVisitor* visitor) {
-  if (isolate->serializer_enabled()) return;
-  List<Object*>* cache = isolate->partial_snapshot_cache();
-  for (int i = 0;; ++i) {
-    // Extend the array so it is ready to receive a value when deserializing.
-    if (cache->length() <= i) cache->Add(Smi::FromInt(0));
-    visitor->VisitPointer(&cache->at(i));
-    // Sentinel is the undefined object, which is a root so it will not normally
-    // be found in the cache.
-    if (cache->at(i)->IsUndefined()) break;
-  }
-}
-
-
-bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
-  return !o->IsString() && !o->IsScript();
-}
-
-
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
-  Isolate* isolate = this->isolate();
-  List<Object*>* cache = isolate->partial_snapshot_cache();
-  int new_index = cache->length();
-
-  int index = partial_cache_index_map_.LookupOrInsert(heap_object, new_index);
-  if (index == PartialCacheIndexMap::kInvalidIndex) {
-    // We didn't find the object in the cache.  So we add it to the cache and
-    // then visit the pointer so that it becomes part of the startup snapshot
-    // and we can refer to it from the partial snapshot.
-    cache->Add(heap_object);
-    startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
-    // We don't recurse from the startup snapshot generator into the partial
-    // snapshot generator.
-    return new_index;
-  }
-  return index;
-}
-
-
-bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
-  // Scripts should be referred to only through shared function infos.  We
-  // can't allow them to be part of the partial snapshot because they contain
-  // a unique ID, and deserializing several partial snapshots containing
-  // scripts would cause dupes.
-  DCHECK(!o->IsScript());
-  return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
-         o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
-         o->map() ==
-             startup_serializer_->isolate()->heap()->fixed_cow_array_map();
-}
-
-
-#ifdef DEBUG
-bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
-  DCHECK(reference.is_valid());
-  DCHECK(!reference.is_source());
-  DCHECK(!reference.is_global_proxy());
-  AllocationSpace space = reference.space();
-  int chunk_index = reference.chunk_index();
-  if (space == LO_SPACE) {
-    return chunk_index == 0 &&
-           reference.large_object_index() < seen_large_objects_index_;
-  } else if (chunk_index == completed_chunks_[space].length()) {
-    return reference.chunk_offset() < pending_chunk_[space];
-  } else {
-    return chunk_index < completed_chunks_[space].length() &&
-           reference.chunk_offset() < completed_chunks_[space][chunk_index];
-  }
-}
-#endif  // DEBUG
-
-
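-// Try to encode |obj| without emitting its body: first as a hot object (an
-// index into the small working set of recently serialized objects), then as
-// a back reference to an already serialized copy (the source object, the
-// global proxy, or a (space, chunk index, offset) back reference).  Returns
-// false if the object still has to be serialized in full.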
-bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
-                                      WhereToPoint where_to_point, int skip) {
-  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
-    // Encode a reference to a hot object by its index in the working set.
-    int index = hot_objects_.Find(obj);
-    if (index != HotObjectsList::kNotFound) {
-      DCHECK(index >= 0 && index < kNumberOfHotObjects);
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding hot object %d:", index);
-        obj->ShortPrint();
-        PrintF("\n");
-      }
-      if (skip != 0) {
-        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
-        sink_->PutInt(skip, "HotObjectSkipDistance");
-      } else {
-        sink_->Put(kHotObject + index, "HotObject");
-      }
-      return true;
-    }
-  }
-  BackReference back_reference = back_reference_map_.Lookup(obj);
-  if (back_reference.is_valid()) {
-    // Encode the location of an already deserialized object in order to write
-    // its location into a later object.  We can encode the location as an
-    // offset from the start of the deserialized objects or as an offset
-    // backwards from the current allocation pointer.
-    if (back_reference.is_source()) {
-      FlushSkip(skip);
-      if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
-      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
-      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
-      sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
-    } else if (back_reference.is_global_proxy()) {
-      FlushSkip(skip);
-      if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
-      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
-      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
-      sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
-    } else {
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding back reference to: ");
-        obj->ShortPrint();
-        PrintF("\n");
-      }
-
-      PutAlignmentPrefix(obj);
-      AllocationSpace space = back_reference.space();
-      if (skip == 0) {
-        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
-      } else {
-        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
-                   "BackRefWithSkip");
-        sink_->PutInt(skip, "BackRefSkipDistance");
-      }
-      PutBackReference(obj, back_reference);
-    }
-    return true;
-  }
-  return false;
-}
-
-StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
-    : Serializer(isolate, sink),
-      root_index_wave_front_(0),
-      serializing_builtins_(false) {
-  // Clear the cache of objects used by the partial snapshot.  After the
-  // strong roots have been serialized we can create a partial snapshot
-  // which will repopulate the cache with objects needed by that partial
-  // snapshot.
-  isolate->partial_snapshot_cache()->Clear();
-  InitializeCodeAddressMap();
-}
-
-
-void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
-                                        WhereToPoint where_to_point, int skip) {
-  DCHECK(!obj->IsJSFunction());
-
-  if (obj->IsCode()) {
-    Code* code = Code::cast(obj);
-    // If the function code is compiled (either as native code or bytecode),
-    // replace it with the lazy-compile builtin. The only exception is when we
-    // are serializing the canonical interpreter-entry-trampoline builtin.
-    if (code->kind() == Code::FUNCTION ||
-        (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
-      obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
-    }
-  } else if (obj->IsBytecodeArray()) {
-    obj = isolate()->heap()->undefined_value();
-  }
-
-  int root_index = root_index_map_.Lookup(obj);
-  bool is_immortal_immovable_root = false;
-  // We can only encode a root as such if it has already been serialized.
-  // That applies to root indices below the wave front.
-  if (root_index != RootIndexMap::kInvalidRootIndex) {
-    if (root_index < root_index_wave_front_) {
-      PutRoot(root_index, obj, how_to_code, where_to_point, skip);
-      return;
-    } else {
-      is_immortal_immovable_root = Heap::RootIsImmortalImmovable(root_index);
-    }
-  }
-
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
-
-  FlushSkip(skip);
-
-  // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
-                                     where_to_point);
-  object_serializer.Serialize();
-
-  if (is_immortal_immovable_root) {
-    // Make sure that the immortal immovable root has been included in the
-    // first chunk of its reserved space, so that it is deserialized onto the
-    // first page of its space and stays immortal immovable.
-    BackReference ref = back_reference_map_.Lookup(obj);
-    CHECK(ref.is_valid() && ref.chunk_index() == 0);
-  }
-}
-
-
-void StartupSerializer::SerializeWeakReferencesAndDeferred() {
-  // This phase comes right after the serialization (of the snapshot).
-  // After we have done the partial serialization the partial snapshot cache
-  // will contain some references needed to decode the partial snapshot.  We
-  // add one entry with 'undefined' which is the sentinel that the deserializer
-  // uses to know it is done deserializing the array.
-  Object* undefined = isolate()->heap()->undefined_value();
-  VisitPointer(&undefined);
-  isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
-  SerializeDeferredObjects();
-  Pad();
-}
-
-void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
-  // We expect the builtins tag after builtins have been serialized.
-  DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
-  serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
-  sink_->Put(kSynchronize, "Synchronize");
-}
-
-void Serializer::PutRoot(int root_index,
-                         HeapObject* object,
-                         SerializerDeserializer::HowToCode how_to_code,
-                         SerializerDeserializer::WhereToPoint where_to_point,
-                         int skip) {
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding root %d:", root_index);
-    object->ShortPrint();
-    PrintF("\n");
-  }
-
-  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
-      root_index < kNumberOfRootArrayConstants &&
-      !isolate()->heap()->InNewSpace(object)) {
-    if (skip == 0) {
-      sink_->Put(kRootArrayConstants + root_index, "RootConstant");
-    } else {
-      sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
-      sink_->PutInt(skip, "SkipInPutRoot");
-    }
-  } else {
-    FlushSkip(skip);
-    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
-    sink_->PutInt(root_index, "root_index");
-  }
-}
-
-
-void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
-  DCHECK(BackReferenceIsAlreadyAllocated(reference));
-  sink_->PutInt(reference.reference(), "BackRefValue");
-  hot_objects_.Add(object);
-}
-
-
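-// If the object needs non-word alignment, emit one of the kAlignmentPrefix
-// bytes (kAlignmentPrefix + alignment - 1) and return the maximum fill the
-// deserializer may have to insert; the deserializer handles these bytes via
-// SetAlignment(data).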
-int Serializer::PutAlignmentPrefix(HeapObject* object) {
-  AllocationAlignment alignment = object->RequiredAlignment();
-  if (alignment != kWordAligned) {
-    DCHECK(1 <= alignment && alignment <= 3);
-    byte prefix = (kAlignmentPrefix - 1) + alignment;
-    sink_->Put(prefix, "Alignment");
-    return Heap::GetMaximumFillToAlign(alignment);
-  }
-  return 0;
-}
-
-
-void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
-                                        WhereToPoint where_to_point, int skip) {
-  if (obj->IsMap()) {
-    // The code-caches link to context-specific code objects, which
-    // the startup and context serializers cannot currently handle.
-    DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
-  }
-
-  // Replace typed arrays by undefined.
-  if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
-
-  int root_index = root_index_map_.Lookup(obj);
-  if (root_index != RootIndexMap::kInvalidRootIndex) {
-    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
-    return;
-  }
-
-  if (ShouldBeInThePartialSnapshotCache(obj)) {
-    FlushSkip(skip);
-
-    int cache_index = PartialSnapshotCacheIndex(obj);
-    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
-               "PartialSnapshotCache");
-    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
-    return;
-  }
-
-  // Pointers from the partial snapshot to the objects in the startup snapshot
-  // should go through the root array or through the partial snapshot cache.
-  // If this is not the case, you may have to add something to the root array.
-  DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
-  // All the internalized strings that the partial snapshot needs should be
-  // either in the root table or in the partial snapshot cache.
-  DCHECK(!obj->IsInternalizedString());
-
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
-
-  FlushSkip(skip);
-
-  // Clear literal boilerplates.
-  if (obj->IsJSFunction()) {
-    FixedArray* literals = JSFunction::cast(obj)->literals();
-    for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
-  }
-
-  // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
-  serializer.Serialize();
-}
-
-
-void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
-                                                     int size, Map* map) {
-  if (serializer_->code_address_map_) {
-    const char* code_name =
-        serializer_->code_address_map_->Lookup(object_->address());
-    LOG(serializer_->isolate_,
-        CodeNameEvent(object_->address(), sink_->Position(), code_name));
-    LOG(serializer_->isolate_,
-        SnapshotPositionEvent(object_->address(), sink_->Position()));
-  }
-
-  BackReference back_reference;
-  if (space == LO_SPACE) {
-    sink_->Put(kNewObject + reference_representation_ + space,
-               "NewLargeObject");
-    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
-    if (object_->IsCode()) {
-      sink_->Put(EXECUTABLE, "executable large object");
-    } else {
-      sink_->Put(NOT_EXECUTABLE, "not executable large object");
-    }
-    back_reference = serializer_->AllocateLargeObject(size);
-  } else {
-    int fill = serializer_->PutAlignmentPrefix(object_);
-    back_reference = serializer_->Allocate(space, size + fill);
-    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
-    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
-  }
-
-#ifdef OBJECT_PRINT
-  if (FLAG_serialization_statistics) {
-    serializer_->CountInstanceType(map, size);
-  }
-#endif  // OBJECT_PRINT
-
-  // Mark this object as already serialized.
-  serializer_->back_reference_map()->Add(object_, back_reference);
-
-  // Serialize the map (first word of the object).
-  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
-}
-
-
-void Serializer::ObjectSerializer::SerializeExternalString() {
-  // Instead of serializing this as an external string, we serialize
-  // an imaginary sequential string with the same content.
-  Isolate* isolate = serializer_->isolate();
-  DCHECK(object_->IsExternalString());
-  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
-  ExternalString* string = ExternalString::cast(object_);
-  int length = string->length();
-  Map* map;
-  int content_size;
-  int allocation_size;
-  const byte* resource;
-  // Find the map and size for the imaginary sequential string.
-  bool internalized = object_->IsInternalizedString();
-  if (object_->IsExternalOneByteString()) {
-    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
-                       : isolate->heap()->one_byte_string_map();
-    allocation_size = SeqOneByteString::SizeFor(length);
-    content_size = length * kCharSize;
-    resource = reinterpret_cast<const byte*>(
-        ExternalOneByteString::cast(string)->resource()->data());
-  } else {
-    map = internalized ? isolate->heap()->internalized_string_map()
-                       : isolate->heap()->string_map();
-    allocation_size = SeqTwoByteString::SizeFor(length);
-    content_size = length * kShortSize;
-    resource = reinterpret_cast<const byte*>(
-        ExternalTwoByteString::cast(string)->resource()->data());
-  }
-
-  AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
-                              ? LO_SPACE
-                              : OLD_SPACE;
-  SerializePrologue(space, allocation_size, map);
-
-  // Output the rest of the imaginary string.
-  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
-
-  // Output raw data header. Do not bother with common raw length cases here.
-  sink_->Put(kVariableRawData, "RawDataForString");
-  sink_->PutInt(bytes_to_output, "length");
-
-  // Serialize string header (except for map).
-  Address string_start = string->address();
-  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
-    sink_->PutSection(string_start[i], "StringHeader");
-  }
-
-  // Serialize string content.
-  sink_->PutRaw(resource, content_size, "StringContent");
-
-  // Since the allocation size is rounded up to object alignment, there
-  // may be left-over bytes that need to be padded.
-  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
-  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
-  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
-
-  sink_->Put(kSkip, "SkipAfterString");
-  sink_->PutInt(bytes_to_output, "SkipDistance");
-}
-
-// Clear and later restore the next link in the weak cell or allocation site.
-// TODO(all): replace this with proper iteration of weak slots in serializer.
-class UnlinkWeakNextScope {
- public:
-  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
-    if (object->IsWeakCell()) {
-      object_ = object;
-      next_ = WeakCell::cast(object)->next();
-      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
-    } else if (object->IsAllocationSite()) {
-      object_ = object;
-      next_ = AllocationSite::cast(object)->weak_next();
-      AllocationSite::cast(object)
-          ->set_weak_next(object->GetHeap()->undefined_value());
-    }
-  }
-
-  ~UnlinkWeakNextScope() {
-    if (object_ != nullptr) {
-      if (object_->IsWeakCell()) {
-        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
-      } else {
-        AllocationSite::cast(object_)
-            ->set_weak_next(next_, UPDATE_WEAK_WRITE_BARRIER);
-      }
-    }
-  }
-
- private:
-  HeapObject* object_;
-  Object* next_;
-  DisallowHeapAllocation no_gc_;
-};
-
-
-void Serializer::ObjectSerializer::Serialize() {
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding heap object: ");
-    object_->ShortPrint();
-    PrintF("\n");
-  }
-
-  // We cannot serialize typed array objects correctly.
-  DCHECK(!object_->IsJSTypedArray());
-
-  // We don't expect fillers.
-  DCHECK(!object_->IsFiller());
-
-  if (object_->IsScript()) {
-    // Clear cached line ends.
-    Object* undefined = serializer_->isolate()->heap()->undefined_value();
-    Script::cast(object_)->set_line_ends(undefined);
-  }
-
-  if (object_->IsExternalString()) {
-    Heap* heap = serializer_->isolate()->heap();
-    if (object_->map() != heap->native_source_string_map()) {
-      // Usually we cannot recreate resources for external strings. To work
-      // around this, external strings are serialized to look like ordinary
-      // sequential strings.
-      // The exception is native source code strings, since we can recreate
-      // their resources. In that case we fall through and leave it to
-      // VisitExternalOneByteString further down.
-      SerializeExternalString();
-      return;
-    }
-  }
-
-  int size = object_->Size();
-  Map* map = object_->map();
-  AllocationSpace space =
-      MemoryChunk::FromAddress(object_->address())->owner()->identity();
-  SerializePrologue(space, size, map);
-
-  // Serialize the rest of the object.
-  CHECK_EQ(0, bytes_processed_so_far_);
-  bytes_processed_so_far_ = kPointerSize;
-
-  RecursionScope recursion(serializer_);
-  // Objects that are immediately post processed during deserialization
-  // cannot be deferred, since post processing requires the object content.
-  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
-    serializer_->QueueDeferredObject(object_);
-    sink_->Put(kDeferred, "Deferring object content");
-    return;
-  }
-
-  UnlinkWeakNextScope unlink_weak_next(object_);
-
-  object_->IterateBody(map->instance_type(), size, this);
-  OutputRawData(object_->address() + size);
-}
-
-
-void Serializer::ObjectSerializer::SerializeDeferred() {
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding deferred heap object: ");
-    object_->ShortPrint();
-    PrintF("\n");
-  }
-
-  int size = object_->Size();
-  Map* map = object_->map();
-  BackReference reference = serializer_->back_reference_map()->Lookup(object_);
-
-  // Serialize the rest of the object.
-  CHECK_EQ(0, bytes_processed_so_far_);
-  bytes_processed_so_far_ = kPointerSize;
-
-  serializer_->PutAlignmentPrefix(object_);
-  sink_->Put(kNewObject + reference.space(), "deferred object");
-  serializer_->PutBackReference(object_, reference);
-  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
-
-  UnlinkWeakNextScope unlink_weak_next(object_);
-
-  object_->IterateBody(map->instance_type(), size, this);
-  OutputRawData(object_->address() + size);
-}
-
-
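-// Pointer fields are emitted in runs: Smi runs are left to OutputRawData,
-// repeated references to the same immortal immovable root are collapsed
-// into kFixedRepeat/kVariableRepeat, and every other heap pointer goes
-// through SerializeObject.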
-void Serializer::ObjectSerializer::VisitPointers(Object** start,
-                                                 Object** end) {
-  Object** current = start;
-  while (current < end) {
-    while (current < end && (*current)->IsSmi()) current++;
-    if (current < end) OutputRawData(reinterpret_cast<Address>(current));
-
-    while (current < end && !(*current)->IsSmi()) {
-      HeapObject* current_contents = HeapObject::cast(*current);
-      int root_index = serializer_->root_index_map()->Lookup(current_contents);
-      // Repeats are not subject to the write barrier so we can only use
-      // immortal immovable root members. They are never in new space.
-      if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
-          Heap::RootIsImmortalImmovable(root_index) &&
-          current_contents == current[-1]) {
-        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
-        int repeat_count = 1;
-        while (&current[repeat_count] < end - 1 &&
-               current[repeat_count] == current_contents) {
-          repeat_count++;
-        }
-        current += repeat_count;
-        bytes_processed_so_far_ += repeat_count * kPointerSize;
-        if (repeat_count > kNumberOfFixedRepeat) {
-          sink_->Put(kVariableRepeat, "VariableRepeat");
-          sink_->PutInt(repeat_count, "repeat count");
-        } else {
-          sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
-        }
-      } else {
-        serializer_->SerializeObject(
-                current_contents, kPlain, kStartOfObject, 0);
-        bytes_processed_so_far_ += kPointerSize;
-        current++;
-      }
-    }
-  }
-}
-
-
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
-  int skip = OutputRawData(rinfo->target_address_address(),
-                           kCanReturnSkipInsteadOfSkipping);
-  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
-  Object* object = rinfo->target_object();
-  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
-                               kStartOfObject, skip);
-  bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
-  int skip = OutputRawData(reinterpret_cast<Address>(p),
-                           kCanReturnSkipInsteadOfSkipping);
-  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
-  sink_->PutInt(skip, "SkipB4ExternalRef");
-  Address target = *p;
-  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
-  bytes_processed_so_far_ += kPointerSize;
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
-  int skip = OutputRawData(rinfo->target_address_address(),
-                           kCanReturnSkipInsteadOfSkipping);
-  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
-  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
-  sink_->PutInt(skip, "SkipB4ExternalRef");
-  Address target = rinfo->target_external_reference();
-  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
-  bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
-  // We can only refer to internal references of code that has been output.
-  DCHECK(is_code_object_ && code_has_been_output_);
-  // We do not use skip from last patched pc to find the pc to patch, since
-  // target_address_address may not return addresses in ascending order when
-  // used for internal references. External references may be stored at the
-  // end of the code in the constant pool, whereas internal references are
-  // inline. That would cause the skip to be negative. Instead, we store the
-  // offset from code entry.
-  Address entry = Code::cast(object_)->entry();
-  intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
-  intptr_t target_offset = rinfo->target_internal_reference() - entry;
-  DCHECK(0 <= pc_offset &&
-         pc_offset <= Code::cast(object_)->instruction_size());
-  DCHECK(0 <= target_offset &&
-         target_offset <= Code::cast(object_)->instruction_size());
-  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
-                 ? kInternalReference
-                 : kInternalReferenceEncoded,
-             "InternalRef");
-  sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
-  sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
-}
-
-
-void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
-  int skip = OutputRawData(rinfo->target_address_address(),
-                           kCanReturnSkipInsteadOfSkipping);
-  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
-  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
-  sink_->PutInt(skip, "SkipB4ExternalRef");
-  Address target = rinfo->target_address();
-  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
-  bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
-  int skip = OutputRawData(rinfo->target_address_address(),
-                           kCanReturnSkipInsteadOfSkipping);
-  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
-  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
-  bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
-void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
-  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
-  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
-  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
-  bytes_processed_so_far_ += kPointerSize;
-}
-
-
-void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
-  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
-  Cell* object = Cell::cast(rinfo->target_cell());
-  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
-  bytes_processed_so_far_ += kPointerSize;
-}
-
-
-bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
-    int builtin_count,
-    v8::String::ExternalOneByteStringResource** resource_pointer,
-    FixedArray* source_cache, int resource_index) {
-  for (int i = 0; i < builtin_count; i++) {
-    Object* source = source_cache->get(i);
-    if (!source->IsUndefined()) {
-      ExternalOneByteString* string = ExternalOneByteString::cast(source);
-      typedef v8::String::ExternalOneByteStringResource Resource;
-      const Resource* resource = string->resource();
-      if (resource == *resource_pointer) {
-        sink_->Put(resource_index, "NativesStringResource");
-        sink_->PutSection(i, "NativesStringResourceEnd");
-        bytes_processed_so_far_ += sizeof(resource);
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-
-void Serializer::ObjectSerializer::VisitExternalOneByteString(
-    v8::String::ExternalOneByteStringResource** resource_pointer) {
-  Address references_start = reinterpret_cast<Address>(resource_pointer);
-  OutputRawData(references_start);
-  if (SerializeExternalNativeSourceString(
-          Natives::GetBuiltinsCount(), resource_pointer,
-          Natives::GetSourceCache(serializer_->isolate()->heap()),
-          kNativesStringResource)) {
-    return;
-  }
-  if (SerializeExternalNativeSourceString(
-          ExtraNatives::GetBuiltinsCount(), resource_pointer,
-          ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
-          kExtraNativesStringResource)) {
-    return;
-  }
-  // One of the strings in the natives cache should match the resource.  We
-  // don't expect any other kinds of external strings here.
-  UNREACHABLE();
-}
-
-
-Address Serializer::ObjectSerializer::PrepareCode() {
-  // To make snapshots reproducible, we make a copy of the code object
-  // and wipe all pointers in the copy, which we then serialize.
-  Code* original = Code::cast(object_);
-  Code* code = serializer_->CopyCode(original);
-  // Code age headers are not serializable.
-  code->MakeYoung(serializer_->isolate());
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
-                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    rinfo->WipeOut();
-  }
-  // We need to wipe out the header fields *after* wiping out the
-  // relocations, because some of these fields are needed for the latter.
-  code->WipeOutHeader();
-  return code->address();
-}
-
-
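-// Output the raw bytes between the last processed offset and |up_to|.
-// Small, pointer-aligned runs use a single kFixedRawData byte; everything
-// else (including the one-shot output of a whole code object, which is
-// first copied and wiped via PrepareCode) uses kVariableRawData.  When
-// requested, the remaining skip distance is returned to the caller instead
-// of emitting a kSkip instruction.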
-int Serializer::ObjectSerializer::OutputRawData(
-    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
-  Address object_start = object_->address();
-  int base = bytes_processed_so_far_;
-  int up_to_offset = static_cast<int>(up_to - object_start);
-  int to_skip = up_to_offset - bytes_processed_so_far_;
-  int bytes_to_output = to_skip;
-  bytes_processed_so_far_ += to_skip;
-  // This assert will fail if the reloc info gives us the target_address_address
-  // locations in a non-ascending order.  Luckily that doesn't happen.
-  DCHECK(to_skip >= 0);
-  bool outputting_code = false;
-  if (to_skip != 0 && is_code_object_ && !code_has_been_output_) {
-    // Output the code all at once and fix later.
-    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
-    outputting_code = true;
-    code_has_been_output_ = true;
-  }
-  if (bytes_to_output != 0 && (!is_code_object_ || outputting_code)) {
-    if (!outputting_code && bytes_to_output == to_skip &&
-        IsAligned(bytes_to_output, kPointerAlignment) &&
-        bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
-      int size_in_words = bytes_to_output >> kPointerSizeLog2;
-      sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
-      to_skip = 0;  // This instruction includes skip.
-    } else {
-      // We always end up here if we are outputting the code of a code object.
-      sink_->Put(kVariableRawData, "VariableRawData");
-      sink_->PutInt(bytes_to_output, "length");
-    }
-
-    if (is_code_object_) object_start = PrepareCode();
-
-    const char* description = is_code_object_ ? "Code" : "Byte";
-    sink_->PutRaw(object_start + base, bytes_to_output, description);
-  }
-  if (to_skip != 0 && return_skip == kIgnoringReturn) {
-    sink_->Put(kSkip, "Skip");
-    sink_->PutInt(to_skip, "SkipDistance");
-    to_skip = 0;
-  }
-  return to_skip;
-}
-
-
-BackReference Serializer::AllocateLargeObject(int size) {
-  // Large objects are allocated one-by-one when deserializing. We do not
-  // have to keep track of multiple chunks.
-  large_objects_total_size_ += size;
-  return BackReference::LargeObjectReference(seen_large_objects_index_++);
-}
-
-
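-// Reserve |size| bytes in the pending chunk of |space|.  If the chunk would
-// grow beyond one page, close it (emitting kNextChunk so the deserializer
-// advances to its next reserved chunk) and start a new one.  The returned
-// BackReference encodes the space, the chunk index and the offset within
-// the chunk.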
-BackReference Serializer::Allocate(AllocationSpace space, int size) {
-  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
-  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
-  uint32_t new_chunk_size = pending_chunk_[space] + size;
-  if (new_chunk_size > max_chunk_size(space)) {
-    // The new chunk size would not fit onto a single page. Complete the
-    // current chunk and start a new one.
-    sink_->Put(kNextChunk, "NextChunk");
-    sink_->Put(space, "NextChunkSpace");
-    completed_chunks_[space].Add(pending_chunk_[space]);
-    DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
-    pending_chunk_[space] = 0;
-    new_chunk_size = size;
-  }
-  uint32_t offset = pending_chunk_[space];
-  pending_chunk_[space] = new_chunk_size;
-  return BackReference::Reference(space, completed_chunks_[space].length(),
-                                  offset);
-}
-
-
-void Serializer::Pad() {
-  // The non-branching GetInt will read up to 3 bytes too far, so we need
-  // to pad the snapshot to make sure we don't read over the end.
-  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
-    sink_->Put(kNop, "Padding");
-  }
-  // Pad up to pointer size for checksum.
-  while (!IsAligned(sink_->Position(), kPointerAlignment)) {
-    sink_->Put(kNop, "Padding");
-  }
-}
-
-
-void Serializer::InitializeCodeAddressMap() {
-  isolate_->InitializeLoggingAndCounters();
-  code_address_map_ = new CodeAddressMap(isolate_);
-}
-
-
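-// Copy the code object into code_buffer_ so that PrepareCode can wipe out
-// pointers and header fields without mutating the live code object.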
-Code* Serializer::CopyCode(Code* code) {
-  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
-  int size = code->CodeSize();
-  code_buffer_.AddAll(Vector<byte>(code->address(), size));
-  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
-}
-
-
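-// Entry point for code caching: serialize the shared function info and
-// everything reachable from it into a byte sink, then wrap the data and the
-// collected code stub keys into a SerializedCodeData / ScriptData.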
-ScriptData* CodeSerializer::Serialize(Isolate* isolate,
-                                      Handle<SharedFunctionInfo> info,
-                                      Handle<String> source) {
-  base::ElapsedTimer timer;
-  if (FLAG_profile_deserialization) timer.Start();
-  if (FLAG_trace_serializer) {
-    PrintF("[Serializing from");
-    Object* script = info->script();
-    if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
-    PrintF("]\n");
-  }
-
-  // Serialize code object.
-  SnapshotByteSink sink(info->code()->CodeSize() * 2);
-  CodeSerializer cs(isolate, &sink, *source);
-  DisallowHeapAllocation no_gc;
-  Object** location = Handle<Object>::cast(info).location();
-  cs.VisitPointer(location);
-  cs.SerializeDeferredObjects();
-  cs.Pad();
-
-  SerializedCodeData data(sink.data(), cs);
-  ScriptData* script_data = data.GetScriptData();
-
-  if (FLAG_profile_deserialization) {
-    double ms = timer.Elapsed().InMillisecondsF();
-    int length = script_data->length();
-    PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
-  }
-
-  return script_data;
-}
-
-
-void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
-                                     WhereToPoint where_to_point, int skip) {
-  int root_index = root_index_map_.Lookup(obj);
-  if (root_index != RootIndexMap::kInvalidRootIndex) {
-    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
-    return;
-  }
-
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
-
-  FlushSkip(skip);
-
-  if (obj->IsCode()) {
-    Code* code_object = Code::cast(obj);
-    switch (code_object->kind()) {
-      case Code::OPTIMIZED_FUNCTION:  // No optimized code compiled yet.
-      case Code::HANDLER:             // No handlers patched in yet.
-      case Code::REGEXP:              // No regexp literals initialized yet.
-      case Code::NUMBER_OF_KINDS:     // Pseudo enum value.
-        CHECK(false);
-      case Code::BUILTIN:
-        SerializeBuiltin(code_object->builtin_index(), how_to_code,
-                         where_to_point);
-        return;
-      case Code::STUB:
-        SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
-        return;
-#define IC_KIND_CASE(KIND) case Code::KIND:
-        IC_KIND_LIST(IC_KIND_CASE)
-#undef IC_KIND_CASE
-        SerializeIC(code_object, how_to_code, where_to_point);
-        return;
-      case Code::FUNCTION:
-        DCHECK(code_object->has_reloc_info_for_serialization());
-        SerializeGeneric(code_object, how_to_code, where_to_point);
-        return;
-      case Code::WASM_FUNCTION:
-        UNREACHABLE();
-    }
-    UNREACHABLE();
-  }
-
-  // Past this point we should not see any (context-specific) maps anymore.
-  CHECK(!obj->IsMap());
-  // There should be no references to the global object embedded.
-  CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
-  // There should be no hash table embedded. They would require rehashing.
-  CHECK(!obj->IsHashTable());
-  // We expect no instantiated function objects or contexts.
-  CHECK(!obj->IsJSFunction() && !obj->IsContext());
-
-  SerializeGeneric(obj, how_to_code, where_to_point);
-}
-
-
-void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
-                                      HowToCode how_to_code,
-                                      WhereToPoint where_to_point) {
-  // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
-                              where_to_point);
-  serializer.Serialize();
-}
-
-
-void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
-                                      WhereToPoint where_to_point) {
-  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
-         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
-         (how_to_code == kFromCode && where_to_point == kInnerPointer));
-  DCHECK_LT(builtin_index, Builtins::builtin_count);
-  DCHECK_LE(0, builtin_index);
-
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding builtin: %s\n",
-           isolate()->builtins()->name(builtin_index));
-  }
-
-  sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
-  sink_->PutInt(builtin_index, "builtin_index");
-}
-
-
-void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
-                                       WhereToPoint where_to_point) {
-  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
-         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
-         (how_to_code == kFromCode && where_to_point == kInnerPointer));
-  DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
-  DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
-
-  int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
-
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding code stub %s as %d\n",
-           CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)), index);
-  }
-
-  sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
-  sink_->PutInt(index, "CodeStub key");
-}
-
-
-void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
-                                 WhereToPoint where_to_point) {
-  // The IC may be implemented as a stub.
-  uint32_t stub_key = ic->stub_key();
-  if (stub_key != CodeStub::NoCacheKey()) {
-    if (FLAG_trace_serializer) {
-      PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
-    }
-    SerializeCodeStub(stub_key, how_to_code, where_to_point);
-    return;
-  }
-  // The IC may be implemented as builtin. Only real builtins have an
-  // actual builtin_index value attached (otherwise it's just garbage).
-  // Compare to make sure we are really dealing with a builtin.
-  int builtin_index = ic->builtin_index();
-  if (builtin_index < Builtins::builtin_count) {
-    Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
-    Code* builtin = isolate()->builtins()->builtin(name);
-    if (builtin == ic) {
-      if (FLAG_trace_serializer) {
-        PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
-      }
-      DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
-             ic->kind() == Code::KEYED_STORE_IC);
-      SerializeBuiltin(builtin_index, how_to_code, where_to_point);
-      return;
-    }
-  }
-  // The IC may also just be a piece of code kept in the non_monomorphic_cache.
-  // In that case, just serialize as a normal code object.
-  if (FLAG_trace_serializer) {
-    PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
-  }
-  DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
-  SerializeGeneric(ic, how_to_code, where_to_point);
-}
-
-
-int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
-  // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
-  int index = 0;
-  while (index < stub_keys_.length()) {
-    if (stub_keys_[index] == stub_key) return index;
-    index++;
-  }
-  stub_keys_.Add(stub_key);
-  return index;
-}
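
Not part of this patch: the TODO above suggests replacing the linear scan with a hash table; a minimal standalone sketch of that idea in plain C++ (hypothetical names), deduplicating stub keys with amortized O(1) lookups per insertion:

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    class StubKeyIndex {
     public:
      // Returns the existing index for a key, or appends the key and returns
      // its new index, mirroring what CodeSerializer::AddCodeStubKey computes.
      int Add(uint32_t stub_key) {
        auto it = index_.find(stub_key);
        if (it != index_.end()) return it->second;
        int index = static_cast<int>(keys_.size());
        keys_.push_back(stub_key);
        index_.emplace(stub_key, index);
        return index;
      }
      const std::vector<uint32_t>& keys() const { return keys_; }

     private:
      std::vector<uint32_t> keys_;
      std::unordered_map<uint32_t, int> index_;
    };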
-
-
-MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
-    Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
-  base::ElapsedTimer timer;
-  if (FLAG_profile_deserialization) timer.Start();
-
-  HandleScope scope(isolate);
-
-  base::SmartPointer<SerializedCodeData> scd(
-      SerializedCodeData::FromCachedData(isolate, cached_data, *source));
-  if (scd.is_empty()) {
-    if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
-    DCHECK(cached_data->rejected());
-    return MaybeHandle<SharedFunctionInfo>();
-  }
-
-  // Prepare and register list of attached objects.
-  Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
-  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
-      code_stub_keys.length() + kCodeStubsBaseIndex);
-  attached_objects[kSourceObjectIndex] = source;
-  for (int i = 0; i < code_stub_keys.length(); i++) {
-    attached_objects[i + kCodeStubsBaseIndex] =
-        CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
-  }
-
-  Deserializer deserializer(scd.get());
-  deserializer.SetAttachedObjects(attached_objects);
-
-  // Deserialize.
-  Handle<SharedFunctionInfo> result;
-  if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
-    // Deserializing may fail if the reservations cannot be fulfilled.
-    if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
-    return MaybeHandle<SharedFunctionInfo>();
-  }
-
-  if (FLAG_profile_deserialization) {
-    double ms = timer.Elapsed().InMillisecondsF();
-    int length = cached_data->length();
-    PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
-  }
-  result->set_deserialized(true);
-
-  if (isolate->logger()->is_logging_code_events() ||
-      isolate->cpu_profiler()->is_profiling()) {
-    String* name = isolate->heap()->empty_string();
-    if (result->script()->IsScript()) {
-      Script* script = Script::cast(result->script());
-      if (script->name()->IsString()) name = String::cast(script->name());
-    }
-    isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG, result->code(),
-                                       *result, NULL, name);
-  }
-  return scope.CloseAndEscape(result);
-}
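
For context only (not part of this patch): roughly how an embedder reaches the code-cache path above through the public API. This sketch assumes an entered Isolate/Context and the kConsumeCodeCache compile option of this V8 line; check include/v8.h of the checkout before relying on the exact signatures.

    #include "include/v8.h"

    // Hypothetical helper: compile `code` reusing previously produced cache
    // bytes. If the cached data fails the sanity checks (version, flags, CPU
    // features or checksum mismatch), V8 rejects it and falls back to a full
    // compile.
    v8::MaybeLocal<v8::Script> CompileConsumingCache(
        v8::Local<v8::Context> context, v8::Local<v8::String> code,
        const uint8_t* cache_bytes, int cache_length) {
      v8::ScriptCompiler::CachedData* cached_data =
          new v8::ScriptCompiler::CachedData(cache_bytes, cache_length);
      v8::ScriptCompiler::Source source(code, cached_data);  // Takes ownership.
      return v8::ScriptCompiler::Compile(context, &source,
                                         v8::ScriptCompiler::kConsumeCodeCache);
    }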
-
-
-void SerializedData::AllocateData(int size) {
-  DCHECK(!owns_data_);
-  data_ = NewArray<byte>(size);
-  size_ = size;
-  owns_data_ = true;
-  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
-}
-
-
-SnapshotData::SnapshotData(const Serializer& ser) {
-  DisallowHeapAllocation no_gc;
-  List<Reservation> reservations;
-  ser.EncodeReservations(&reservations);
-  const List<byte>& payload = ser.sink()->data();
-
-  // Calculate sizes.
-  int reservation_size = reservations.length() * kInt32Size;
-  int size = kHeaderSize + reservation_size + payload.length();
-
-  // Allocate backing store and create result data.
-  AllocateData(size);
-
-  // Set header values.
-  SetMagicNumber(ser.isolate());
-  SetHeaderValue(kCheckSumOffset, Version::Hash());
-  SetHeaderValue(kNumReservationsOffset, reservations.length());
-  SetHeaderValue(kPayloadLengthOffset, payload.length());
-
-  // Copy reservation chunk sizes.
-  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
-            reservation_size);
-
-  // Copy serialized data.
-  CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
-            static_cast<size_t>(payload.length()));
-}
-
-
-bool SnapshotData::IsSane() {
-  return GetHeaderValue(kCheckSumOffset) == Version::Hash();
-}
-
-
-Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
-  return Vector<const Reservation>(
-      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
-      GetHeaderValue(kNumReservationsOffset));
-}
-
-
-Vector<const byte> SnapshotData::Payload() const {
-  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
-  const byte* payload = data_ + kHeaderSize + reservations_size;
-  int length = GetHeaderValue(kPayloadLengthOffset);
-  DCHECK_EQ(data_ + size_, payload + length);
-  return Vector<const byte>(payload, length);
-}
-
-
-class Checksum {
- public:
-  explicit Checksum(Vector<const byte> payload) {
-#ifdef MEMORY_SANITIZER
-    // Computing the checksum includes padding bytes for objects like strings.
-    // Mark every object as initialized in the code serializer.
-    MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
-#endif  // MEMORY_SANITIZER
-    // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
-    uintptr_t a = 1;
-    uintptr_t b = 0;
-    const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
-    DCHECK(IsAligned(payload.length(), kIntptrSize));
-    const uintptr_t* end = cur + payload.length() / kIntptrSize;
-    while (cur < end) {
-      // Unsigned overflow expected and intended.
-      a += *cur++;
-      b += a;
-    }
-#if V8_HOST_ARCH_64_BIT
-    a ^= a >> 32;
-    b ^= b >> 32;
-#endif  // V8_HOST_ARCH_64_BIT
-    a_ = static_cast<uint32_t>(a);
-    b_ = static_cast<uint32_t>(b);
-  }
-
-  bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
-
-  uint32_t a() const { return a_; }
-  uint32_t b() const { return b_; }
-
- private:
-  uint32_t a_;
-  uint32_t b_;
-
-  DISALLOW_COPY_AND_ASSIGN(Checksum);
-};
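
The class above is a Fletcher-style checksum over word-sized chunks, with the 64-bit sums folded down to 32 bits. A standalone sketch of the same loop in plain C++ (not part of this patch; it assumes an aligned buffer whose length is a multiple of the word size, as the DCHECK requires):

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    std::pair<uint32_t, uint32_t> FletcherChecksum(const uint8_t* data,
                                                   size_t length) {
      uintptr_t a = 1;
      uintptr_t b = 0;
      const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(data);
      const uintptr_t* end = cur + length / sizeof(uintptr_t);
      while (cur < end) {
        a += *cur++;  // Unsigned overflow is expected and harmless.
        b += a;
      }
    #if UINTPTR_MAX > 0xFFFFFFFFu
      a ^= a >> 32;  // Fold 64-bit sums down to 32 bits.
      b ^= b >> 32;
    #endif
      return {static_cast<uint32_t>(a), static_cast<uint32_t>(b)};
    }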
-
-
-SerializedCodeData::SerializedCodeData(const List<byte>& payload,
-                                       const CodeSerializer& cs) {
-  DisallowHeapAllocation no_gc;
-  const List<uint32_t>* stub_keys = cs.stub_keys();
-
-  List<Reservation> reservations;
-  cs.EncodeReservations(&reservations);
-
-  // Calculate sizes.
-  int reservation_size = reservations.length() * kInt32Size;
-  int num_stub_keys = stub_keys->length();
-  int stub_keys_size = stub_keys->length() * kInt32Size;
-  int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
-  int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
-  int size = padded_payload_offset + payload.length();
-
-  // Allocate backing store and create result data.
-  AllocateData(size);
-
-  // Set header values.
-  SetMagicNumber(cs.isolate());
-  SetHeaderValue(kVersionHashOffset, Version::Hash());
-  SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
-  SetHeaderValue(kCpuFeaturesOffset,
-                 static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
-  SetHeaderValue(kFlagHashOffset, FlagList::Hash());
-  SetHeaderValue(kNumReservationsOffset, reservations.length());
-  SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
-  SetHeaderValue(kPayloadLengthOffset, payload.length());
-
-  Checksum checksum(payload.ToConstVector());
-  SetHeaderValue(kChecksum1Offset, checksum.a());
-  SetHeaderValue(kChecksum2Offset, checksum.b());
-
-  // Copy reservation chunk sizes.
-  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
-            reservation_size);
-
-  // Copy code stub keys.
-  CopyBytes(data_ + kHeaderSize + reservation_size,
-            reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
-
-  memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
-
-  // Copy serialized data.
-  CopyBytes(data_ + padded_payload_offset, payload.begin(),
-            static_cast<size_t>(payload.length()));
-}
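
The constructor lays the cache out as header, reservation sizes, stub keys, and a pointer-aligned payload. A small sketch of the same offset arithmetic (not part of this patch; the constants are illustrative stand-ins for kInt32Size and the pointer size):

    // Illustrative stand-ins for kInt32Size / kPointerSize.
    const int kInt32Size = 4;
    const int kPointerSize = 8;

    // Returns the pointer-aligned offset at which the payload starts, given
    // the header size and the reservation / stub key counts from the header.
    int PayloadOffset(int header_size, int num_reservations, int num_stub_keys) {
      int unpadded = header_size + num_reservations * kInt32Size +
                     num_stub_keys * kInt32Size;
      return (unpadded + kPointerSize - 1) & ~(kPointerSize - 1);
    }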
-
-
-SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
-    Isolate* isolate, String* source) const {
-  uint32_t magic_number = GetMagicNumber();
-  if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
-  uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
-  uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
-  uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
-  uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
-  uint32_t c1 = GetHeaderValue(kChecksum1Offset);
-  uint32_t c2 = GetHeaderValue(kChecksum2Offset);
-  if (version_hash != Version::Hash()) return VERSION_MISMATCH;
-  if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
-  if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
-    return CPU_FEATURES_MISMATCH;
-  }
-  if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
-  if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
-  return CHECK_SUCCESS;
-}
-
-
-uint32_t SerializedCodeData::SourceHash(String* source) const {
-  return source->length();
-}
-
-
-// Return ScriptData object and relinquish ownership over it to the caller.
-ScriptData* SerializedCodeData::GetScriptData() {
-  DCHECK(owns_data_);
-  ScriptData* result = new ScriptData(data_, size_);
-  result->AcquireDataOwnership();
-  owns_data_ = false;
-  data_ = NULL;
-  return result;
-}
-
-
-Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
-    const {
-  return Vector<const Reservation>(
-      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
-      GetHeaderValue(kNumReservationsOffset));
-}
-
-
-Vector<const byte> SerializedCodeData::Payload() const {
-  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
-  int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
-  int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
-  int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
-  const byte* payload = data_ + padded_payload_offset;
-  DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
-  int length = GetHeaderValue(kPayloadLengthOffset);
-  DCHECK_EQ(data_ + size_, payload + length);
-  return Vector<const byte>(payload, length);
-}
-
-
-Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
-  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
-  const byte* start = data_ + kHeaderSize + reservations_size;
-  return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
-                                GetHeaderValue(kNumCodeStubKeysOffset));
-}
-
-
-SerializedCodeData::SerializedCodeData(ScriptData* data)
-    : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
-
-
-SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
-                                                       ScriptData* cached_data,
-                                                       String* source) {
-  DisallowHeapAllocation no_gc;
-  SerializedCodeData* scd = new SerializedCodeData(cached_data);
-  SanityCheckResult r = scd->SanityCheck(isolate, source);
-  if (r == CHECK_SUCCESS) return scd;
-  cached_data->Reject();
-  source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
-  delete scd;
-  return NULL;
-}
-}  // namespace internal
-}  // namespace v8
diff --git a/src/snapshot/serialize.h b/src/snapshot/serialize.h
deleted file mode 100644
index f7420ef..0000000
--- a/src/snapshot/serialize.h
+++ /dev/null
@@ -1,816 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_SERIALIZE_H_
-#define V8_SNAPSHOT_SERIALIZE_H_
-
-#include "src/address-map.h"
-#include "src/heap/heap.h"
-#include "src/objects.h"
-#include "src/snapshot/snapshot-source-sink.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-class ScriptData;
-
-static const int kDeoptTableSerializeEntryCount = 64;
-
-// ExternalReferenceTable is a helper class that defines the relationship
-// between external references and their encodings. It is used to build
-// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
-class ExternalReferenceTable {
- public:
-  static ExternalReferenceTable* instance(Isolate* isolate);
-
-  int size() const { return refs_.length(); }
-  Address address(int i) { return refs_[i].address; }
-  const char* name(int i) { return refs_[i].name; }
-
-  inline static Address NotAvailable() { return NULL; }
-
- private:
-  struct ExternalReferenceEntry {
-    Address address;
-    const char* name;
-  };
-
-  explicit ExternalReferenceTable(Isolate* isolate);
-
-  void Add(Address address, const char* name) {
-    ExternalReferenceEntry entry = {address, name};
-    refs_.Add(entry);
-  }
-
-  List<ExternalReferenceEntry> refs_;
-
-  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
-};
-
-
-class ExternalReferenceEncoder {
- public:
-  explicit ExternalReferenceEncoder(Isolate* isolate);
-
-  uint32_t Encode(Address key) const;
-
-  const char* NameOfAddress(Isolate* isolate, Address address) const;
-
- private:
-  static uint32_t Hash(Address key) {
-    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
-                                 kPointerSizeLog2);
-  }
-
-  HashMap* map_;
-
-  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
-};
-
-
-class PartialCacheIndexMap : public AddressMapBase {
- public:
-  PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
-
-  static const int kInvalidIndex = -1;
-
-  // Lookup object in the map. Return its index if found, or create
-  // a new entry with new_index as value, and return kInvalidIndex.
-  int LookupOrInsert(HeapObject* obj, int new_index) {
-    HashMap::Entry* entry = LookupEntry(&map_, obj, false);
-    if (entry != NULL) return GetValue(entry);
-    SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
-    return kInvalidIndex;
-  }
-
- private:
-  HashMap map_;
-
-  DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
-};
-
-
-class HotObjectsList {
- public:
-  HotObjectsList() : index_(0) {
-    for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
-  }
-
-  void Add(HeapObject* object) {
-    circular_queue_[index_] = object;
-    index_ = (index_ + 1) & kSizeMask;
-  }
-
-  HeapObject* Get(int index) {
-    DCHECK_NOT_NULL(circular_queue_[index]);
-    return circular_queue_[index];
-  }
-
-  static const int kNotFound = -1;
-
-  int Find(HeapObject* object) {
-    for (int i = 0; i < kSize; i++) {
-      if (circular_queue_[i] == object) return i;
-    }
-    return kNotFound;
-  }
-
-  static const int kSize = 8;
-
- private:
-  STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
-  static const int kSizeMask = kSize - 1;
-  HeapObject* circular_queue_[kSize];
-  int index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
-};
-
-
-// The Serializer/Deserializer class is a common superclass for Serializer and
-// Deserializer which is used to store common constants and methods used by
-// both.
-class SerializerDeserializer: public ObjectVisitor {
- public:
-  static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
-
-  // No reservation for large object space necessary.
-  static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
-  static const int kNumberOfSpaces = LAST_SPACE + 1;
-
- protected:
-  static bool CanBeDeferred(HeapObject* o);
-
-  // ---------- byte code range 0x00..0x7f ----------
-  // Byte codes in this range represent Where, HowToCode and WhereToPoint.
-  // Where the pointed-to object can be found:
-  // The static assert below will trigger when the number of preallocated spaces
-  // changed. If that happens, update the bytecode ranges in the comments below.
-  STATIC_ASSERT(5 == kNumberOfSpaces);
-  enum Where {
-    // 0x00..0x04  Allocate new object, in specified space.
-    kNewObject = 0,
-    // 0x05        Unused (including 0x25, 0x45, 0x65).
-    // 0x06        Unused (including 0x26, 0x46, 0x66).
-    // 0x07        Unused (including 0x27, 0x47, 0x67).
-    // 0x08..0x0c  Reference to previous object from space.
-    kBackref = 0x08,
-    // 0x0d        Unused (including 0x2d, 0x4d, 0x6d).
-    // 0x0e        Unused (including 0x2e, 0x4e, 0x6e).
-    // 0x0f        Unused (including 0x2f, 0x4f, 0x6f).
-    // 0x10..0x14  Reference to previous object from space after skip.
-    kBackrefWithSkip = 0x10,
-    // 0x15        Unused (including 0x35, 0x55, 0x75).
-    // 0x16        Unused (including 0x36, 0x56, 0x76).
-    // 0x17        Misc (including 0x37, 0x57, 0x77).
-    // 0x18        Root array item.
-    kRootArray = 0x18,
-    // 0x19        Object in the partial snapshot cache.
-    kPartialSnapshotCache = 0x19,
-    // 0x1a        External reference referenced by id.
-    kExternalReference = 0x1a,
-    // 0x1b        Object provided in the attached list.
-    kAttachedReference = 0x1b,
-    // 0x1c        Builtin code referenced by index.
-    kBuiltin = 0x1c
-    // 0x1d..0x1f  Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
-  };
-
-  static const int kWhereMask = 0x1f;
-  static const int kSpaceMask = 7;
-  STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
-
-  // How to code the pointer to the object.
-  enum HowToCode {
-    // Straight pointer.
-    kPlain = 0,
-    // A pointer inlined in code. What this means depends on the architecture.
-    kFromCode = 0x20
-  };
-
-  static const int kHowToCodeMask = 0x20;
-
-  // Where to point within the object.
-  enum WhereToPoint {
-    // Points to start of object
-    kStartOfObject = 0,
-    // Points to instruction in code object or payload of cell.
-    kInnerPointer = 0x40
-  };
-
-  static const int kWhereToPointMask = 0x40;
-
-  // ---------- Misc ----------
-  // Skip.
-  static const int kSkip = 0x1d;
-  // Internal reference encoded as offsets of pc and target from code entry.
-  static const int kInternalReference = 0x1e;
-  static const int kInternalReferenceEncoded = 0x1f;
-  // Do nothing, used for padding.
-  static const int kNop = 0x3d;
-  // Move to next reserved chunk.
-  static const int kNextChunk = 0x3e;
-  // Deferring object content.
-  static const int kDeferred = 0x3f;
-  // Used for the source code of the natives, which is in the executable, but
-  // is referred to from external strings in the snapshot.
-  static const int kNativesStringResource = 0x5d;
-  // Used for the source code for compiled stubs, which is in the executable,
-  // but is referred to from external strings in the snapshot.
-  static const int kExtraNativesStringResource = 0x5e;
-  // A tag emitted at strategic points in the snapshot to delineate sections.
-  // If the deserializer does not find these at the expected moments then it
-  // is an indication that the snapshot and the VM do not fit together.
-  // Examine the build process for architecture, version or configuration
-  // mismatches.
-  static const int kSynchronize = 0x17;
-  // Repeats of variable length.
-  static const int kVariableRepeat = 0x37;
-  // Raw data of variable length.
-  static const int kVariableRawData = 0x57;
-  // Alignment prefixes 0x7d..0x7f
-  static const int kAlignmentPrefix = 0x7d;
-
-  // 0x77 unused
-
-  // ---------- byte code range 0x80..0xff ----------
-  // First 32 root array items.
-  static const int kNumberOfRootArrayConstants = 0x20;
-  // 0x80..0x9f
-  static const int kRootArrayConstants = 0x80;
-  // 0xa0..0xbf
-  static const int kRootArrayConstantsWithSkip = 0xa0;
-  static const int kRootArrayConstantsMask = 0x1f;
-
-  // 8 hot (recently seen or back-referenced) objects with optional skip.
-  static const int kNumberOfHotObjects = 0x08;
-  // 0xc0..0xc7
-  static const int kHotObject = 0xc0;
-  // 0xc8..0xcf
-  static const int kHotObjectWithSkip = 0xc8;
-  static const int kHotObjectMask = 0x07;
-
-  // 32 common raw data lengths.
-  static const int kNumberOfFixedRawData = 0x20;
-  // 0xd0..0xef
-  static const int kFixedRawData = 0xd0;
-  static const int kOnePointerRawData = kFixedRawData;
-  static const int kFixedRawDataStart = kFixedRawData - 1;
-
-  // 16 repeats lengths.
-  static const int kNumberOfFixedRepeat = 0x10;
-  // 0xf0..0xff
-  static const int kFixedRepeat = 0xf0;
-  static const int kFixedRepeatStart = kFixedRepeat - 1;
-
-  // ---------- special values ----------
-  static const int kAnyOldSpace = -1;
-
-  // Sentinel after a new object to indicate that double alignment is needed.
-  static const int kDoubleAlignmentSentinel = 0;
-
-  // Used as index for the attached reference representing the source object.
-  static const int kSourceObjectReference = 0;
-
-  // Used as index for the attached reference representing the global proxy.
-  static const int kGlobalProxyReference = 0;
-
-  // ---------- member variable ----------
-  HotObjectsList hot_objects_;
-};
-
-
-class SerializedData {
- public:
-  class Reservation {
-   public:
-    explicit Reservation(uint32_t size)
-        : reservation_(ChunkSizeBits::encode(size)) {}
-
-    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
-    bool is_last() const { return IsLastChunkBits::decode(reservation_); }
-
-    void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
-
-   private:
-    uint32_t reservation_;
-  };
-
-  SerializedData(byte* data, int size)
-      : data_(data), size_(size), owns_data_(false) {}
-  SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
-
-  ~SerializedData() {
-    if (owns_data_) DeleteArray<byte>(data_);
-  }
-
-  uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
-
-  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
-  class IsLastChunkBits : public BitField<bool, 31, 1> {};
-
-  static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
-    uint32_t external_refs = table->size();
-    return 0xC0DE0000 ^ external_refs;
-  }
-
- protected:
-  void SetHeaderValue(int offset, uint32_t value) {
-    uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
-    memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
-  }
-
-  uint32_t GetHeaderValue(int offset) const {
-    uint32_t value;
-    memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
-    return value;
-  }
-
-  void AllocateData(int size);
-
-  static uint32_t ComputeMagicNumber(Isolate* isolate) {
-    return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
-  }
-
-  void SetMagicNumber(Isolate* isolate) {
-    SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
-  }
-
-  static const int kMagicNumberOffset = 0;
-
-  byte* data_;
-  int size_;
-  bool owns_data_;
-};
-
-
-// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerializerDeserializer {
- public:
-  // Create a deserializer from a snapshot byte source.
-  template <class Data>
-  explicit Deserializer(Data* data)
-      : isolate_(NULL),
-        source_(data->Payload()),
-        magic_number_(data->GetMagicNumber()),
-        external_reference_table_(NULL),
-        deserialized_large_objects_(0),
-        deserializing_user_code_(false),
-        next_alignment_(kWordAligned) {
-    DecodeReservation(data->Reservations());
-  }
-
-  ~Deserializer() override;
-
-  // Deserialize the snapshot into an empty heap.
-  void Deserialize(Isolate* isolate);
-
-  // Deserialize a single object and the objects reachable from it.
-  MaybeHandle<Object> DeserializePartial(Isolate* isolate,
-                                         Handle<JSGlobalProxy> global_proxy);
-
-  // Deserialize a shared function info. Fail gracefully.
-  MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
-
-  // Pass a vector of externally-provided objects referenced by the snapshot.
-  // The ownership to its backing store is handed over as well.
-  void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
-    attached_objects_ = attached_objects;
-  }
-
- private:
-  void VisitPointers(Object** start, Object** end) override;
-
-  void Synchronize(VisitorSynchronization::SyncTag tag) override;
-
-  void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
-
-  void Initialize(Isolate* isolate);
-
-  bool deserializing_user_code() { return deserializing_user_code_; }
-
-  void DecodeReservation(Vector<const SerializedData::Reservation> res);
-
-  bool ReserveSpace();
-
-  void UnalignedCopy(Object** dest, Object** src) {
-    memcpy(dest, src, sizeof(*src));
-  }
-
-  void SetAlignment(byte data) {
-    DCHECK_EQ(kWordAligned, next_alignment_);
-    int alignment = data - (kAlignmentPrefix - 1);
-    DCHECK_LE(kWordAligned, alignment);
-    DCHECK_LE(alignment, kSimd128Unaligned);
-    next_alignment_ = static_cast<AllocationAlignment>(alignment);
-  }
-
-  void DeserializeDeferredObjects();
-
-  void FlushICacheForNewIsolate();
-  void FlushICacheForNewCodeObjects();
-
-  void CommitPostProcessedObjects(Isolate* isolate);
-
-  // Fills in some heap data in an area from start to end (non-inclusive).  The
-  // space id is used for the write barrier.  The object_address is the address
-  // of the object we are writing into, or NULL if we are not writing into an
-  // object, i.e. if we are writing a series of tagged values that are not on
-  // the heap. Return false if the object content has been deferred.
-  bool ReadData(Object** start, Object** end, int space,
-                Address object_address);
-  void ReadObject(int space_number, Object** write_back);
-  Address Allocate(int space_index, int size);
-
-  // Special handling for serialized code like hooking up internalized strings.
-  HeapObject* PostProcessNewObject(HeapObject* obj, int space);
-
-  // This returns the address of an object that has been described in the
-  // snapshot by chunk index and offset.
-  HeapObject* GetBackReferencedObject(int space);
-
-  Object** CopyInNativesSource(Vector<const char> source_vector,
-                               Object** current);
-
-  // Cached current isolate.
-  Isolate* isolate_;
-
-  // Objects from the attached object descriptions in the serialized user code.
-  Vector<Handle<Object> > attached_objects_;
-
-  SnapshotByteSource source_;
-  uint32_t magic_number_;
-
-  // The address of the next object that will be allocated in each space.
-  // Each space has a number of chunks reserved by the GC, with each chunk
-  // fitting into a page. Deserialized objects are allocated into the
-  // current chunk of the target space by bumping up high water mark.
-  Heap::Reservation reservations_[kNumberOfSpaces];
-  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
-  Address high_water_[kNumberOfPreallocatedSpaces];
-
-  ExternalReferenceTable* external_reference_table_;
-
-  List<HeapObject*> deserialized_large_objects_;
-  List<Code*> new_code_objects_;
-  List<Handle<String> > new_internalized_strings_;
-  List<Handle<Script> > new_scripts_;
-
-  bool deserializing_user_code_;
-
-  AllocationAlignment next_alignment_;
-
-  DISALLOW_COPY_AND_ASSIGN(Deserializer);
-};
-
-
-class CodeAddressMap;
-
-// There can be only one serializer per V8 process.
-class Serializer : public SerializerDeserializer {
- public:
-  Serializer(Isolate* isolate, SnapshotByteSink* sink);
-  ~Serializer() override;
-
-  void EncodeReservations(List<SerializedData::Reservation>* out) const;
-
-  void SerializeDeferredObjects();
-
-  Isolate* isolate() const { return isolate_; }
-
-  BackReferenceMap* back_reference_map() { return &back_reference_map_; }
-  RootIndexMap* root_index_map() { return &root_index_map_; }
-
-#ifdef OBJECT_PRINT
-  void CountInstanceType(Map* map, int size);
-#endif  // OBJECT_PRINT
-
- protected:
-  class ObjectSerializer;
-  class RecursionScope {
-   public:
-    explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
-      serializer_->recursion_depth_++;
-    }
-    ~RecursionScope() { serializer_->recursion_depth_--; }
-    bool ExceedsMaximum() {
-      return serializer_->recursion_depth_ >= kMaxRecursionDepth;
-    }
-
-   private:
-    static const int kMaxRecursionDepth = 32;
-    Serializer* serializer_;
-  };
-
-  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
-                               WhereToPoint where_to_point, int skip) = 0;
-
-  void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
-               int skip);
-
-  void PutBackReference(HeapObject* object, BackReference reference);
-
-  // Emit alignment prefix if necessary, return required padding space in bytes.
-  int PutAlignmentPrefix(HeapObject* object);
-
-  // Returns true if the object was successfully serialized.
-  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
-                            WhereToPoint where_to_point, int skip);
-
-  inline void FlushSkip(int skip) {
-    if (skip != 0) {
-      sink_->Put(kSkip, "SkipFromSerializeObject");
-      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
-    }
-  }
-
-  bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
-
-  // This will return the space for an object.
-  BackReference AllocateLargeObject(int size);
-  BackReference Allocate(AllocationSpace space, int size);
-  int EncodeExternalReference(Address addr) {
-    return external_reference_encoder_.Encode(addr);
-  }
-
-  // GetInt reads 4 bytes at once, requiring padding at the end.
-  void Pad();
-
-  // Some roots should not be serialized, because their actual value depends on
-  // absolute addresses and they are reset after deserialization, anyway.
-  bool ShouldBeSkipped(Object** current);
-
-  // We may not need the code address map for logging for every instance
-  // of the serializer.  Initialize it on demand.
-  void InitializeCodeAddressMap();
-
-  Code* CopyCode(Code* code);
-
-  inline uint32_t max_chunk_size(int space) const {
-    DCHECK_LE(0, space);
-    DCHECK_LT(space, kNumberOfSpaces);
-    return max_chunk_size_[space];
-  }
-
-  SnapshotByteSink* sink() const { return sink_; }
-
-  void QueueDeferredObject(HeapObject* obj) {
-    DCHECK(back_reference_map_.Lookup(obj).is_valid());
-    deferred_objects_.Add(obj);
-  }
-
-  void OutputStatistics(const char* name);
-
-  Isolate* isolate_;
-
-  SnapshotByteSink* sink_;
-  ExternalReferenceEncoder external_reference_encoder_;
-
-  BackReferenceMap back_reference_map_;
-  RootIndexMap root_index_map_;
-
-  int recursion_depth_;
-
-  friend class Deserializer;
-  friend class ObjectSerializer;
-  friend class RecursionScope;
-  friend class SnapshotData;
-
- private:
-  void VisitPointers(Object** start, Object** end) override;
-
-  CodeAddressMap* code_address_map_;
-  // Objects from the same space are put into chunks for bulk-allocation
-  // when deserializing. We have to make sure that each chunk fits into a
-  // page. So we track the chunk size in pending_chunk_ of a space, but
-  // when it exceeds a page, we complete the current chunk and start a new one.
-  uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
-  List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
-  uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
-
-  // We map serialized large objects to indexes for back-referencing.
-  uint32_t large_objects_total_size_;
-  uint32_t seen_large_objects_index_;
-
-  List<byte> code_buffer_;
-
-  // To handle stack overflow.
-  List<HeapObject*> deferred_objects_;
-
-#ifdef OBJECT_PRINT
-  static const int kInstanceTypes = 256;
-  int* instance_type_count_;
-  size_t* instance_type_size_;
-#endif  // OBJECT_PRINT
-
-  DISALLOW_COPY_AND_ASSIGN(Serializer);
-};
-
-
-class PartialSerializer : public Serializer {
- public:
-  PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
-                    SnapshotByteSink* sink)
-      : Serializer(isolate, sink),
-        startup_serializer_(startup_snapshot_serializer),
-        global_object_(NULL) {
-    InitializeCodeAddressMap();
-  }
-
-  ~PartialSerializer() override { OutputStatistics("PartialSerializer"); }
-
-  // Serialize the objects reachable from a single object pointer.
-  void Serialize(Object** o);
-
- private:
-  void SerializeObject(HeapObject* o, HowToCode how_to_code,
-                       WhereToPoint where_to_point, int skip) override;
-
-  int PartialSnapshotCacheIndex(HeapObject* o);
-  bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
-
-  Serializer* startup_serializer_;
-  Object* global_object_;
-  PartialCacheIndexMap partial_cache_index_map_;
-  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
-};
-
-
-class StartupSerializer : public Serializer {
- public:
-  StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
-  ~StartupSerializer() override { OutputStatistics("StartupSerializer"); }
-
-  // Serialize the current state of the heap.  The order is:
-  // 1) Strong references.
-  // 2) Partial snapshot cache.
-  // 3) Weak references (e.g. the string table).
-  void SerializeStrongReferences();
-  void SerializeWeakReferencesAndDeferred();
-
- private:
-  // The StartupSerializer has to serialize the root array, which is slightly
-  // different.
-  void VisitPointers(Object** start, Object** end) override;
-  void SerializeObject(HeapObject* o, HowToCode how_to_code,
-                       WhereToPoint where_to_point, int skip) override;
-  void Synchronize(VisitorSynchronization::SyncTag tag) override;
-
-  intptr_t root_index_wave_front_;
-  bool serializing_builtins_;
-  DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
-};
-
-
-class CodeSerializer : public Serializer {
- public:
-  static ScriptData* Serialize(Isolate* isolate,
-                               Handle<SharedFunctionInfo> info,
-                               Handle<String> source);
-
-  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
-      Isolate* isolate, ScriptData* cached_data, Handle<String> source);
-
-  static const int kSourceObjectIndex = 0;
-  STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
-
-  static const int kCodeStubsBaseIndex = 1;
-
-  String* source() const {
-    DCHECK(!AllowHeapAllocation::IsAllowed());
-    return source_;
-  }
-
-  const List<uint32_t>* stub_keys() const { return &stub_keys_; }
-
- private:
-  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
-      : Serializer(isolate, sink), source_(source) {
-    back_reference_map_.AddSourceString(source);
-  }
-
-  ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
-
-  void SerializeObject(HeapObject* o, HowToCode how_to_code,
-                       WhereToPoint where_to_point, int skip) override;
-
-  void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
-                        WhereToPoint where_to_point);
-  void SerializeIC(Code* ic, HowToCode how_to_code,
-                   WhereToPoint where_to_point);
-  void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
-                         WhereToPoint where_to_point);
-  void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
-                        WhereToPoint where_to_point);
-  int AddCodeStubKey(uint32_t stub_key);
-
-  DisallowHeapAllocation no_gc_;
-  String* source_;
-  List<uint32_t> stub_keys_;
-  DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
-};
-
-
-// Wrapper around reservation sizes and the serialization payload.
-class SnapshotData : public SerializedData {
- public:
-  // Used when producing.
-  explicit SnapshotData(const Serializer& ser);
-
-  // Used when consuming.
-  explicit SnapshotData(const Vector<const byte> snapshot)
-      : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
-    CHECK(IsSane());
-  }
-
-  Vector<const Reservation> Reservations() const;
-  Vector<const byte> Payload() const;
-
-  Vector<const byte> RawData() const {
-    return Vector<const byte>(data_, size_);
-  }
-
- private:
-  bool IsSane();
-
-  // The data header consists of uint32_t-sized entries:
-  // [0] magic number and external reference count
-  // [1] version hash
-  // [2] number of reservation size entries
-  // [3] payload length
-  // ... reservations
-  // ... serialized payload
-  static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
-  static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
-  static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
-  static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
-};
-
-
-// Wrapper around ScriptData to provide code-serializer-specific functionality.
-class SerializedCodeData : public SerializedData {
- public:
-  // Used when consuming.
-  static SerializedCodeData* FromCachedData(Isolate* isolate,
-                                            ScriptData* cached_data,
-                                            String* source);
-
-  // Used when producing.
-  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
-
-  // Return ScriptData object and relinquish ownership over it to the caller.
-  ScriptData* GetScriptData();
-
-  Vector<const Reservation> Reservations() const;
-  Vector<const byte> Payload() const;
-
-  Vector<const uint32_t> CodeStubKeys() const;
-
- private:
-  explicit SerializedCodeData(ScriptData* data);
-
-  enum SanityCheckResult {
-    CHECK_SUCCESS = 0,
-    MAGIC_NUMBER_MISMATCH = 1,
-    VERSION_MISMATCH = 2,
-    SOURCE_MISMATCH = 3,
-    CPU_FEATURES_MISMATCH = 4,
-    FLAGS_MISMATCH = 5,
-    CHECKSUM_MISMATCH = 6
-  };
-
-  SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
-
-  uint32_t SourceHash(String* source) const;
-
-  // The data header consists of uint32_t-sized entries:
-  // [0] magic number and external reference count
-  // [1] version hash
-  // [2] source hash
-  // [3] cpu features
-  // [4] flag hash
-  // [5] number of code stub keys
-  // [6] number of reservation size entries
-  // [7] payload length
-  // [8] payload checksum part 1
-  // [9] payload checksum part 2
-  // ...  reservations
-  // ...  code stub keys
-  // ...  serialized payload
-  static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
-  static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
-  static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
-  static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
-  static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
-  static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
-  static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
-  static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
-  static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
-  static const int kHeaderSize = kChecksum2Offset + kInt32Size;
-};
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_SNAPSHOT_SERIALIZE_H_
diff --git a/src/snapshot/serializer-common.cc b/src/snapshot/serializer-common.cc
new file mode 100644
index 0000000..eeb7eb7
--- /dev/null
+++ b/src/snapshot/serializer-common.cc
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/serializer-common.h"
+
+#include "src/external-reference-table.h"
+#include "src/ic/stub-cache.h"
+#include "src/list-inl.h"
+
+namespace v8 {
+namespace internal {
+
+ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
+  map_ = isolate->external_reference_map();
+  if (map_ != NULL) return;
+  map_ = new HashMap(HashMap::PointersMatch);
+  ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
+  for (int i = 0; i < table->size(); ++i) {
+    Address addr = table->address(i);
+    if (addr == ExternalReferenceTable::NotAvailable()) continue;
+    // We expect no duplicate external references entries in the table.
+    DCHECK_NULL(map_->Lookup(addr, Hash(addr)));
+    map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
+  }
+  isolate->set_external_reference_map(map_);
+}
+
+uint32_t ExternalReferenceEncoder::Encode(Address address) const {
+  DCHECK_NOT_NULL(address);
+  HashMap::Entry* entry =
+      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+  DCHECK_NOT_NULL(entry);
+  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+}
+
+const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
+                                                    Address address) const {
+  HashMap::Entry* entry =
+      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+  if (entry == NULL) return "<unknown>";
+  uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+  return ExternalReferenceTable::instance(isolate)->name(i);
+}
+
+void SerializedData::AllocateData(int size) {
+  DCHECK(!owns_data_);
+  data_ = NewArray<byte>(size);
+  size_ = size;
+  owns_data_ = true;
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
+}
+
+// The partial snapshot cache is terminated by undefined. We visit the
+// partial snapshot cache...
+//  - during deserialization to populate it.
+//  - during normal GC to keep its content alive.
+//  - not during serialization. The partial serializer adds to it explicitly.
+void SerializerDeserializer::Iterate(Isolate* isolate, ObjectVisitor* visitor) {
+  List<Object*>* cache = isolate->partial_snapshot_cache();
+  for (int i = 0;; ++i) {
+    // Extend the array so it is ready to hold a value when deserializing.
+    if (cache->length() <= i) cache->Add(Smi::FromInt(0));
+    // During deserialization, the visitor populates the partial snapshot cache
+    // and eventually terminates the cache with undefined.
+    visitor->VisitPointer(&cache->at(i));
+    if (cache->at(i)->IsUndefined()) break;
+  }
+}
+
+bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
+  return !o->IsString() && !o->IsScript();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/snapshot/serializer-common.h b/src/snapshot/serializer-common.h
new file mode 100644
index 0000000..645a9af
--- /dev/null
+++ b/src/snapshot/serializer-common.h
@@ -0,0 +1,290 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SERIALIZER_COMMON_H_
+#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
+
+#include "src/address-map.h"
+#include "src/external-reference-table.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+class ExternalReferenceEncoder {
+ public:
+  explicit ExternalReferenceEncoder(Isolate* isolate);
+
+  uint32_t Encode(Address key) const;
+
+  const char* NameOfAddress(Isolate* isolate, Address address) const;
+
+ private:
+  static uint32_t Hash(Address key) {
+    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
+                                 kPointerSizeLog2);
+  }
+
+  HashMap* map_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
+};
+
+class HotObjectsList {
+ public:
+  HotObjectsList() : index_(0) {
+    for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
+  }
+
+  void Add(HeapObject* object) {
+    circular_queue_[index_] = object;
+    index_ = (index_ + 1) & kSizeMask;
+  }
+
+  HeapObject* Get(int index) {
+    DCHECK_NOT_NULL(circular_queue_[index]);
+    return circular_queue_[index];
+  }
+
+  static const int kNotFound = -1;
+
+  int Find(HeapObject* object) {
+    for (int i = 0; i < kSize; i++) {
+      if (circular_queue_[i] == object) return i;
+    }
+    return kNotFound;
+  }
+
+  static const int kSize = 8;
+
+ private:
+  STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
+  static const int kSizeMask = kSize - 1;
+  HeapObject* circular_queue_[kSize];
+  int index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
+};
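
The working set above is a fixed-size ring that both sides of (de)serialization keep in step, so a small index into it identifies a recently seen object. A standalone sketch of the ring behaviour with ints instead of HeapObject* (not part of this patch):

    #include <array>

    class HotRing {
     public:
      static const int kSize = 8;      // Power of two, as STATIC_ASSERTed above.
      static const int kNotFound = -1;
      void Add(int value) {
        queue_[index_] = value;
        index_ = (index_ + 1) & (kSize - 1);  // Wrap around with a mask.
      }
      int Find(int value) const {
        for (int i = 0; i < kSize; i++) {
          if (queue_[i] == value) return i;
        }
        return kNotFound;
      }

     private:
      std::array<int, kSize> queue_{};
      int index_ = 0;
    };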
+
+// The Serializer/Deserializer class is a common superclass for Serializer and
+// Deserializer which is used to store common constants and methods used by
+// both.
+class SerializerDeserializer : public ObjectVisitor {
+ public:
+  static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
+
+  // No reservation for large object space necessary.
+  static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
+  static const int kNumberOfSpaces = LAST_SPACE + 1;
+
+ protected:
+  static bool CanBeDeferred(HeapObject* o);
+
+  // ---------- byte code range 0x00..0x7f ----------
+  // Byte codes in this range represent Where, HowToCode and WhereToPoint.
+  // Where the pointed-to object can be found:
+  // The static assert below will trigger when the number of preallocated spaces
+  // changes. If that happens, update the bytecode ranges in the comments below.
+  STATIC_ASSERT(5 == kNumberOfSpaces);
+  enum Where {
+    // 0x00..0x04  Allocate new object, in specified space.
+    kNewObject = 0,
+    // 0x05        Unused (including 0x25, 0x45, 0x65).
+    // 0x06        Unused (including 0x26, 0x46, 0x66).
+    // 0x07        Unused (including 0x27, 0x47, 0x67).
+    // 0x08..0x0c  Reference to previous object from space.
+    kBackref = 0x08,
+    // 0x0d        Unused (including 0x2d, 0x4d, 0x6d).
+    // 0x0e        Unused (including 0x2e, 0x4e, 0x6e).
+    // 0x0f        Unused (including 0x2f, 0x4f, 0x6f).
+    // 0x10..0x14  Reference to previous object from space after skip.
+    kBackrefWithSkip = 0x10,
+    // 0x15        Unused (including 0x35, 0x55, 0x75).
+    // 0x16        Unused (including 0x36, 0x56, 0x76).
+    // 0x17        Misc (including 0x37, 0x57, 0x77).
+    // 0x18        Root array item.
+    kRootArray = 0x18,
+    // 0x19        Object in the partial snapshot cache.
+    kPartialSnapshotCache = 0x19,
+    // 0x1a        External reference referenced by id.
+    kExternalReference = 0x1a,
+    // 0x1b        Object provided in the attached list.
+    kAttachedReference = 0x1b,
+    // 0x1c        Builtin code referenced by index.
+    kBuiltin = 0x1c
+    // 0x1d..0x1f  Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
+  };
+
+  static const int kWhereMask = 0x1f;
+  static const int kSpaceMask = 7;
+  STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
+
+  // How to code the pointer to the object.
+  enum HowToCode {
+    // Straight pointer.
+    kPlain = 0,
+    // A pointer inlined in code. What this means depends on the architecture.
+    kFromCode = 0x20
+  };
+
+  static const int kHowToCodeMask = 0x20;
+
+  // Where to point within the object.
+  enum WhereToPoint {
+    // Points to start of object
+    kStartOfObject = 0,
+    // Points to instruction in code object or payload of cell.
+    kInnerPointer = 0x40
+  };
+
+  static const int kWhereToPointMask = 0x40;
+
+  // ---------- Misc ----------
+  // Skip.
+  static const int kSkip = 0x1d;
+  // Internal reference encoded as offsets of pc and target from code entry.
+  static const int kInternalReference = 0x1e;
+  static const int kInternalReferenceEncoded = 0x1f;
+  // Do nothing, used for padding.
+  static const int kNop = 0x3d;
+  // Move to next reserved chunk.
+  static const int kNextChunk = 0x3e;
+  // Deferring object content.
+  static const int kDeferred = 0x3f;
+  // Used for the source code of the natives, which is in the executable, but
+  // is referred to from external strings in the snapshot.
+  static const int kNativesStringResource = 0x5d;
+  // Used for the source code for compiled stubs, which is in the executable,
+  // but is referred to from external strings in the snapshot.
+  static const int kExtraNativesStringResource = 0x5e;
+  // A tag emitted at strategic points in the snapshot to delineate sections.
+  // If the deserializer does not find these at the expected moments then it
+  // is an indication that the snapshot and the VM do not fit together.
+  // Examine the build process for architecture, version or configuration
+  // mismatches.
+  static const int kSynchronize = 0x17;
+  // Repeats of variable length.
+  static const int kVariableRepeat = 0x37;
+  // Raw data of variable length.
+  static const int kVariableRawData = 0x57;
+  // Alignment prefixes 0x7d..0x7f
+  static const int kAlignmentPrefix = 0x7d;
+
+  // 0x77 unused
+
+  // ---------- byte code range 0x80..0xff ----------
+  // First 32 root array items.
+  static const int kNumberOfRootArrayConstants = 0x20;
+  // 0x80..0x9f
+  static const int kRootArrayConstants = 0x80;
+  // 0xa0..0xbf
+  static const int kRootArrayConstantsWithSkip = 0xa0;
+  static const int kRootArrayConstantsMask = 0x1f;
+
+  // 8 hot (recently seen or back-referenced) objects with optional skip.
+  static const int kNumberOfHotObjects = 0x08;
+  // 0xc0..0xc7
+  static const int kHotObject = 0xc0;
+  // 0xc8..0xcf
+  static const int kHotObjectWithSkip = 0xc8;
+  static const int kHotObjectMask = 0x07;
+
+  // 32 common raw data lengths.
+  static const int kNumberOfFixedRawData = 0x20;
+  // 0xd0..0xef
+  static const int kFixedRawData = 0xd0;
+  static const int kOnePointerRawData = kFixedRawData;
+  static const int kFixedRawDataStart = kFixedRawData - 1;
+
+  // 16 repeat lengths.
+  static const int kNumberOfFixedRepeat = 0x10;
+  // 0xf0..0xff
+  static const int kFixedRepeat = 0xf0;
+  static const int kFixedRepeatStart = kFixedRepeat - 1;
+
+  // ---------- special values ----------
+  static const int kAnyOldSpace = -1;
+
+  // Sentinel after a new object to indicate that double alignment is needed.
+  static const int kDoubleAlignmentSentinel = 0;
+
+  // Used as index for the attached reference representing the source object.
+  static const int kSourceObjectReference = 0;
+
+  // Used as index for the attached reference representing the global proxy.
+  static const int kGlobalProxyReference = 0;
+
+  // ---------- member variable ----------
+  HotObjectsList hot_objects_;
+};
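
The 0x00..0x7f byte codes above pack a Where value, the space index, and the HowToCode / WhereToPoint bits into a single byte. A standalone sketch (not part of this patch) of how such a byte is composed and taken apart with the masks defined above:

    #include <cassert>

    const int kBackref = 0x08, kFromCode = 0x20, kInnerPointer = 0x40;
    const int kWhereMask = 0x1f, kSpaceMask = 0x07;
    const int kHowToCodeMask = 0x20, kWhereToPointMask = 0x40;

    int main() {
      // A back reference into space 2, encoded for code with an inner pointer.
      int bytecode = kBackref + kFromCode + kInnerPointer + 2;  // == 0x6a
      assert(((bytecode & kWhereMask) & ~kSpaceMask) == kBackref);
      assert((bytecode & kSpaceMask) == 2);
      assert((bytecode & kHowToCodeMask) == kFromCode);
      assert((bytecode & kWhereToPointMask) == kInnerPointer);
      return 0;
    }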
+
+class SerializedData {
+ public:
+  class Reservation {
+   public:
+    explicit Reservation(uint32_t size)
+        : reservation_(ChunkSizeBits::encode(size)) {}
+
+    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
+    bool is_last() const { return IsLastChunkBits::decode(reservation_); }
+
+    void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
+
+   private:
+    uint32_t reservation_;
+  };
+
+  SerializedData(byte* data, int size)
+      : data_(data), size_(size), owns_data_(false) {}
+  SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
+
+  ~SerializedData() {
+    if (owns_data_) DeleteArray<byte>(data_);
+  }
+
+  uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
+
+  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
+  class IsLastChunkBits : public BitField<bool, 31, 1> {};
+
+  static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
+    uint32_t external_refs = table->size();
+    return 0xC0DE0000 ^ external_refs;
+  }
+
+ protected:
+  void SetHeaderValue(int offset, uint32_t value) {
+    uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
+    memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
+  }
+
+  uint32_t GetHeaderValue(int offset) const {
+    uint32_t value;
+    memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
+    return value;
+  }
+
+  void AllocateData(int size);
+
+  static uint32_t ComputeMagicNumber(Isolate* isolate) {
+    return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
+  }
+
+  void SetMagicNumber(Isolate* isolate) {
+    SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
+  }
+
+  static const int kMagicNumberOffset = 0;
+
+  byte* data_;
+  int size_;
+  bool owns_data_;
+};
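
The header accessors above go through memcpy rather than dereferencing a casted pointer, which keeps the uint32_t access well-defined regardless of alignment and aliasing. A standalone equivalent in plain C++ (not part of this patch):

    #include <cstdint>
    #include <cstring>

    uint32_t ReadHeaderValue(const uint8_t* data, int offset) {
      uint32_t value;
      std::memcpy(&value, data + offset, sizeof(value));
      return value;
    }

    void WriteHeaderValue(uint8_t* data, int offset, uint32_t value) {
      std::memcpy(data + offset, &value, sizeof(value));
    }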
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SNAPSHOT_SERIALIZER_COMMON_H_
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
new file mode 100644
index 0000000..4169338
--- /dev/null
+++ b/src/snapshot/serializer.cc
@@ -0,0 +1,770 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/serializer.h"
+
+#include "src/macro-assembler.h"
+#include "src/snapshot/natives.h"
+
+namespace v8 {
+namespace internal {
+
+Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+    : isolate_(isolate),
+      sink_(sink),
+      external_reference_encoder_(isolate),
+      root_index_map_(isolate),
+      recursion_depth_(0),
+      code_address_map_(NULL),
+      large_objects_total_size_(0),
+      seen_large_objects_index_(0) {
+  // The serializer is meant to be used only to generate initial heap images
+  // from a context in which there is only one isolate.
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+    pending_chunk_[i] = 0;
+    max_chunk_size_[i] = static_cast<uint32_t>(
+        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
+  }
+
+#ifdef OBJECT_PRINT
+  if (FLAG_serialization_statistics) {
+    instance_type_count_ = NewArray<int>(kInstanceTypes);
+    instance_type_size_ = NewArray<size_t>(kInstanceTypes);
+    for (int i = 0; i < kInstanceTypes; i++) {
+      instance_type_count_[i] = 0;
+      instance_type_size_[i] = 0;
+    }
+  } else {
+    instance_type_count_ = NULL;
+    instance_type_size_ = NULL;
+  }
+#endif  // OBJECT_PRINT
+}
+
+Serializer::~Serializer() {
+  if (code_address_map_ != NULL) delete code_address_map_;
+#ifdef OBJECT_PRINT
+  if (instance_type_count_ != NULL) {
+    DeleteArray(instance_type_count_);
+    DeleteArray(instance_type_size_);
+  }
+#endif  // OBJECT_PRINT
+}
+
+#ifdef OBJECT_PRINT
+void Serializer::CountInstanceType(Map* map, int size) {
+  int instance_type = map->instance_type();
+  instance_type_count_[instance_type]++;
+  instance_type_size_[instance_type] += size;
+}
+#endif  // OBJECT_PRINT
+
+void Serializer::OutputStatistics(const char* name) {
+  if (!FLAG_serialization_statistics) return;
+  PrintF("%s:\n", name);
+  PrintF("  Spaces (bytes):\n");
+  for (int space = 0; space < kNumberOfSpaces; space++) {
+    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
+  }
+  PrintF("\n");
+  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+    size_t s = pending_chunk_[space];
+    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
+    PrintF("%16" V8_SIZET_PREFIX V8_PTR_PREFIX "d", s);
+  }
+  PrintF("%16d\n", large_objects_total_size_);
+#ifdef OBJECT_PRINT
+  PrintF("  Instance types (count and bytes):\n");
+#define PRINT_INSTANCE_TYPE(Name)                                         \
+  if (instance_type_count_[Name]) {                                       \
+    PrintF("%10d %10" V8_SIZET_PREFIX V8_PTR_PREFIX "d  %s\n",            \
+           instance_type_count_[Name], instance_type_size_[Name], #Name); \
+  }
+  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
+#undef PRINT_INSTANCE_TYPE
+  PrintF("\n");
+#endif  // OBJECT_PRINT
+}
+
+void Serializer::SerializeDeferredObjects() {
+  while (deferred_objects_.length() > 0) {
+    HeapObject* obj = deferred_objects_.RemoveLast();
+    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
+    obj_serializer.SerializeDeferred();
+  }
+  sink_->Put(kSynchronize, "Finished with deferred objects");
+}
+
+void Serializer::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    if ((*current)->IsSmi()) {
+      PutSmi(Smi::cast(*current));
+    } else {
+      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
+    }
+  }
+}
+
+void Serializer::EncodeReservations(
+    List<SerializedData::Reservation>* out) const {
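+  // For each preallocated space, emit the sizes of all completed chunks,
+  // followed by the pending chunk when it is non-empty or when no chunk has
+  // completed yet; the last entry of each space is marked. The total size of
+  // large objects forms a final reservation of its own.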
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+    for (int j = 0; j < completed_chunks_[i].length(); j++) {
+      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
+    }
+
+    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
+      out->Add(SerializedData::Reservation(pending_chunk_[i]));
+    }
+    out->last().mark_as_last();
+  }
+
+  out->Add(SerializedData::Reservation(large_objects_total_size_));
+  out->last().mark_as_last();
+}
+
+#ifdef DEBUG
+bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
+  DCHECK(reference.is_valid());
+  DCHECK(!reference.is_source());
+  DCHECK(!reference.is_global_proxy());
+  AllocationSpace space = reference.space();
+  int chunk_index = reference.chunk_index();
+  if (space == LO_SPACE) {
+    return chunk_index == 0 &&
+           reference.large_object_index() < seen_large_objects_index_;
+  } else if (chunk_index == completed_chunks_[space].length()) {
+    return reference.chunk_offset() < pending_chunk_[space];
+  } else {
+    return chunk_index < completed_chunks_[space].length() &&
+           reference.chunk_offset() < completed_chunks_[space][chunk_index];
+  }
+}
+#endif  // DEBUG
+
+bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
+                                      WhereToPoint where_to_point, int skip) {
+  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
+    // Encode a reference to a hot object by its index in the working set.
+    int index = hot_objects_.Find(obj);
+    if (index != HotObjectsList::kNotFound) {
+      DCHECK(index >= 0 && index < kNumberOfHotObjects);
+      if (FLAG_trace_serializer) {
+        PrintF(" Encoding hot object %d:", index);
+        obj->ShortPrint();
+        PrintF("\n");
+      }
+      if (skip != 0) {
+        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
+        sink_->PutInt(skip, "HotObjectSkipDistance");
+      } else {
+        sink_->Put(kHotObject + index, "HotObject");
+      }
+      return true;
+    }
+  }
+  BackReference back_reference = back_reference_map_.Lookup(obj);
+  if (back_reference.is_valid()) {
+    // Encode the location of an already deserialized object in order to write
+    // its location into a later object.  We can encode the location as an
+    // offset from the start of the deserialized objects or as an offset
+    // backwards from the current allocation pointer.
+    if (back_reference.is_source()) {
+      FlushSkip(skip);
+      if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
+      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
+      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
+      sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
+    } else if (back_reference.is_global_proxy()) {
+      FlushSkip(skip);
+      if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
+      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
+      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
+      sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
+    } else {
+      if (FLAG_trace_serializer) {
+        PrintF(" Encoding back reference to: ");
+        obj->ShortPrint();
+        PrintF("\n");
+      }
+
+      PutAlignmentPrefix(obj);
+      AllocationSpace space = back_reference.space();
+      if (skip == 0) {
+        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
+      } else {
+        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
+                   "BackRefWithSkip");
+        sink_->PutInt(skip, "BackRefSkipDistance");
+      }
+      PutBackReference(obj, back_reference);
+    }
+    return true;
+  }
+  return false;
+}
+
+void Serializer::PutRoot(int root_index, HeapObject* object,
+                         SerializerDeserializer::HowToCode how_to_code,
+                         SerializerDeserializer::WhereToPoint where_to_point,
+                         int skip) {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding root %d:", root_index);
+    object->ShortPrint();
+    PrintF("\n");
+  }
+
+  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
+      root_index < kNumberOfRootArrayConstants &&
+      !isolate()->heap()->InNewSpace(object)) {
+    if (skip == 0) {
+      sink_->Put(kRootArrayConstants + root_index, "RootConstant");
+    } else {
+      sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
+      sink_->PutInt(skip, "SkipInPutRoot");
+    }
+  } else {
+    FlushSkip(skip);
+    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+    sink_->PutInt(root_index, "root_index");
+  }
+}
+
+void Serializer::PutSmi(Smi* smi) {
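+  // A Smi stores its value in the tagged word itself, so its pointer-sized
+  // raw representation is emitted verbatim after the bytecode.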
+  sink_->Put(kOnePointerRawData, "Smi");
+  byte* bytes = reinterpret_cast<byte*>(&smi);
+  for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
+}
+
+void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
+  DCHECK(BackReferenceIsAlreadyAllocated(reference));
+  sink_->PutInt(reference.reference(), "BackRefValue");
+  hot_objects_.Add(object);
+}
+
+int Serializer::PutAlignmentPrefix(HeapObject* object) {
+  AllocationAlignment alignment = object->RequiredAlignment();
+  if (alignment != kWordAligned) {
+    DCHECK(1 <= alignment && alignment <= 3);
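+    // Encode the required alignment relative to kAlignmentPrefix so that the
+    // deserializer can recover it from the bytecode alone.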
+    byte prefix = (kAlignmentPrefix - 1) + alignment;
+    sink_->Put(prefix, "Alignment");
+    return Heap::GetMaximumFillToAlign(alignment);
+  }
+  return 0;
+}
+
+BackReference Serializer::AllocateLargeObject(int size) {
+  // Large objects are allocated one-by-one when deserializing. We do not
+  // have to keep track of multiple chunks.
+  large_objects_total_size_ += size;
+  return BackReference::LargeObjectReference(seen_large_objects_index_++);
+}
+
+BackReference Serializer::Allocate(AllocationSpace space, int size) {
+  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
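+  // The back reference returned below encodes (space, chunk index, offset
+  // within the chunk), which is all the deserializer needs to locate the
+  // object again.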
+  uint32_t new_chunk_size = pending_chunk_[space] + size;
+  if (new_chunk_size > max_chunk_size(space)) {
+    // The new chunk size would not fit onto a single page. Complete the
+    // current chunk and start a new one.
+    sink_->Put(kNextChunk, "NextChunk");
+    sink_->Put(space, "NextChunkSpace");
+    completed_chunks_[space].Add(pending_chunk_[space]);
+    DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
+    pending_chunk_[space] = 0;
+    new_chunk_size = size;
+  }
+  uint32_t offset = pending_chunk_[space];
+  pending_chunk_[space] = new_chunk_size;
+  return BackReference::Reference(space, completed_chunks_[space].length(),
+                                  offset);
+}
+
+void Serializer::Pad() {
+  // The non-branching GetInt will read up to 3 bytes too far, so we need
+  // to pad the snapshot to make sure we don't read over the end.
+  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
+    sink_->Put(kNop, "Padding");
+  }
+  // Pad up to pointer size for checksum.
+  while (!IsAligned(sink_->Position(), kPointerAlignment)) {
+    sink_->Put(kNop, "Padding");
+  }
+}
+
+void Serializer::InitializeCodeAddressMap() {
+  isolate_->InitializeLoggingAndCounters();
+  code_address_map_ = new CodeAddressMap(isolate_);
+}
+
+Code* Serializer::CopyCode(Code* code) {
+  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
+  int size = code->CodeSize();
+  code_buffer_.AddAll(Vector<byte>(code->address(), size));
+  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
+}
+
+bool Serializer::HasNotExceededFirstPageOfEachSpace() {
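+  // As long as no space has completed its first chunk, everything serialized
+  // so far still fits onto the first page of each space.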
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+    if (!completed_chunks_[i].is_empty()) return false;
+  }
+  return true;
+}
+
+void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
+                                                     int size, Map* map) {
+  if (serializer_->code_address_map_) {
+    const char* code_name =
+        serializer_->code_address_map_->Lookup(object_->address());
+    LOG(serializer_->isolate_,
+        CodeNameEvent(object_->address(), sink_->Position(), code_name));
+  }
+
+  BackReference back_reference;
+  if (space == LO_SPACE) {
+    sink_->Put(kNewObject + reference_representation_ + space,
+               "NewLargeObject");
+    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
+    if (object_->IsCode()) {
+      sink_->Put(EXECUTABLE, "executable large object");
+    } else {
+      sink_->Put(NOT_EXECUTABLE, "not executable large object");
+    }
+    back_reference = serializer_->AllocateLargeObject(size);
+  } else {
+    int fill = serializer_->PutAlignmentPrefix(object_);
+    back_reference = serializer_->Allocate(space, size + fill);
+    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
+    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
+  }
+
+#ifdef OBJECT_PRINT
+  if (FLAG_serialization_statistics) {
+    serializer_->CountInstanceType(map, size);
+  }
+#endif  // OBJECT_PRINT
+
+  // Mark this object as already serialized.
+  serializer_->back_reference_map()->Add(object_, back_reference);
+
+  // Serialize the map (first word of the object).
+  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
+}
+
+void Serializer::ObjectSerializer::SerializeExternalString() {
+  // Instead of serializing this as an external string, we serialize
+  // an imaginary sequential string with the same content.
+  Isolate* isolate = serializer_->isolate();
+  DCHECK(object_->IsExternalString());
+  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
+  ExternalString* string = ExternalString::cast(object_);
+  int length = string->length();
+  Map* map;
+  int content_size;
+  int allocation_size;
+  const byte* resource;
+  // Find the map and size for the imaginary sequential string.
+  bool internalized = object_->IsInternalizedString();
+  if (object_->IsExternalOneByteString()) {
+    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
+                       : isolate->heap()->one_byte_string_map();
+    allocation_size = SeqOneByteString::SizeFor(length);
+    content_size = length * kCharSize;
+    resource = reinterpret_cast<const byte*>(
+        ExternalOneByteString::cast(string)->resource()->data());
+  } else {
+    map = internalized ? isolate->heap()->internalized_string_map()
+                       : isolate->heap()->string_map();
+    allocation_size = SeqTwoByteString::SizeFor(length);
+    content_size = length * kShortSize;
+    resource = reinterpret_cast<const byte*>(
+        ExternalTwoByteString::cast(string)->resource()->data());
+  }
+
+  AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
+                              ? LO_SPACE
+                              : OLD_SPACE;
+  SerializePrologue(space, allocation_size, map);
+
+  // Output the rest of the imaginary string.
+  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
+
+  // Output raw data header. Do not bother with common raw length cases here.
+  sink_->Put(kVariableRawData, "RawDataForString");
+  sink_->PutInt(bytes_to_output, "length");
+
+  // Serialize string header (except for map).
+  Address string_start = string->address();
+  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
+    sink_->PutSection(string_start[i], "StringHeader");
+  }
+
+  // Serialize string content.
+  sink_->PutRaw(resource, content_size, "StringContent");
+
+  // Since the allocation size is rounded up to object alignment, there
+  // may be left-over bytes that need to be padded.
+  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
+  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
+  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
+
+  sink_->Put(kSkip, "SkipAfterString");
+  sink_->PutInt(bytes_to_output, "SkipDistance");
+}
+
+// Clear and later restore the next link in the weak cell or allocation site.
+// TODO(all): replace this with proper iteration of weak slots in serializer.
+class UnlinkWeakNextScope {
+ public:
+  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
+    if (object->IsWeakCell()) {
+      object_ = object;
+      next_ = WeakCell::cast(object)->next();
+      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
+    } else if (object->IsAllocationSite()) {
+      object_ = object;
+      next_ = AllocationSite::cast(object)->weak_next();
+      AllocationSite::cast(object)->set_weak_next(
+          object->GetHeap()->undefined_value());
+    }
+  }
+
+  ~UnlinkWeakNextScope() {
+    if (object_ != nullptr) {
+      if (object_->IsWeakCell()) {
+        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+      } else {
+        AllocationSite::cast(object_)->set_weak_next(next_,
+                                                     UPDATE_WEAK_WRITE_BARRIER);
+      }
+    }
+  }
+
+ private:
+  HeapObject* object_;
+  Object* next_;
+  DisallowHeapAllocation no_gc_;
+};
+
+void Serializer::ObjectSerializer::Serialize() {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding heap object: ");
+    object_->ShortPrint();
+    PrintF("\n");
+  }
+
+  // We cannot serialize typed array objects correctly.
+  DCHECK(!object_->IsJSTypedArray());
+
+  // We don't expect fillers.
+  DCHECK(!object_->IsFiller());
+
+  if (object_->IsScript()) {
+    // Clear cached line ends.
+    Object* undefined = serializer_->isolate()->heap()->undefined_value();
+    Script::cast(object_)->set_line_ends(undefined);
+  }
+
+  if (object_->IsExternalString()) {
+    Heap* heap = serializer_->isolate()->heap();
+    if (object_->map() != heap->native_source_string_map()) {
+      // Usually we cannot recreate resources for external strings. To work
+      // around this, external strings are serialized to look like ordinary
+      // sequential strings.
+      // The exceptions are native source code strings, since we can recreate
+      // their resources. In that case we fall through and leave it to
+      // VisitExternalOneByteString further down.
+      SerializeExternalString();
+      return;
+    }
+  }
+
+  int size = object_->Size();
+  Map* map = object_->map();
+  AllocationSpace space =
+      MemoryChunk::FromAddress(object_->address())->owner()->identity();
+  SerializePrologue(space, size, map);
+
+  // Serialize the rest of the object.
+  CHECK_EQ(0, bytes_processed_so_far_);
+  bytes_processed_so_far_ = kPointerSize;
+
+  RecursionScope recursion(serializer_);
+  // Objects that are immediately post processed during deserialization
+  // cannot be deferred, since post processing requires the object content.
+  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
+    serializer_->QueueDeferredObject(object_);
+    sink_->Put(kDeferred, "Deferring object content");
+    return;
+  }
+
+  UnlinkWeakNextScope unlink_weak_next(object_);
+
+  object_->IterateBody(map->instance_type(), size, this);
+  OutputRawData(object_->address() + size);
+}
+
+void Serializer::ObjectSerializer::SerializeDeferred() {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding deferred heap object: ");
+    object_->ShortPrint();
+    PrintF("\n");
+  }
+
+  int size = object_->Size();
+  Map* map = object_->map();
+  BackReference reference = serializer_->back_reference_map()->Lookup(object_);
+
+  // Serialize the rest of the object.
+  CHECK_EQ(0, bytes_processed_so_far_);
+  bytes_processed_so_far_ = kPointerSize;
+
+  serializer_->PutAlignmentPrefix(object_);
+  sink_->Put(kNewObject + reference.space(), "deferred object");
+  serializer_->PutBackReference(object_, reference);
+  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
+
+  UnlinkWeakNextScope unlink_weak_next(object_);
+
+  object_->IterateBody(map->instance_type(), size, this);
+  OutputRawData(object_->address() + size);
+}
+
+void Serializer::ObjectSerializer::VisitPointers(Object** start, Object** end) {
+  Object** current = start;
+  while (current < end) {
+    while (current < end && (*current)->IsSmi()) current++;
+    if (current < end) OutputRawData(reinterpret_cast<Address>(current));
+
+    while (current < end && !(*current)->IsSmi()) {
+      HeapObject* current_contents = HeapObject::cast(*current);
+      int root_index = serializer_->root_index_map()->Lookup(current_contents);
+      // Repeats are not subject to the write barrier so we can only use
+      // immortal immovable root members. They are never in new space.
+      if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
+          Heap::RootIsImmortalImmovable(root_index) &&
+          current_contents == current[-1]) {
+        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
+        int repeat_count = 1;
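+        // Count how many of the following slots hold the same root so that
+        // the whole run can be emitted as a single repeat bytecode.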
+        while (&current[repeat_count] < end - 1 &&
+               current[repeat_count] == current_contents) {
+          repeat_count++;
+        }
+        current += repeat_count;
+        bytes_processed_so_far_ += repeat_count * kPointerSize;
+        if (repeat_count > kNumberOfFixedRepeat) {
+          sink_->Put(kVariableRepeat, "VariableRepeat");
+          sink_->PutInt(repeat_count, "repeat count");
+        } else {
+          sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
+        }
+      } else {
+        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
+                                     0);
+        bytes_processed_so_far_ += kPointerSize;
+        current++;
+      }
+    }
+  }
+}
+
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+  int skip = OutputRawData(rinfo->target_address_address(),
+                           kCanReturnSkipInsteadOfSkipping);
+  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+  Object* object = rinfo->target_object();
+  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
+                               kStartOfObject, skip);
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
+  int skip = OutputRawData(reinterpret_cast<Address>(p),
+                           kCanReturnSkipInsteadOfSkipping);
+  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+  sink_->PutInt(skip, "SkipB4ExternalRef");
+  Address target = *p;
+  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
+  bytes_processed_so_far_ += kPointerSize;
+}
+
+void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
+  int skip = OutputRawData(rinfo->target_address_address(),
+                           kCanReturnSkipInsteadOfSkipping);
+  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
+  sink_->PutInt(skip, "SkipB4ExternalRef");
+  Address target = rinfo->target_external_reference();
+  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
+  // Internal references can only be serialized for code that has already
+  // been output.
+  DCHECK(object_->IsCode() && code_has_been_output_);
+  // We do not use skip from last patched pc to find the pc to patch, since
+  // target_address_address may not return addresses in ascending order when
+  // used for internal references. External references may be stored at the
+  // end of the code in the constant pool, whereas internal references are
+  // inline. That would cause the skip to be negative. Instead, we store the
+  // offset from code entry.
+  Address entry = Code::cast(object_)->entry();
+  intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
+  intptr_t target_offset = rinfo->target_internal_reference() - entry;
+  DCHECK(0 <= pc_offset &&
+         pc_offset <= Code::cast(object_)->instruction_size());
+  DCHECK(0 <= target_offset &&
+         target_offset <= Code::cast(object_)->instruction_size());
+  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
+                 ? kInternalReference
+                 : kInternalReferenceEncoded,
+             "InternalRef");
+  sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
+  sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
+}
+
+void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+  int skip = OutputRawData(rinfo->target_address_address(),
+                           kCanReturnSkipInsteadOfSkipping);
+  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
+  sink_->PutInt(skip, "SkipB4ExternalRef");
+  Address target = rinfo->target_address();
+  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+  int skip = OutputRawData(rinfo->target_address_address(),
+                           kCanReturnSkipInsteadOfSkipping);
+  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
+  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
+  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
+  bytes_processed_so_far_ += kPointerSize;
+}
+
+void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
+  Cell* object = Cell::cast(rinfo->target_cell());
+  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
+  bytes_processed_so_far_ += kPointerSize;
+}
+
+bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
+    int builtin_count,
+    v8::String::ExternalOneByteStringResource** resource_pointer,
+    FixedArray* source_cache, int resource_index) {
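+  // Scan the given source cache for an entry whose external resource matches
+  // the one being visited; if found, emit the resource tag and cache index so
+  // that the deserializer can re-attach the native source string.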
+  for (int i = 0; i < builtin_count; i++) {
+    Object* source = source_cache->get(i);
+    if (!source->IsUndefined()) {
+      ExternalOneByteString* string = ExternalOneByteString::cast(source);
+      typedef v8::String::ExternalOneByteStringResource Resource;
+      const Resource* resource = string->resource();
+      if (resource == *resource_pointer) {
+        sink_->Put(resource_index, "NativesStringResource");
+        sink_->PutSection(i, "NativesStringResourceEnd");
+        bytes_processed_so_far_ += sizeof(resource);
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+void Serializer::ObjectSerializer::VisitExternalOneByteString(
+    v8::String::ExternalOneByteStringResource** resource_pointer) {
+  Address references_start = reinterpret_cast<Address>(resource_pointer);
+  OutputRawData(references_start);
+  if (SerializeExternalNativeSourceString(
+          Natives::GetBuiltinsCount(), resource_pointer,
+          Natives::GetSourceCache(serializer_->isolate()->heap()),
+          kNativesStringResource)) {
+    return;
+  }
+  if (SerializeExternalNativeSourceString(
+          ExtraNatives::GetBuiltinsCount(), resource_pointer,
+          ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
+          kExtraNativesStringResource)) {
+    return;
+  }
+  // One of the strings in the natives cache should match the resource.  We
+  // don't expect any other kinds of external strings here.
+  UNREACHABLE();
+}
+
+Address Serializer::ObjectSerializer::PrepareCode() {
+  // To make snapshots reproducible, we make a copy of the code object
+  // and wipe all pointers in the copy, which we then serialize.
+  Code* original = Code::cast(object_);
+  Code* code = serializer_->CopyCode(original);
+  // Code age headers are not serializable.
+  code->MakeYoung(serializer_->isolate());
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    rinfo->WipeOut();
+  }
+  // We need to wipe out the header fields *after* wiping out the
+  // relocations, because some of these fields are needed for the latter.
+  code->WipeOutHeader();
+  return code->address();
+}
+
+int Serializer::ObjectSerializer::OutputRawData(
+    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
+  Address object_start = object_->address();
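+  // Everything between the last processed byte and up_to is either output as
+  // raw data below or skipped.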
+  int base = bytes_processed_so_far_;
+  int up_to_offset = static_cast<int>(up_to - object_start);
+  int to_skip = up_to_offset - bytes_processed_so_far_;
+  int bytes_to_output = to_skip;
+  bytes_processed_so_far_ += to_skip;
+  // This assert will fail if the reloc info gives us the target_address_address
+  // locations in a non-ascending order.  Luckily that doesn't happen.
+  DCHECK(to_skip >= 0);
+  bool outputting_code = false;
+  bool is_code_object = object_->IsCode();
+  if (to_skip != 0 && is_code_object && !code_has_been_output_) {
+    // Output the code all at once and fix later.
+    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
+    outputting_code = true;
+    code_has_been_output_ = true;
+  }
+  if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
+    if (!outputting_code && bytes_to_output == to_skip &&
+        IsAligned(bytes_to_output, kPointerAlignment) &&
+        bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
+      int size_in_words = bytes_to_output >> kPointerSizeLog2;
+      sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
+      to_skip = 0;  // This instruction includes skip.
+    } else {
+      // We always end up here if we are outputting the code of a code object.
+      sink_->Put(kVariableRawData, "VariableRawData");
+      sink_->PutInt(bytes_to_output, "length");
+    }
+
+    if (is_code_object) object_start = PrepareCode();
+
+    const char* description = is_code_object ? "Code" : "Byte";
+    sink_->PutRaw(object_start + base, bytes_to_output, description);
+  }
+  if (to_skip != 0 && return_skip == kIgnoringReturn) {
+    sink_->Put(kSkip, "Skip");
+    sink_->PutInt(to_skip, "SkipDistance");
+    to_skip = 0;
+  }
+  return to_skip;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/snapshot/serializer.h b/src/snapshot/serializer.h
new file mode 100644
index 0000000..eccbaab
--- /dev/null
+++ b/src/snapshot/serializer.h
@@ -0,0 +1,321 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SERIALIZER_H_
+#define V8_SNAPSHOT_SERIALIZER_H_
+
+#include "src/isolate.h"
+#include "src/log.h"
+#include "src/objects.h"
+#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/snapshot-source-sink.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeAddressMap : public CodeEventLogger {
+ public:
+  explicit CodeAddressMap(Isolate* isolate) : isolate_(isolate) {
+    isolate->logger()->addCodeEventListener(this);
+  }
+
+  ~CodeAddressMap() override {
+    isolate_->logger()->removeCodeEventListener(this);
+  }
+
+  void CodeMoveEvent(AbstractCode* from, Address to) override {
+    address_to_name_map_.Move(from->address(), to);
+  }
+
+  void CodeDisableOptEvent(AbstractCode* code,
+                           SharedFunctionInfo* shared) override {}
+
+  const char* Lookup(Address address) {
+    return address_to_name_map_.Lookup(address);
+  }
+
+ private:
+  class NameMap {
+   public:
+    NameMap() : impl_(HashMap::PointersMatch) {}
+
+    ~NameMap() {
+      for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+        DeleteArray(static_cast<const char*>(p->value));
+      }
+    }
+
+    void Insert(Address code_address, const char* name, int name_size) {
+      HashMap::Entry* entry = FindOrCreateEntry(code_address);
+      if (entry->value == NULL) {
+        entry->value = CopyName(name, name_size);
+      }
+    }
+
+    const char* Lookup(Address code_address) {
+      HashMap::Entry* entry = FindEntry(code_address);
+      return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+    }
+
+    void Remove(Address code_address) {
+      HashMap::Entry* entry = FindEntry(code_address);
+      if (entry != NULL) {
+        DeleteArray(static_cast<char*>(entry->value));
+        RemoveEntry(entry);
+      }
+    }
+
+    void Move(Address from, Address to) {
+      if (from == to) return;
+      HashMap::Entry* from_entry = FindEntry(from);
+      DCHECK(from_entry != NULL);
+      void* value = from_entry->value;
+      RemoveEntry(from_entry);
+      HashMap::Entry* to_entry = FindOrCreateEntry(to);
+      DCHECK(to_entry->value == NULL);
+      to_entry->value = value;
+    }
+
+   private:
+    static char* CopyName(const char* name, int name_size) {
+      char* result = NewArray<char>(name_size + 1);
+      for (int i = 0; i < name_size; ++i) {
+        char c = name[i];
+        if (c == '\0') c = ' ';
+        result[i] = c;
+      }
+      result[name_size] = '\0';
+      return result;
+    }
+
+    HashMap::Entry* FindOrCreateEntry(Address code_address) {
+      return impl_.LookupOrInsert(code_address,
+                                  ComputePointerHash(code_address));
+    }
+
+    HashMap::Entry* FindEntry(Address code_address) {
+      return impl_.Lookup(code_address, ComputePointerHash(code_address));
+    }
+
+    void RemoveEntry(HashMap::Entry* entry) {
+      impl_.Remove(entry->key, entry->hash);
+    }
+
+    HashMap impl_;
+
+    DISALLOW_COPY_AND_ASSIGN(NameMap);
+  };
+
+  void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
+                         const char* name, int length) override {
+    address_to_name_map_.Insert(code->address(), name, length);
+  }
+
+  NameMap address_to_name_map_;
+  Isolate* isolate_;
+};
+
+// There can be only one serializer per V8 process.
+class Serializer : public SerializerDeserializer {
+ public:
+  Serializer(Isolate* isolate, SnapshotByteSink* sink);
+  ~Serializer() override;
+
+  void EncodeReservations(List<SerializedData::Reservation>* out) const;
+
+  void SerializeDeferredObjects();
+
+  Isolate* isolate() const { return isolate_; }
+
+  BackReferenceMap* back_reference_map() { return &back_reference_map_; }
+  RootIndexMap* root_index_map() { return &root_index_map_; }
+
+#ifdef OBJECT_PRINT
+  void CountInstanceType(Map* map, int size);
+#endif  // OBJECT_PRINT
+
+ protected:
+  class ObjectSerializer;
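+  // Bounds the nesting depth of SerializeObject calls. When the limit is
+  // exceeded, object bodies are deferred and serialized later to keep the
+  // native stack shallow.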
+  class RecursionScope {
+   public:
+    explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
+      serializer_->recursion_depth_++;
+    }
+    ~RecursionScope() { serializer_->recursion_depth_--; }
+    bool ExceedsMaximum() {
+      return serializer_->recursion_depth_ >= kMaxRecursionDepth;
+    }
+
+   private:
+    static const int kMaxRecursionDepth = 32;
+    Serializer* serializer_;
+  };
+
+  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
+                               WhereToPoint where_to_point, int skip) = 0;
+
+  void VisitPointers(Object** start, Object** end) override;
+
+  void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
+               int skip);
+
+  void PutSmi(Smi* smi);
+
+  void PutBackReference(HeapObject* object, BackReference reference);
+
+  // Emit alignment prefix if necessary, return required padding space in bytes.
+  int PutAlignmentPrefix(HeapObject* object);
+
+  // Returns true if the object was successfully serialized.
+  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
+                            WhereToPoint where_to_point, int skip);
+
+  inline void FlushSkip(int skip) {
+    if (skip != 0) {
+      sink_->Put(kSkip, "SkipFromSerializeObject");
+      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+    }
+  }
+
+  bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
+
+  // Reserve space for an object; returns a back reference to its location.
+  BackReference AllocateLargeObject(int size);
+  BackReference Allocate(AllocationSpace space, int size);
+  int EncodeExternalReference(Address addr) {
+    return external_reference_encoder_.Encode(addr);
+  }
+
+  bool HasNotExceededFirstPageOfEachSpace();
+
+  // GetInt reads 4 bytes at once, requiring padding at the end.
+  void Pad();
+
+  // We may not need the code address map for logging for every instance
+  // of the serializer.  Initialize it on demand.
+  void InitializeCodeAddressMap();
+
+  Code* CopyCode(Code* code);
+
+  inline uint32_t max_chunk_size(int space) const {
+    DCHECK_LE(0, space);
+    DCHECK_LT(space, kNumberOfSpaces);
+    return max_chunk_size_[space];
+  }
+
+  SnapshotByteSink* sink() const { return sink_; }
+
+  void QueueDeferredObject(HeapObject* obj) {
+    DCHECK(back_reference_map_.Lookup(obj).is_valid());
+    deferred_objects_.Add(obj);
+  }
+
+  void OutputStatistics(const char* name);
+
+  Isolate* isolate_;
+
+  SnapshotByteSink* sink_;
+  ExternalReferenceEncoder external_reference_encoder_;
+
+  BackReferenceMap back_reference_map_;
+  RootIndexMap root_index_map_;
+
+  int recursion_depth_;
+
+  friend class Deserializer;
+  friend class ObjectSerializer;
+  friend class RecursionScope;
+  friend class SnapshotData;
+
+ private:
+  CodeAddressMap* code_address_map_;
+  // Objects from the same space are put into chunks for bulk-allocation
+  // when deserializing. We have to make sure that each chunk fits into a
+  // page. So we track the chunk size in pending_chunk_ of a space, but
+  // when it exceeds a page, we complete the current chunk and start a new one.
+  uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
+  List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
+  uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
+
+  // We map serialized large objects to indexes for back-referencing.
+  uint32_t large_objects_total_size_;
+  uint32_t seen_large_objects_index_;
+
+  List<byte> code_buffer_;
+
+  // To handle stack overflow.
+  List<HeapObject*> deferred_objects_;
+
+#ifdef OBJECT_PRINT
+  static const int kInstanceTypes = 256;
+  int* instance_type_count_;
+  size_t* instance_type_size_;
+#endif  // OBJECT_PRINT
+
+  DISALLOW_COPY_AND_ASSIGN(Serializer);
+};
+
+class Serializer::ObjectSerializer : public ObjectVisitor {
+ public:
+  ObjectSerializer(Serializer* serializer, HeapObject* obj,
+                   SnapshotByteSink* sink, HowToCode how_to_code,
+                   WhereToPoint where_to_point)
+      : serializer_(serializer),
+        object_(obj),
+        sink_(sink),
+        reference_representation_(how_to_code + where_to_point),
+        bytes_processed_so_far_(0),
+        code_has_been_output_(false) {}
+  ~ObjectSerializer() override {}
+  void Serialize();
+  void SerializeDeferred();
+  void VisitPointers(Object** start, Object** end) override;
+  void VisitEmbeddedPointer(RelocInfo* target) override;
+  void VisitExternalReference(Address* p) override;
+  void VisitExternalReference(RelocInfo* rinfo) override;
+  void VisitInternalReference(RelocInfo* rinfo) override;
+  void VisitCodeTarget(RelocInfo* target) override;
+  void VisitCodeEntry(Address entry_address) override;
+  void VisitCell(RelocInfo* rinfo) override;
+  void VisitRuntimeEntry(RelocInfo* reloc) override;
+  // Used for serializing the external strings that hold the natives source.
+  void VisitExternalOneByteString(
+      v8::String::ExternalOneByteStringResource** resource) override;
+  // We can't serialize a heap with external two byte strings.
+  void VisitExternalTwoByteString(
+      v8::String::ExternalStringResource** resource) override {
+    UNREACHABLE();
+  }
+
+ private:
+  void SerializePrologue(AllocationSpace space, int size, Map* map);
+
+  bool SerializeExternalNativeSourceString(
+      int builtin_count,
+      v8::String::ExternalOneByteStringResource** resource_pointer,
+      FixedArray* source_cache, int resource_index);
+
+  enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
+  // This function outputs or skips the raw data between the last pointer and
+  // up to the current position.  It optionally can just return the number of
+  // bytes to skip instead of performing a skip instruction, in case the skip
+  // can be merged into the next instruction.
+  int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
+  // External strings are serialized in a way to resemble sequential strings.
+  void SerializeExternalString();
+
+  Address PrepareCode();
+
+  Serializer* serializer_;
+  HeapObject* object_;
+  SnapshotByteSink* sink_;
+  int reference_representation_;
+  int bytes_processed_so_far_;
+  bool code_has_been_output_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SNAPSHOT_SERIALIZER_H_
diff --git a/src/snapshot/snapshot-common.cc b/src/snapshot/snapshot-common.cc
index 97e7c6b..eb3bdb5 100644
--- a/src/snapshot/snapshot-common.cc
+++ b/src/snapshot/snapshot-common.cc
@@ -9,6 +9,9 @@
 #include "src/api.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-source-sink.h"
+#include "src/version.h"
 
 namespace v8 {
 namespace internal {
@@ -228,5 +231,52 @@
   int context_length = data->raw_size - context_offset;
   return Vector<const byte>(context_data, context_length);
 }
+
+SnapshotData::SnapshotData(const Serializer& ser) {
+  DisallowHeapAllocation no_gc;
+  List<Reservation> reservations;
+  ser.EncodeReservations(&reservations);
+  const List<byte>& payload = ser.sink()->data();
+
+  // Calculate sizes.
+  int reservation_size = reservations.length() * kInt32Size;
+  int size = kHeaderSize + reservation_size + payload.length();
+
+  // Allocate backing store and create result data.
+  AllocateData(size);
+
+  // Set header values.
+  SetMagicNumber(ser.isolate());
+  SetHeaderValue(kCheckSumOffset, Version::Hash());
+  SetHeaderValue(kNumReservationsOffset, reservations.length());
+  SetHeaderValue(kPayloadLengthOffset, payload.length());
+
+  // Copy reservation chunk sizes.
+  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+            reservation_size);
+
+  // Copy serialized data.
+  CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
+            static_cast<size_t>(payload.length()));
+}
+
+bool SnapshotData::IsSane() {
+  return GetHeaderValue(kCheckSumOffset) == Version::Hash();
+}
+
+Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
+  return Vector<const Reservation>(
+      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
+      GetHeaderValue(kNumReservationsOffset));
+}
+
+Vector<const byte> SnapshotData::Payload() const {
+  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
+  const byte* payload = data_ + kHeaderSize + reservations_size;
+  int length = GetHeaderValue(kPayloadLengthOffset);
+  DCHECK_EQ(data_ + size_, payload + length);
+  return Vector<const byte>(payload, length);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/snapshot/snapshot-external.cc b/src/snapshot/snapshot-external.cc
index 1d5476c..67dcb60 100644
--- a/src/snapshot/snapshot-external.cc
+++ b/src/snapshot/snapshot-external.cc
@@ -7,7 +7,6 @@
 #include "src/snapshot/snapshot.h"
 
 #include "src/base/platform/mutex.h"
-#include "src/snapshot/serialize.h"
 #include "src/snapshot/snapshot-source-sink.h"
 #include "src/v8.h"  // for V8::Initialize
 
diff --git a/src/snapshot/snapshot-source-sink.cc b/src/snapshot/snapshot-source-sink.cc
index 812de5e..cee5875 100644
--- a/src/snapshot/snapshot-source-sink.cc
+++ b/src/snapshot/snapshot-source-sink.cc
@@ -7,7 +7,6 @@
 
 #include "src/base/logging.h"
 #include "src/handles-inl.h"
-#include "src/snapshot/serialize.h"  // for SerializerDeserializer::nop()
 
 
 namespace v8 {
diff --git a/src/snapshot/snapshot.h b/src/snapshot/snapshot.h
index d99f118..c648d75 100644
--- a/src/snapshot/snapshot.h
+++ b/src/snapshot/snapshot.h
@@ -5,7 +5,8 @@
 #ifndef V8_SNAPSHOT_SNAPSHOT_H_
 #define V8_SNAPSHOT_SNAPSHOT_H_
 
-#include "src/snapshot/serialize.h"
+#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/startup-serializer.h"
 
 namespace v8 {
 namespace internal {
@@ -88,6 +89,41 @@
 void SetSnapshotFromFile(StartupData* snapshot_blob);
 #endif
 
+// Wrapper around reservation sizes and the serialization payload.
+class SnapshotData : public SerializedData {
+ public:
+  // Used when producing.
+  explicit SnapshotData(const Serializer& ser);
+
+  // Used when consuming.
+  explicit SnapshotData(const Vector<const byte> snapshot)
+      : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
+    CHECK(IsSane());
+  }
+
+  Vector<const Reservation> Reservations() const;
+  Vector<const byte> Payload() const;
+
+  Vector<const byte> RawData() const {
+    return Vector<const byte>(data_, size_);
+  }
+
+ private:
+  bool IsSane();
+
+  // The data header consists of uint32_t-sized entries:
+  // [0] magic number and external reference count
+  // [1] version hash
+  // [2] number of reservation size entries
+  // [3] payload length
+  // ... reservations
+  // ... serialized payload
+  static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
+  static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
+  static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
+  static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/snapshot/startup-serializer.cc b/src/snapshot/startup-serializer.cc
new file mode 100644
index 0000000..fab01f5
--- /dev/null
+++ b/src/snapshot/startup-serializer.cc
@@ -0,0 +1,167 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/startup-serializer.h"
+
+#include "src/objects-inl.h"
+#include "src/v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+StartupSerializer::StartupSerializer(
+    Isolate* isolate, SnapshotByteSink* sink,
+    FunctionCodeHandling function_code_handling)
+    : Serializer(isolate, sink),
+      function_code_handling_(function_code_handling),
+      serializing_builtins_(false) {
+  InitializeCodeAddressMap();
+}
+
+StartupSerializer::~StartupSerializer() {
+  OutputStatistics("StartupSerializer");
+}
+
+void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
+                                        WhereToPoint where_to_point, int skip) {
+  DCHECK(!obj->IsJSFunction());
+
+  if (function_code_handling_ == CLEAR_FUNCTION_CODE) {
+    if (obj->IsCode()) {
+      Code* code = Code::cast(obj);
+      // If the function code is compiled (either as native code or bytecode),
+      // replace it with the lazy-compile builtin. The only exception is when
+      // we are serializing the canonical interpreter-entry-trampoline builtin.
+      if (code->kind() == Code::FUNCTION ||
+          (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+        obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+      }
+    } else if (obj->IsBytecodeArray()) {
+      obj = isolate()->heap()->undefined_value();
+    }
+  } else if (obj->IsCode()) {
+    DCHECK_EQ(KEEP_FUNCTION_CODE, function_code_handling_);
+    Code* code = Code::cast(obj);
+    if (code->kind() == Code::FUNCTION) {
+      code->ClearInlineCaches();
+      code->set_profiler_ticks(0);
+    }
+  }
+
+  int root_index = root_index_map_.Lookup(obj);
+  // We can only encode roots as such if they have already been serialized.
+  // That applies to root indices below the wave front.
+  if (root_index != RootIndexMap::kInvalidRootIndex) {
+    if (root_has_been_serialized_.test(root_index)) {
+      PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+      return;
+    }
+  }
+
+  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+
+  FlushSkip(skip);
+
+  // Object has not yet been serialized.  Serialize it here.
+  ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
+                                     where_to_point);
+  object_serializer.Serialize();
+
+  if (serializing_immortal_immovables_roots_ &&
+      root_index != RootIndexMap::kInvalidRootIndex) {
+    // Make sure that the immortal immovable root has been included in the first
+    // chunk of its reserved space, so that it is deserialized onto the first
+    // page of its space and stays immortal immovable.
+    BackReference ref = back_reference_map_.Lookup(obj);
+    CHECK(ref.is_valid() && ref.chunk_index() == 0);
+  }
+}
+
+void StartupSerializer::SerializeWeakReferencesAndDeferred() {
+  // This comes right after serialization of the partial snapshot, where we
+  // add entries to the partial snapshot cache of the startup snapshot. Add
+  // one entry with 'undefined' to terminate the partial snapshot cache.
+  Object* undefined = isolate()->heap()->undefined_value();
+  VisitPointer(&undefined);
+  isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
+  SerializeDeferredObjects();
+  Pad();
+}
+
+void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+  // We expect the builtins tag after builtins have been serialized.
+  DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
+  serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
+  sink_->Put(kSynchronize, "Synchronize");
+}
+
+void StartupSerializer::SerializeStrongReferences() {
+  Isolate* isolate = this->isolate();
+  // No active threads.
+  CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
+  // No active or weak handles.
+  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
+  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
+  CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
+  // We don't support serializing installed extensions.
+  CHECK(!isolate->has_installed_extensions());
+  // First visit immortal immovable roots to make sure they end up on the
+  // first page.
+  serializing_immortal_immovables_roots_ = true;
+  isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
+  // Check that immortal immovable roots are allocated on the first page.
+  CHECK(HasNotExceededFirstPageOfEachSpace());
+  serializing_immortal_immovables_roots_ = false;
+  // Visit the rest of the strong roots.
+  // Clear the stack limits to make the snapshot reproducible.
+  // Reset it again afterwards.
+  isolate->heap()->ClearStackLimits();
+  isolate->heap()->IterateSmiRoots(this);
+  isolate->heap()->SetStackLimits();
+
+  isolate->heap()->IterateStrongRoots(this,
+                                      VISIT_ONLY_STRONG_FOR_SERIALIZATION);
+}
+
+void StartupSerializer::VisitPointers(Object** start, Object** end) {
+  if (start == isolate()->heap()->roots_array_start()) {
+    // Serializing the root list needs special handling:
+    // - The first pass over the root list only serializes immortal immovables.
+    // - The second pass over the root list serializes the rest.
+    // - Only root list elements that have been fully serialized can be
+    //   referenced as roots by using kRootArray bytecodes.
+    int skip = 0;
+    for (Object** current = start; current < end; current++) {
+      int root_index = static_cast<int>(current - start);
+      if (RootShouldBeSkipped(root_index)) {
+        skip += kPointerSize;
+        continue;
+      } else {
+        if ((*current)->IsSmi()) {
+          FlushSkip(skip);
+          PutSmi(Smi::cast(*current));
+        } else {
+          SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject,
+                          skip);
+        }
+        root_has_been_serialized_.set(root_index);
+        skip = 0;
+      }
+    }
+    FlushSkip(skip);
+  } else {
+    Serializer::VisitPointers(start, end);
+  }
+}
+
+bool StartupSerializer::RootShouldBeSkipped(int root_index) {
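+  // The stack limit roots depend on the current stack and are reset after
+  // deserialization, so they are never serialized. Every other root belongs
+  // to exactly one of the two root-list passes.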
+  if (root_index == Heap::kStackLimitRootIndex ||
+      root_index == Heap::kRealStackLimitRootIndex) {
+    return true;
+  }
+  return Heap::RootIsImmortalImmovable(root_index) !=
+         serializing_immortal_immovables_roots_;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/snapshot/startup-serializer.h b/src/snapshot/startup-serializer.h
new file mode 100644
index 0000000..71b8475
--- /dev/null
+++ b/src/snapshot/startup-serializer.h
@@ -0,0 +1,55 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_STARTUP_SERIALIZER_H_
+#define V8_SNAPSHOT_STARTUP_SERIALIZER_H_
+
+#include <bitset>
+#include "src/snapshot/serializer.h"
+
+namespace v8 {
+namespace internal {
+
+class StartupSerializer : public Serializer {
+ public:
+  enum FunctionCodeHandling { CLEAR_FUNCTION_CODE, KEEP_FUNCTION_CODE };
+
+  StartupSerializer(
+      Isolate* isolate, SnapshotByteSink* sink,
+      FunctionCodeHandling function_code_handling = CLEAR_FUNCTION_CODE);
+  ~StartupSerializer() override;
+
+  // Serialize the current state of the heap.  The order is:
+  // 1) Immortal immovable roots
+  // 2) Remaining strong references.
+  // 3) Partial snapshot cache.
+  // 4) Weak references (e.g. the string table).
+  void SerializeStrongReferences();
+  void SerializeWeakReferencesAndDeferred();
+
+ private:
+  // The StartupSerializer has to serialize the root array, which is slightly
+  // different.
+  void VisitPointers(Object** start, Object** end) override;
+  void SerializeObject(HeapObject* o, HowToCode how_to_code,
+                       WhereToPoint where_to_point, int skip) override;
+  void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
+  // Some roots should not be serialized, because their actual value depends on
+  // absolute addresses and they are reset after deserialization anyway.
+  // In the first pass over the root list, we only serialize immortal immovable
+  // roots. In the second pass, we serialize the rest.
+  bool RootShouldBeSkipped(int root_index);
+
+  FunctionCodeHandling function_code_handling_;
+  bool serializing_builtins_;
+  bool serializing_immortal_immovables_roots_;
+  std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
+  DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SNAPSHOT_STARTUP_SERIALIZER_H_
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 43be8f1..02f6f1c 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -537,6 +537,7 @@
     for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
                                 PrototypeIterator::START_AT_RECEIVER);
          !iter.IsAtEnd(); iter.Advance()) {
+      if (iter.GetCurrent()->IsJSProxy()) break;
       Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
       if (!key->IsUndefined()) {
         if (!name->IsString() ||
diff --git a/src/tracing/trace-event.h b/src/tracing/trace-event.h
index d17f785..2b3bf9b 100644
--- a/src/tracing/trace-event.h
+++ b/src/tracing/trace-event.h
@@ -10,6 +10,7 @@
 #include "base/trace_event/common/trace_event_common.h"
 #include "include/v8-platform.h"
 #include "src/base/atomicops.h"
+#include "src/base/macros.h"
 
 // This header file defines implementation details of how the trace macros in
 // trace_event_common.h collect and store trace events. Anything not
@@ -42,6 +43,11 @@
 // macros. Use this macro to prevent Process ID mangling.
 #define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id)
 
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+  trace_event_internal::TraceID::WithScope(scope, id)
+
 // Sets the current sample state to the given category and name (both must be
 // constant strings). These states are intended for a sampling profiler.
 // Implementation note: we store category and name together because we don't
@@ -106,6 +112,7 @@
 //                    char phase,
 //                    const uint8_t* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    uint64_t id,
 //                    uint64_t bind_id,
 //                    int num_args,
@@ -181,26 +188,26 @@
     if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {  \
       v8::internal::tracing::AddTraceEvent(                                  \
           phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,     \
-          v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, flags, \
-          ##__VA_ARGS__);                                                    \
+          v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId, \
+          v8::internal::tracing::kNoId, flags, ##__VA_ARGS__);               \
     }                                                                        \
   } while (0)
 
 // Implementation detail: internal macro to create static category and add begin
 // event if the category is enabled. Also adds the end event when the scope
 // ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...)          \
-  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                   \
-  v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer);     \
-  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {   \
-    uint64_t h = v8::internal::tracing::AddTraceEvent(                      \
-        TRACE_EVENT_PHASE_COMPLETE,                                         \
-        INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,             \
-        v8::internal::tracing::kNoId, v8::internal::tracing::kNoId,         \
-        TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__);                              \
-    INTERNAL_TRACE_EVENT_UID(tracer)                                        \
-        .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-                    h);                                                     \
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...)           \
+  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                    \
+  v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer);      \
+  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {    \
+    uint64_t h = v8::internal::tracing::AddTraceEvent(                       \
+        TRACE_EVENT_PHASE_COMPLETE,                                          \
+        INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,              \
+        v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,   \
+        v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+    INTERNAL_TRACE_EVENT_UID(tracer)                                         \
+        .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,  \
+                    h);                                                      \
   }
 
 #define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name,     \
@@ -214,8 +221,8 @@
     uint64_t h = v8::internal::tracing::AddTraceEvent(                      \
         TRACE_EVENT_PHASE_COMPLETE,                                         \
         INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,             \
-        v8::internal::tracing::kNoId, trace_event_bind_id.data(),           \
-        trace_event_flags, ##__VA_ARGS__);                                  \
+        v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,  \
+        trace_event_bind_id.raw_id(), trace_event_flags, ##__VA_ARGS__);    \
     INTERNAL_TRACE_EVENT_UID(tracer)                                        \
         .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
                     h);                                                     \
@@ -233,8 +240,8 @@
                                                           &trace_event_flags); \
       v8::internal::tracing::AddTraceEvent(                                    \
           phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,       \
-          trace_event_trace_id.data(), v8::internal::tracing::kNoId,           \
-          trace_event_flags, ##__VA_ARGS__);                                   \
+          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),         \
+          v8::internal::tracing::kNoId, trace_event_flags, ##__VA_ARGS__);     \
     }                                                                          \
   } while (0)
 
@@ -254,6 +261,27 @@
     phase, category_group, name, id, thread_id, timestamp, flags, ...) \
   UNIMPLEMENTED()
 
+// Enter and leave a context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+  struct INTERNAL_TRACE_EVENT_UID(ScopedContext) {                         \
+   public:                                                                 \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) {    \
+      TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+    ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() {                           \
+      TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+                                                                           \
+   private:                                                                \
+    /* Local class friendly DISALLOW_COPY_AND_ASSIGN */                    \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)                                \
+    (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}                    \
+    void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}      \
+    uint64_t cid_;                                                         \
+  };                                                                       \
+  INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
+  INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
 namespace v8 {
 namespace internal {
 namespace tracing {
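For readers unfamiliar with the idiom, INTERNAL_TRACE_EVENT_SCOPED_CONTEXT above expands into a local RAII type that enters the context on construction and leaves it on destruction. A simplified, self-contained sketch of the same shape (EnterContext/LeaveContext are stand-ins for the trace macros, not V8 API):

  #include <cstdint>
  #include <cstdio>

  // Stand-ins for TRACE_EVENT_ENTER_CONTEXT / TRACE_EVENT_LEAVE_CONTEXT.
  static void EnterContext(uint64_t cid) {
    std::printf("enter %llu\n", static_cast<unsigned long long>(cid));
  }
  static void LeaveContext(uint64_t cid) {
    std::printf("leave %llu\n", static_cast<unsigned long long>(cid));
  }

  class ScopedContext {
   public:
    explicit ScopedContext(uint64_t cid) : cid_(cid) { EnterContext(cid_); }
    ~ScopedContext() { LeaveContext(cid_); }
    ScopedContext(const ScopedContext&) = delete;  // copying disabled, as in the macro
    void operator=(const ScopedContext&) = delete;

   private:
    uint64_t cid_;
  };

  int main() {
    ScopedContext scoped_context(0x1234);  // leaves the context when this scope ends
    return 0;
  }
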
@@ -261,6 +289,7 @@
 // Specify these values when the corresponding argument of AddTraceEvent is not
 // used.
 const int kZeroNumArgs = 0;
+const decltype(nullptr) kGlobalScope = nullptr;
 const uint64_t kNoId = 0;
 
 class TraceEventHelper {
@@ -273,70 +302,108 @@
 // collide when the same pointer is used on different processes.
 class TraceID {
  public:
-  class DontMangle {
+  class WithScope {
    public:
-    explicit DontMangle(const void* id)
-        : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {}
-    explicit DontMangle(uint64_t id) : data_(id) {}
-    explicit DontMangle(unsigned int id) : data_(id) {}
-    explicit DontMangle(uint16_t id) : data_(id) {}
-    explicit DontMangle(unsigned char id) : data_(id) {}
-    explicit DontMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
-    explicit DontMangle(int id) : data_(static_cast<uint64_t>(id)) {}
-    explicit DontMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
-    explicit DontMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
-    uint64_t data() const { return data_; }
+    WithScope(const char* scope, uint64_t raw_id)
+        : scope_(scope), raw_id_(raw_id) {}
+    uint64_t raw_id() const { return raw_id_; }
+    const char* scope() const { return scope_; }
 
    private:
-    uint64_t data_;
+    const char* scope_ = nullptr;
+    uint64_t raw_id_;
+  };
+
+  class DontMangle {
+   public:
+    explicit DontMangle(const void* raw_id)
+        : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit DontMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(int64_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(int16_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(signed char raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit DontMangle(WithScope scoped_id)
+        : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+    const char* scope() const { return scope_; }
+    uint64_t raw_id() const { return raw_id_; }
+
+   private:
+    const char* scope_ = nullptr;
+    uint64_t raw_id_;
   };
 
   class ForceMangle {
    public:
-    explicit ForceMangle(uint64_t id) : data_(id) {}
-    explicit ForceMangle(unsigned int id) : data_(id) {}
-    explicit ForceMangle(uint16_t id) : data_(id) {}
-    explicit ForceMangle(unsigned char id) : data_(id) {}
-    explicit ForceMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
-    explicit ForceMangle(int id) : data_(static_cast<uint64_t>(id)) {}
-    explicit ForceMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
-    explicit ForceMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
-    uint64_t data() const { return data_; }
+    explicit ForceMangle(uint64_t raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(uint16_t raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(int64_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit ForceMangle(int raw_id) : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit ForceMangle(int16_t raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    explicit ForceMangle(signed char raw_id)
+        : raw_id_(static_cast<uint64_t>(raw_id)) {}
+    uint64_t raw_id() const { return raw_id_; }
 
    private:
-    uint64_t data_;
+    uint64_t raw_id_;
   };
 
-  TraceID(const void* id, unsigned int* flags)
-      : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+  TraceID(const void* raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {
     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) {
+  TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(DontMangle id, unsigned int* flags) : data_(id.data()) {}
-  TraceID(uint64_t id, unsigned int* flags) : data_(id) { (void)flags; }
-  TraceID(unsigned int id, unsigned int* flags) : data_(id) { (void)flags; }
-  TraceID(uint16_t id, unsigned int* flags) : data_(id) { (void)flags; }
-  TraceID(unsigned char id, unsigned int* flags) : data_(id) { (void)flags; }
-  TraceID(int64_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+  TraceID(DontMangle maybe_scoped_id, unsigned int* flags)
+      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+  TraceID(uint64_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
     (void)flags;
   }
-  TraceID(int id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+  TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
     (void)flags;
   }
-  TraceID(int16_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+  TraceID(uint16_t raw_id, unsigned int* flags) : raw_id_(raw_id) {
     (void)flags;
   }
-  TraceID(signed char id, unsigned int* flags)
-      : data_(static_cast<uint64_t>(id)) {
+  TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
     (void)flags;
   }
+  TraceID(int64_t raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(int raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(int16_t raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(signed char raw_id, unsigned int* flags)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {
+    (void)flags;
+  }
+  TraceID(WithScope scoped_id, unsigned int* flags)
+      : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
 
-  uint64_t data() const { return data_; }
+  uint64_t raw_id() const { return raw_id_; }
+  const char* scope() const { return scope_; }
 
  private:
-  uint64_t data_;
+  const char* scope_ = nullptr;
+  uint64_t raw_id_;
 };
 
 // Simple union to store various types as uint64_t.
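A brief illustration of the behavioral difference introduced above (usage only; it assumes this header and TRACE_EVENT_FLAG_MANGLE_ID from the common header): pointer-based ids still request process-local mangling via the flags out-parameter, while scoped ids carry their scope string and raw value through unchanged.

  unsigned int flags = 0;
  int dummy = 0;
  // Pointer ids set TRACE_EVENT_FLAG_MANGLE_ID on construction.
  v8::internal::tracing::TraceID from_ptr(static_cast<const void*>(&dummy), &flags);
  // from_ptr.scope() == nullptr; flags now includes TRACE_EVENT_FLAG_MANGLE_ID.

  // Scoped ids keep the raw value and remember the scope string instead.
  v8::internal::tracing::TraceID scoped(
      v8::internal::tracing::TraceID::WithScope("http", 42), &flags);
  // scoped.raw_id() == 42 and scoped.scope() == "http".
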
@@ -407,34 +474,33 @@
 
 static V8_INLINE uint64_t AddTraceEvent(char phase,
                                         const uint8_t* category_group_enabled,
-                                        const char* name, uint64_t id,
-                                        uint64_t bind_id, unsigned int flags) {
+                                        const char* name, const char* scope,
+                                        uint64_t id, uint64_t bind_id,
+                                        unsigned int flags) {
   return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
-                                         id, bind_id, kZeroNumArgs, NULL, NULL,
-                                         NULL, flags);
+                                         scope, id, bind_id, kZeroNumArgs, NULL,
+                                         NULL, NULL, flags);
 }
 
 template <class ARG1_TYPE>
-static V8_INLINE uint64_t AddTraceEvent(char phase,
-                                        const uint8_t* category_group_enabled,
-                                        const char* name, uint64_t id,
-                                        uint64_t bind_id, unsigned int flags,
-                                        const char* arg1_name,
-                                        const ARG1_TYPE& arg1_val) {
+static V8_INLINE uint64_t AddTraceEvent(
+    char phase, const uint8_t* category_group_enabled, const char* name,
+    const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+    const char* arg1_name, const ARG1_TYPE& arg1_val) {
   const int num_args = 1;
   uint8_t arg_types[1];
   uint64_t arg_values[1];
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
-  return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
-                                         id, bind_id, num_args, &arg1_name,
-                                         arg_types, arg_values, flags);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT(
+      phase, category_group_enabled, name, scope, id, bind_id, num_args,
+      &arg1_name, arg_types, arg_values, flags);
 }
 
 template <class ARG1_TYPE, class ARG2_TYPE>
 static V8_INLINE uint64_t AddTraceEvent(
     char phase, const uint8_t* category_group_enabled, const char* name,
-    uint64_t id, uint64_t bind_id, unsigned int flags, const char* arg1_name,
-    const ARG1_TYPE& arg1_val, const char* arg2_name,
+    const char* scope, uint64_t id, uint64_t bind_id, unsigned int flags,
+    const char* arg1_name, const ARG1_TYPE& arg1_val, const char* arg2_name,
     const ARG2_TYPE& arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = {arg1_name, arg2_name};
@@ -442,9 +508,9 @@
   uint64_t arg_values[2];
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
-  return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
-                                         id, bind_id, num_args, arg_names,
-                                         arg_types, arg_values, flags);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT(
+      phase, category_group_enabled, name, scope, id, bind_id, num_args,
+      arg_names, arg_types, arg_values, flags);
 }
 
 // Used by TRACE_EVENTx macros. Do not use directly.
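The overload family above follows one pattern: count the arguments, pack each into a (type tag, 64-bit payload) pair, and forward everything to the single C-style backend. A simplified self-contained analogue of that shape (PackValue and Backend are stand-ins for SetTraceValue and TRACE_EVENT_API_ADD_TRACE_EVENT, not the real API):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Stand-in for SetTraceValue: encode an argument as a type tag plus payload.
  enum : uint8_t { kTypeInt = 1, kTypeDouble = 2 };
  static void PackValue(int v, uint8_t* type, uint64_t* out) {
    *type = kTypeInt;
    *out = static_cast<uint64_t>(v);
  }
  static void PackValue(double v, uint8_t* type, uint64_t* out) {
    *type = kTypeDouble;
    std::memcpy(out, &v, sizeof v);
  }

  // Stand-in for the single add-trace-event entry point (expects num_args >= 1).
  static void Backend(const char* name, int num_args, const char* const* arg_names,
                      const uint8_t* arg_types, const uint64_t* arg_values) {
    std::printf("%s: %d arg(s), first is '%s' (type %u)\n", name, num_args,
                arg_names[0], static_cast<unsigned>(arg_types[0]));
    (void)arg_values;
  }

  // Same shape as the one-argument AddTraceEvent overload.
  template <class ARG1_TYPE>
  void AddEvent(const char* name, const char* arg1_name, const ARG1_TYPE& arg1_val) {
    const int num_args = 1;
    uint8_t arg_types[1];
    uint64_t arg_values[1];
    PackValue(arg1_val, &arg_types[0], &arg_values[0]);
    Backend(name, num_args, &arg1_name, arg_types, arg_values);
  }

  int main() {
    AddEvent("V8.Example", "count", 7);
    return 0;
  }
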
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 9424497..ea02d61 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -114,7 +114,6 @@
          name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
          name == heap->elements_transition_symbol() ||
          name == heap->strict_function_transition_symbol() ||
-         name == heap->strong_function_transition_symbol() ||
          name == heap->observed_symbol();
 }
 #endif
diff --git a/src/transitions.cc b/src/transitions.cc
index e63769e..082ebc1 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -395,7 +395,6 @@
                                                   int slack) {
   Handle<FixedArray> array = isolate->factory()->NewTransitionArray(
       LengthFor(number_of_transitions + slack));
-  array->set(kNextLinkIndex, isolate->heap()->undefined_value());
   array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
   array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
   return Handle<TransitionArray>::cast(array);
diff --git a/src/type-cache.h b/src/type-cache.h
index 8bd35c0..2a95df9 100644
--- a/src/type-cache.h
+++ b/src/type-cache.h
@@ -13,12 +13,13 @@
 class TypeCache final {
  private:
   // This has to be first for the initialization magic to work.
+  base::AccountingAllocator allocator;
   Zone zone_;
 
  public:
   static TypeCache const& Get();
 
-  TypeCache() = default;
+  TypeCache() : zone_(&allocator) {}
 
   Type* const kInt8 =
       CreateNative(CreateRange<int8_t>(), Type::UntaggedIntegral8());
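The "initialization magic" comment is doing real work here: non-static data members are constructed in declaration order, so the allocator has to be declared before the Zone that is constructed from it. A minimal self-contained sketch of the constraint (Allocator and Zone are stand-ins, not the V8 types):

  // Stand-in types; v8's Zone/AccountingAllocator behave analogously with
  // respect to construction order.
  struct Allocator {};

  struct Zone {
    explicit Zone(Allocator* a) : allocator_(a) {}
    Allocator* allocator_;
  };

  class Cache {
    // Declared (and therefore constructed) before zone_; swapping these two
    // declarations would hand Zone a pointer to a not-yet-constructed member.
    Allocator allocator_;
    Zone zone_;

   public:
    Cache() : zone_(&allocator_) {}
  };

  int main() {
    Cache cache;
    (void)cache;
    return 0;
  }
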
diff --git a/src/type-feedback-vector-inl.h b/src/type-feedback-vector-inl.h
index 97df1b9..015104e 100644
--- a/src/type-feedback-vector-inl.h
+++ b/src/type-feedback-vector-inl.h
@@ -133,23 +133,19 @@
   *generic = gen;
 }
 
-
-Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
+Handle<Symbol> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
   return isolate->factory()->uninitialized_symbol();
 }
 
-
-Handle<Object> TypeFeedbackVector::MegamorphicSentinel(Isolate* isolate) {
+Handle<Symbol> TypeFeedbackVector::MegamorphicSentinel(Isolate* isolate) {
   return isolate->factory()->megamorphic_symbol();
 }
 
-
-Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
+Handle<Symbol> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
   return isolate->factory()->premonomorphic_symbol();
 }
 
-
-Object* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
+Symbol* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
   return isolate->heap()->uninitialized_symbol();
 }
 
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
index effbfe7..770b5e5 100644
--- a/src/type-feedback-vector.h
+++ b/src/type-feedback-vector.h
@@ -231,17 +231,17 @@
   void ClearKeyedStoreICs(SharedFunctionInfo* shared);
 
   // The object that indicates an uninitialized cache.
-  static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+  static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
 
   // The object that indicates a megamorphic state.
-  static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+  static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
 
   // The object that indicates a premonomorphic state.
-  static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
+  static inline Handle<Symbol> PremonomorphicSentinel(Isolate* isolate);
 
   // A raw version of the uninitialized sentinel that's safe to read during
   // garbage collection (e.g., for patching the cache).
-  static inline Object* RawUninitializedSentinel(Isolate* isolate);
+  static inline Symbol* RawUninitializedSentinel(Isolate* isolate);
 
   static const int kDummyLoadICSlot = 0;
   static const int kDummyKeyedLoadICSlot = 2;
diff --git a/src/type-info.cc b/src/type-info.cc
index ad25342..9087576 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -214,10 +214,6 @@
     *left_type = CompareICState::StateToType(zone(), stub.left());
     *right_type = CompareICState::StateToType(zone(), stub.right());
     *combined_type = CompareICState::StateToType(zone(), stub.state(), map);
-  } else if (code->is_compare_nil_ic_stub()) {
-    CompareNilICStub stub(isolate(), code->extra_ic_state());
-    *combined_type = stub.GetType(zone(), map);
-    *left_type = *right_type = stub.GetInputType(zone(), map);
   }
 }
 
@@ -457,7 +453,6 @@
       case Code::BINARY_OP_IC:
       case Code::COMPARE_IC:
       case Code::TO_BOOLEAN_IC:
-      case Code::COMPARE_NIL_IC:
         SetInfo(ast_id, target);
         break;
 
diff --git a/src/type-info.h b/src/type-info.h
index 13a7f88..c4b0928 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -71,7 +71,7 @@
   Handle<JSFunction> GetCallNewTarget(FeedbackVectorSlot slot);
   Handle<AllocationSite> GetCallNewAllocationSite(FeedbackVectorSlot slot);
 
-  // TODO(1571) We can't use ToBooleanStub::Types as the return value because
+  // TODO(1571) We can't use ToBooleanICStub::Types as the return value because
   // of various cycles in our headers. Death to tons of implementations in
   // headers!! :-P
   uint16_t ToBooleanTypes(TypeFeedbackId id);
diff --git a/src/types.cc b/src/types.cc
index d54826e..49c9418 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -191,24 +191,29 @@
              map == heap->uninitialized_map() ||
              map == heap->no_interceptor_result_sentinel_map() ||
              map == heap->termination_exception_map() ||
-             map == heap->arguments_marker_map());
+             map == heap->arguments_marker_map() ||
+             map == heap->optimized_out_map());
       return kInternal & kTaggedPointer;
     }
     case HEAP_NUMBER_TYPE:
       return kNumber & kTaggedPointer;
     case SIMD128_VALUE_TYPE:
       return kSimd;
+    case JS_OBJECT_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_SPECIAL_API_OBJECT_TYPE:
+      if (map->is_undetectable()) return kOtherUndetectable;
+      return kOtherObject;
     case JS_VALUE_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
     case JS_DATE_TYPE:
-    case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_TYPE:
-    case JS_GLOBAL_OBJECT_TYPE:
-    case JS_GLOBAL_PROXY_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
+    case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
     case JS_TYPED_ARRAY_TYPE:
     case JS_DATA_VIEW_TYPE:
     case JS_SET_TYPE:
@@ -219,26 +224,15 @@
     case JS_WEAK_SET_TYPE:
     case JS_PROMISE_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
-      if (map->is_undetectable()) return kUndetectable;
+      DCHECK(!map->is_undetectable());
       return kOtherObject;
     case JS_FUNCTION_TYPE:
-      if (map->is_undetectable()) return kUndetectable;
+      DCHECK(!map->is_undetectable());
       return kFunction;
-    case JS_REGEXP_TYPE:
-      return kOtherObject;  // TODO(rossberg): there should be a RegExp type.
     case JS_PROXY_TYPE:
+      DCHECK(!map->is_undetectable());
       return kProxy;
     case MAP_TYPE:
-      // When compiling stub templates, the meta map is used as a place holder
-      // for the actual map with which the template is later instantiated.
-      // We treat it as a kind of type variable whose upper bound is Any.
-      // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
-      // we must exclude Undetectable here. This makes no sense, really,
-      // because it means that the template isn't actually parametric.
-      // Also, it doesn't apply elsewhere. 8-(
-      // We ought to find a cleaner solution for compiling stubs parameterised
-      // over type or class variables, esp ones with bounds...
-      return kDetectable & kTaggedPointer;
     case ALLOCATION_SITE_TYPE:
     case ACCESSOR_INFO_TYPE:
     case SHARED_FUNCTION_INFO_TYPE:
diff --git a/src/types.h b/src/types.h
index 9984ad8..8061410 100644
--- a/src/types.h
+++ b/src/types.h
@@ -42,8 +42,8 @@
 //   Array < Object
 //   Function < Object
 //   RegExp < Object
-//   Undetectable < Object
-//   Detectable = Receiver \/ Number \/ Name - Undetectable
+//   OtherUndetectable < Object
+//   DetectableReceiver = Receiver - OtherUndetectable
 //
 //   Class(map) < T   iff instance_type(map) < T
 //   Constant(x) < T  iff instance_type(map(x)) < T
@@ -195,8 +195,8 @@
   V(InternalizedString,  1u << 13 | REPRESENTATION(kTaggedPointer)) \
   V(OtherString,         1u << 14 | REPRESENTATION(kTaggedPointer)) \
   V(Simd,                1u << 15 | REPRESENTATION(kTaggedPointer)) \
-  V(Undetectable,        1u << 16 | REPRESENTATION(kTaggedPointer)) \
   V(OtherObject,         1u << 17 | REPRESENTATION(kTaggedPointer)) \
+  V(OtherUndetectable,   1u << 16 | REPRESENTATION(kTaggedPointer)) \
   V(Proxy,               1u << 18 | REPRESENTATION(kTaggedPointer)) \
   V(Function,            1u << 19 | REPRESENTATION(kTaggedPointer)) \
   V(Internal,            1u << 20 | REPRESENTATION(kTagged | kUntagged)) \
@@ -218,13 +218,13 @@
   V(BooleanOrNumber,          kBoolean | kNumber) \
   V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
   V(NullOrUndefined,          kNull | kUndefined) \
+  V(Undetectable,             kNullOrUndefined | kOtherUndetectable) \
   V(NumberOrString,           kNumber | kString) \
   V(NumberOrUndefined,        kNumber | kUndefined) \
   V(PlainPrimitive,           kNumberOrString | kBoolean | kNullOrUndefined) \
   V(Primitive,                kSymbol | kSimd | kPlainPrimitive) \
   V(DetectableReceiver,       kFunction | kOtherObject | kProxy) \
-  V(Detectable,               kDetectableReceiver | kNumber | kName) \
-  V(Object,                   kFunction | kOtherObject | kUndetectable) \
+  V(Object,                   kFunction | kOtherObject | kOtherUndetectable) \
   V(Receiver,                 kObject | kProxy) \
   V(StringOrReceiver,         kString | kReceiver) \
   V(Unique,                   kBoolean | kUniqueName | kNull | kUndefined | \
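Because every semantic type in this table is a union of single-bit leaf types, the effect of the reshuffle is easiest to see as plain mask arithmetic. A self-contained sketch using the bit positions visible above; the kNull/kUndefined positions are illustrative placeholders and the representation bits are omitted:

  #include <cstdint>
  #include <cstdio>

  // Leaf bits taken from the macro table above (representation bits dropped).
  constexpr uint32_t kNull              = 1u << 3;   // illustrative position
  constexpr uint32_t kUndefined         = 1u << 4;   // illustrative position
  constexpr uint32_t kOtherUndetectable = 1u << 16;
  constexpr uint32_t kOtherObject       = 1u << 17;
  constexpr uint32_t kProxy             = 1u << 18;
  constexpr uint32_t kFunction          = 1u << 19;

  constexpr uint32_t kNullOrUndefined    = kNull | kUndefined;
  // After this change, Undetectable means "null, undefined, or an undetectable
  // object", and DetectableReceiver excludes exactly the undetectable objects.
  constexpr uint32_t kUndetectable       = kNullOrUndefined | kOtherUndetectable;
  constexpr uint32_t kDetectableReceiver = kFunction | kOtherObject | kProxy;
  constexpr uint32_t kObject             = kFunction | kOtherObject | kOtherUndetectable;

  int main() {
    std::printf("Undetectable=0x%x DetectableReceiver=0x%x Object=0x%x\n",
                static_cast<unsigned>(kUndetectable),
                static_cast<unsigned>(kDetectableReceiver),
                static_cast<unsigned>(kObject));
    return 0;
  }
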
diff --git a/src/typing-asm.cc b/src/typing-asm.cc
index ddb608f..7482c4f 100644
--- a/src/typing-asm.cc
+++ b/src/typing-asm.cc
@@ -690,7 +690,7 @@
     expected_type_ = target_type;
     VisitVariableProxy(expr->target()->AsVariableProxy(), true);
   } else if (expr->target()->IsProperty()) {
-    int value_intish = intish_;
+    int32_t value_intish = intish_;
     Property* property = expr->target()->AsProperty();
     RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
                                  "bad propety object"));
@@ -781,7 +781,7 @@
                                      "array index expected to be integer"));
         Literal* right = bin->right()->AsLiteral();
         if (right == NULL || right->raw_value()->ContainsDot()) {
-          FAIL(right, "heap access shift must be integer");
+          FAIL(bin->right(), "heap access shift must be integer");
         }
         RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
                                      "array shift expected to be integer"));
@@ -934,6 +934,54 @@
   FAIL(expr, "invalid property access");
 }
 
+void AsmTyper::CheckPolymorphicStdlibArguments(
+    enum StandardMember standard_member, ZoneList<Expression*>* args) {
+  if (args->length() == 0) {
+    return;
+  }
+  // Handle polymorphic stdlib functions specially.
+  Expression* arg0 = args->at(0);
+  Type* arg0_type = arg0->bounds().upper;
+  switch (standard_member) {
+    case kMathFround: {
+      if (!arg0_type->Is(cache_.kAsmFloat) &&
+          !arg0_type->Is(cache_.kAsmDouble) &&
+          !arg0_type->Is(cache_.kAsmSigned) &&
+          !arg0_type->Is(cache_.kAsmUnsigned)) {
+        FAIL(arg0, "illegal function argument type");
+      }
+      break;
+    }
+    case kMathCeil:
+    case kMathFloor:
+    case kMathSqrt: {
+      if (!arg0_type->Is(cache_.kAsmFloat) &&
+          !arg0_type->Is(cache_.kAsmDouble)) {
+        FAIL(arg0, "illegal function argument type");
+      }
+      break;
+    }
+    case kMathAbs:
+    case kMathMin:
+    case kMathMax: {
+      if (!arg0_type->Is(cache_.kAsmFloat) &&
+          !arg0_type->Is(cache_.kAsmDouble) &&
+          !arg0_type->Is(cache_.kAsmSigned)) {
+        FAIL(arg0, "illegal function argument type");
+      }
+      if (args->length() > 1) {
+        Type* other = Type::Intersect(args->at(0)->bounds().upper,
+                                      args->at(1)->bounds().upper, zone());
+        if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
+            !other->Is(cache_.kAsmSigned)) {
+          FAIL(arg0, "function arguments types don't match");
+        }
+      }
+      break;
+    }
+    default: { break; }
+  }
+}
 
 void AsmTyper::VisitCall(Call* expr) {
   Type* expected_type = expected_type_;
@@ -956,7 +1004,6 @@
     ZoneList<Expression*>* args = expr->arguments();
     if (Type::Any()->Is(result_type)) {
       // For foreign calls.
-      ZoneList<Expression*>* args = expr->arguments();
       for (int i = 0; i < args->length(); ++i) {
         Expression* arg = args->at(i);
         RECURSE(VisitWithExpectation(
@@ -988,29 +1035,7 @@
           result_type = computed_type_;
         }
       }
-      // Handle polymorphic stdlib functions specially.
-      if (standard_member == kMathCeil || standard_member == kMathFloor ||
-          standard_member == kMathSqrt) {
-        if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
-            !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
-          FAIL(expr, "illegal function argument type");
-        }
-      } else if (standard_member == kMathAbs || standard_member == kMathMin ||
-                 standard_member == kMathMax) {
-        if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
-            !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
-            !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
-          FAIL(expr, "illegal function argument type");
-        }
-        if (args->length() > 1) {
-          Type* other = Type::Intersect(args->at(0)->bounds().upper,
-                                        args->at(1)->bounds().upper, zone());
-          if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
-              !other->Is(cache_.kAsmSigned)) {
-            FAIL(expr, "function arguments types don't match");
-          }
-        }
-      }
+      RECURSE(CheckPolymorphicStdlibArguments(standard_member, args));
       intish_ = 0;
       IntersectResult(expr, result_type);
     }
@@ -1083,7 +1108,7 @@
                                            Type* result_type, bool conversion) {
   RECURSE(VisitWithExpectation(expr->left(), Type::Number(),
                                "left bitwise operand expected to be a number"));
-  int left_intish = intish_;
+  int32_t left_intish = intish_;
   Type* left_type = computed_type_;
   if (!left_type->Is(left_expected)) {
     FAIL(expr->left(), "left bitwise operand expected to be an integer");
@@ -1095,7 +1120,7 @@
   RECURSE(
       VisitWithExpectation(expr->right(), Type::Number(),
                            "right bitwise operand expected to be a number"));
-  int right_intish = intish_;
+  int32_t right_intish = intish_;
   Type* right_type = computed_type_;
   if (!right_type->Is(right_expected)) {
     FAIL(expr->right(), "right bitwise operand expected to be an integer");
@@ -1113,7 +1138,7 @@
     right_type = left_type;
   }
   if (!conversion) {
-    if (!left_type->Is(right_type) || !right_type->Is(left_type)) {
+    if (!left_type->Is(cache_.kAsmIntQ) || !right_type->Is(cache_.kAsmIntQ)) {
       FAIL(expr, "ill-typed bitwise operation");
     }
   }
@@ -1157,11 +1182,16 @@
       FAIL(expr, "illegal logical operator");
     case Token::BIT_OR: {
       // BIT_OR allows Any since it is used as a type coercion.
-      VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmInt,
-                                  cache_.kAsmSigned, true);
-      if (expr->left()->IsCall() && expr->op() == Token::BIT_OR) {
+      RECURSE(VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmIntQ,
+                                          cache_.kAsmSigned, true));
+      if (expr->left()->IsCall() && expr->op() == Token::BIT_OR &&
+          Type::Number()->Is(expr->left()->bounds().upper)) {
+        // Force the return types of foreign functions.
         expr->left()->set_bounds(Bounds(cache_.kAsmSigned));
       }
+      if (in_function_ && !expr->left()->bounds().upper->Is(cache_.kAsmIntQ)) {
+        FAIL(expr->left(), "intish required");
+      }
       return;
     }
     case Token::BIT_XOR: {
@@ -1170,7 +1200,7 @@
       if (left && left->value()->IsBoolean()) {
         if (left->ToBooleanIsTrue()) {
           left->set_bounds(Bounds(cache_.kSingletonOne));
-          RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmInt,
+          RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmIntQ,
                                        "not operator expects an integer"));
           IntersectResult(expr, cache_.kAsmSigned);
           return;
@@ -1178,21 +1208,21 @@
           FAIL(left, "unexpected false");
         }
       }
-      // BIT_XOR allows Number since it is used as a type coercion (via ~~).
-      VisitIntegerBitwiseOperator(expr, Type::Number(), cache_.kAsmInt,
-                                  cache_.kAsmSigned, true);
+      // BIT_XOR allows Any since it is used as a type coercion (via ~~).
+      RECURSE(VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmIntQ,
+                                          cache_.kAsmSigned, true));
       return;
     }
     case Token::SHR: {
-      VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
-                                  cache_.kAsmUnsigned, false);
+      RECURSE(VisitIntegerBitwiseOperator(
+          expr, cache_.kAsmIntQ, cache_.kAsmIntQ, cache_.kAsmUnsigned, false));
       return;
     }
     case Token::SHL:
     case Token::SAR:
     case Token::BIT_AND: {
-      VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
-                                  cache_.kAsmSigned, false);
+      RECURSE(VisitIntegerBitwiseOperator(
+          expr, cache_.kAsmIntQ, cache_.kAsmIntQ, cache_.kAsmSigned, false));
       return;
     }
     case Token::ADD:
@@ -1204,28 +1234,33 @@
           expr->left(), Type::Number(),
           "left arithmetic operand expected to be number"));
       Type* left_type = computed_type_;
-      int left_intish = intish_;
+      int32_t left_intish = intish_;
       RECURSE(VisitWithExpectation(
           expr->right(), Type::Number(),
           "right arithmetic operand expected to be number"));
       Type* right_type = computed_type_;
-      int right_intish = intish_;
+      int32_t right_intish = intish_;
       Type* type = Type::Union(left_type, right_type, zone());
       if (type->Is(cache_.kAsmInt)) {
         if (expr->op() == Token::MUL) {
-          Literal* right = expr->right()->AsLiteral();
-          if (!right) {
-            FAIL(expr, "direct integer multiply forbidden");
-          }
-          if (!right->value()->IsNumber()) {
-            FAIL(expr, "multiply must be by an integer");
-          }
           int32_t i;
-          if (!right->value()->ToInt32(&i)) {
-            FAIL(expr, "multiply must be a signed integer");
+          Literal* left = expr->left()->AsLiteral();
+          Literal* right = expr->right()->AsLiteral();
+          if (left != nullptr && left->value()->IsNumber() &&
+              left->value()->ToInt32(&i)) {
+            if (right_intish != 0) {
+              FAIL(expr, "intish not allowed in multiply");
+            }
+          } else if (right != nullptr && right->value()->IsNumber() &&
+                     right->value()->ToInt32(&i)) {
+            if (left_intish != 0) {
+              FAIL(expr, "intish not allowed in multiply");
+            }
+          } else {
+            FAIL(expr, "multiply must be by an integer literal");
           }
           i = abs(i);
-          if (i >= 1 << 20) {
+          if (i >= (1 << 20)) {
             FAIL(expr, "multiply must be by value in -2^20 < n < 2^20");
           }
           intish_ = i;
@@ -1246,11 +1281,36 @@
           return;
         }
       } else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
-                 right_type->Is(cache_.kAsmDouble)) {
+                 right_type->Is(cache_.kAsmDouble) &&
+                 expr->right()->AsLiteral()->raw_value()->ContainsDot() &&
+                 expr->right()->AsLiteral()->raw_value()->AsNumber() == 1.0) {
         // For unary +, expressed as x * 1.0
-        if (expr->left()->IsCall() && expr->op() == Token::MUL) {
+        if (expr->left()->IsCall() &&
+            Type::Number()->Is(expr->left()->bounds().upper)) {
+          // Force the return types of foreign functions.
           expr->left()->set_bounds(Bounds(cache_.kAsmDouble));
+          left_type = expr->left()->bounds().upper;
         }
+        if (!(expr->left()->IsProperty() &&
+              Type::Number()->Is(expr->left()->bounds().upper))) {
+          if (!left_type->Is(cache_.kAsmSigned) &&
+              !left_type->Is(cache_.kAsmUnsigned) &&
+              !left_type->Is(cache_.kAsmFixnum) &&
+              !left_type->Is(cache_.kAsmFloatQ) &&
+              !left_type->Is(cache_.kAsmDoubleQ)) {
+            FAIL(
+                expr->left(),
+                "unary + only allowed on signed, unsigned, float?, or double?");
+          }
+        }
+        IntersectResult(expr, cache_.kAsmDouble);
+        return;
+      } else if (expr->op() == Token::MUL && left_type->Is(cache_.kAsmDouble) &&
+                 expr->right()->IsLiteral() &&
+                 !expr->right()->AsLiteral()->raw_value()->ContainsDot() &&
+                 expr->right()->AsLiteral()->raw_value()->AsNumber() == -1.0) {
+        // For unary -, expressed as x * -1
+        expr->right()->set_bounds(Bounds(cache_.kAsmDouble));
         IntersectResult(expr, cache_.kAsmDouble);
         return;
       } else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
@@ -1493,8 +1553,6 @@
     if (!entry && in_function_) {
       entry =
           global_variable_type_.Lookup(variable, ComputePointerHash(variable));
-      if (entry && entry->value) {
-      }
     }
   }
   if (!entry) return NULL;
diff --git a/src/typing-asm.h b/src/typing-asm.h
index 54796ed..c7984b2 100644
--- a/src/typing-asm.h
+++ b/src/typing-asm.h
@@ -92,7 +92,7 @@
   Type* expected_type_;
   Type* computed_type_;
   VariableInfo* property_info_;
-  int intish_;  // How many ops we've gone without a x|0.
+  int32_t intish_;  // How many ops we've gone without a x|0.
 
   Type* return_type_;  // Return type of last function.
   size_t array_size_;  // Array size of last ArrayLiteral.
@@ -135,6 +135,9 @@
 
   void VisitHeapAccess(Property* expr, bool assigning, Type* assignment_type);
 
+  void CheckPolymorphicStdlibArguments(enum StandardMember standard_member,
+                                       ZoneList<Expression*>* args);
+
   Expression* GetReceiverOfPropertyAccess(Expression* expr, const char* name);
   bool IsMathObject(Expression* expr);
   bool IsSIMDObject(Expression* expr);
diff --git a/src/utils.h b/src/utils.h
index d779979..44865ed 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -210,6 +210,30 @@
   return std::floor(x);
 }
 
+inline double Pow(double x, double y) {
+#if (defined(__MINGW64_VERSION_MAJOR) &&                              \
+     (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
+    defined(V8_OS_AIX)
+  // MinGW64 and AIX have a custom implementation for pow.  This handles certain
+  // special cases that are different.
+  if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
+    double f;
+    double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
+    /* retain sign if odd integer exponent */
+    return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
+               ? copysign(result, x)
+               : result;
+  }
+
+  if (x == 2.0) {
+    int y_int = static_cast<int>(y);
+    if (y == y_int) {
+      return std::ldexp(1.0, y_int);
+    }
+  }
+#endif
+  return std::pow(x, y);
+}
 
 // TODO(svenpanne) Clean up the whole power-of-2 mess.
 inline int32_t WhichPowerOf2Abs(int32_t x) {
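The branch above exists because some libm builds (MinGW64, AIX) disagree with the IEEE-754 corner cases for pow. A few hedged examples of the values the wrapper is meant to guarantee, plus the base-2 fast path, runnable anywhere with a conforming std::pow:

  #include <cmath>
  #include <cstdio>

  int main() {
    // IEEE-754 corner cases the special-case code reproduces:
    //   pow(+0.0, -3.0) -> +inf     pow(-0.0, -3.0) -> -inf (odd exponent keeps the sign)
    //   pow(+0.0, +5.0) -> +0.0     pow(inf, -2.0)  -> +0.0
    // The x == 2.0 fast path is just an exponent load:
    std::printf("%f %f\n", std::ldexp(1.0, 10), std::pow(2.0, 10.0));  // both print 1024
    return 0;
  }
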
@@ -548,240 +572,6 @@
   T buffer_[kSize];
 };
 
-
-/*
- * A class that collects values into a backing store.
- * Specialized versions of the class can allow access to the backing store
- * in different ways.
- * There is no guarantee that the backing store is contiguous (and, as a
- * consequence, no guarantees that consecutively added elements are adjacent
- * in memory). The collector may move elements unless it has guaranteed not
- * to.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class Collector {
- public:
-  explicit Collector(int initial_capacity = kMinCapacity)
-      : index_(0), size_(0) {
-    current_chunk_ = Vector<T>::New(initial_capacity);
-  }
-
-  virtual ~Collector() {
-    // Free backing store (in reverse allocation order).
-    current_chunk_.Dispose();
-    for (int i = chunks_.length() - 1; i >= 0; i--) {
-      chunks_.at(i).Dispose();
-    }
-  }
-
-  // Add a single element.
-  inline void Add(T value) {
-    if (index_ >= current_chunk_.length()) {
-      Grow(1);
-    }
-    current_chunk_[index_] = value;
-    index_++;
-    size_++;
-  }
-
-  // Add a block of contiguous elements and return a Vector backed by the
-  // memory area.
-  // A basic Collector will keep this vector valid as long as the Collector
-  // is alive.
-  inline Vector<T> AddBlock(int size, T initial_value) {
-    DCHECK(size > 0);
-    if (size > current_chunk_.length() - index_) {
-      Grow(size);
-    }
-    T* position = current_chunk_.start() + index_;
-    index_ += size;
-    size_ += size;
-    for (int i = 0; i < size; i++) {
-      position[i] = initial_value;
-    }
-    return Vector<T>(position, size);
-  }
-
-
-  // Add a contiguous block of elements and return a vector backed
-  // by the added block.
-  // A basic Collector will keep this vector valid as long as the Collector
-  // is alive.
-  inline Vector<T> AddBlock(Vector<const T> source) {
-    if (source.length() > current_chunk_.length() - index_) {
-      Grow(source.length());
-    }
-    T* position = current_chunk_.start() + index_;
-    index_ += source.length();
-    size_ += source.length();
-    for (int i = 0; i < source.length(); i++) {
-      position[i] = source[i];
-    }
-    return Vector<T>(position, source.length());
-  }
-
-
-  // Write the contents of the collector into the provided vector.
-  void WriteTo(Vector<T> destination) {
-    DCHECK(size_ <= destination.length());
-    int position = 0;
-    for (int i = 0; i < chunks_.length(); i++) {
-      Vector<T> chunk = chunks_.at(i);
-      for (int j = 0; j < chunk.length(); j++) {
-        destination[position] = chunk[j];
-        position++;
-      }
-    }
-    for (int i = 0; i < index_; i++) {
-      destination[position] = current_chunk_[i];
-      position++;
-    }
-  }
-
-  // Allocate a single contiguous vector, copy all the collected
-  // elements to the vector, and return it.
-  // The caller is responsible for freeing the memory of the returned
-  // vector (e.g., using Vector::Dispose).
-  Vector<T> ToVector() {
-    Vector<T> new_store = Vector<T>::New(size_);
-    WriteTo(new_store);
-    return new_store;
-  }
-
-  // Resets the collector to be empty.
-  virtual void Reset() {
-    for (int i = chunks_.length() - 1; i >= 0; i--) {
-      chunks_.at(i).Dispose();
-    }
-    chunks_.Rewind(0);
-    index_ = 0;
-    size_ = 0;
-  }
-
-  // Total number of elements added to collector so far.
-  inline int size() { return size_; }
-
- protected:
-  static const int kMinCapacity = 16;
-  List<Vector<T> > chunks_;
-  Vector<T> current_chunk_;  // Block of memory currently being written into.
-  int index_;  // Current index in current chunk.
-  int size_;  // Total number of elements in collector.
-
-  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
-  void Grow(int min_capacity) {
-    DCHECK(growth_factor > 1);
-    int new_capacity;
-    int current_length = current_chunk_.length();
-    if (current_length < kMinCapacity) {
-      // The collector started out as empty.
-      new_capacity = min_capacity * growth_factor;
-      if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
-    } else {
-      int growth = current_length * (growth_factor - 1);
-      if (growth > max_growth) {
-        growth = max_growth;
-      }
-      new_capacity = current_length + growth;
-      if (new_capacity < min_capacity) {
-        new_capacity = min_capacity + growth;
-      }
-    }
-    NewChunk(new_capacity);
-    DCHECK(index_ + min_capacity <= current_chunk_.length());
-  }
-
-  // Before replacing the current chunk, give a subclass the option to move
-  // some of the current data into the new chunk. The function may update
-  // the current index_ value to represent data no longer in the current chunk.
-  // Returns the initial index of the new chunk (after copied data).
-  virtual void NewChunk(int new_capacity)  {
-    Vector<T> new_chunk = Vector<T>::New(new_capacity);
-    if (index_ > 0) {
-      chunks_.Add(current_chunk_.SubVector(0, index_));
-    } else {
-      current_chunk_.Dispose();
-    }
-    current_chunk_ = new_chunk;
-    index_ = 0;
-  }
-};
-
-
-/*
- * A collector that allows sequences of values to be guaranteed to
- * stay consecutive.
- * If the backing store grows while a sequence is active, the current
- * sequence might be moved, but after the sequence is ended, it will
- * not move again.
- * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
- * as well, if inside an active sequence where another element is added.
- */
-template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
-class SequenceCollector : public Collector<T, growth_factor, max_growth> {
- public:
-  explicit SequenceCollector(int initial_capacity)
-      : Collector<T, growth_factor, max_growth>(initial_capacity),
-        sequence_start_(kNoSequence) { }
-
-  virtual ~SequenceCollector() {}
-
-  void StartSequence() {
-    DCHECK(sequence_start_ == kNoSequence);
-    sequence_start_ = this->index_;
-  }
-
-  Vector<T> EndSequence() {
-    DCHECK(sequence_start_ != kNoSequence);
-    int sequence_start = sequence_start_;
-    sequence_start_ = kNoSequence;
-    if (sequence_start == this->index_) return Vector<T>();
-    return this->current_chunk_.SubVector(sequence_start, this->index_);
-  }
-
-  // Drops the currently added sequence, and all collected elements in it.
-  void DropSequence() {
-    DCHECK(sequence_start_ != kNoSequence);
-    int sequence_length = this->index_ - sequence_start_;
-    this->index_ = sequence_start_;
-    this->size_ -= sequence_length;
-    sequence_start_ = kNoSequence;
-  }
-
-  virtual void Reset() {
-    sequence_start_ = kNoSequence;
-    this->Collector<T, growth_factor, max_growth>::Reset();
-  }
-
- private:
-  static const int kNoSequence = -1;
-  int sequence_start_;
-
-  // Move the currently active sequence to the new chunk.
-  virtual void NewChunk(int new_capacity) {
-    if (sequence_start_ == kNoSequence) {
-      // Fall back on default behavior if no sequence has been started.
-      this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
-      return;
-    }
-    int sequence_length = this->index_ - sequence_start_;
-    Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
-    DCHECK(sequence_length < new_chunk.length());
-    for (int i = 0; i < sequence_length; i++) {
-      new_chunk[i] = this->current_chunk_[sequence_start_ + i];
-    }
-    if (sequence_start_ > 0) {
-      this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
-    } else {
-      this->current_chunk_.Dispose();
-    }
-    this->current_chunk_ = new_chunk;
-    this->index_ = sequence_length;
-    sequence_start_ = 0;
-  }
-};
-
-
 // Compare 8bit/16bit chars to 8bit/16bit chars.
 template <typename lchar, typename rchar>
 inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
@@ -1378,7 +1168,7 @@
 INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                               size_t chars));
-#elif defined(V8_HOST_ARCH_PPC)
+#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
 INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                               size_t chars));
@@ -1541,7 +1331,7 @@
     MemCopy(dest, src, chars * sizeof(*dest));
   }
 }
-#elif defined(V8_HOST_ARCH_PPC)
+#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
 #define CASE(n)           \
   case n:                 \
     memcpy(dest, src, n); \
@@ -1752,21 +1542,22 @@
   return ReadUnalignedValue<double>(p);
 }
 
-
 static inline void WriteDoubleValue(void* p, double value) {
   WriteUnalignedValue(p, value);
 }
 
-
 static inline uint16_t ReadUnalignedUInt16(const void* p) {
   return ReadUnalignedValue<uint16_t>(p);
 }
 
-
 static inline void WriteUnalignedUInt16(void* p, uint16_t value) {
   WriteUnalignedValue(p, value);
 }
 
+static inline uint32_t ReadUnalignedUInt32(const void* p) {
+  return ReadUnalignedValue<uint32_t>(p);
+}
+
 static inline void WriteUnalignedUInt32(void* p, uint32_t value) {
   WriteUnalignedValue(p, value);
 }
diff --git a/src/v8.cc b/src/v8.cc
index 31b4878..154cf62 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -19,7 +19,6 @@
 #include "src/profiler/sampler.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
 #include "src/snapshot/snapshot.h"
 
 
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 6533aa1..c8bd4e8 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -40,11 +40,8 @@
 template <StateTag Tag>
 VMState<Tag>::VMState(Isolate* isolate)
     : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
-  if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
-    if (FLAG_log_timer_events) {
-      LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
-    }
-    TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
+  if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+    LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
   }
   isolate_->set_current_vm_state(Tag);
 }
@@ -52,11 +49,8 @@
 
 template <StateTag Tag>
 VMState<Tag>::~VMState() {
-  if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
-    if (FLAG_log_timer_events) {
-      LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
-    }
-    TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
+  if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+    LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
   }
   isolate_->set_current_vm_state(previous_tag_);
 }
@@ -64,16 +58,18 @@
 ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
     : isolate_(isolate),
       callback_(callback),
-      previous_scope_(isolate->external_callback_scope()),
-      timer_(&isolate->counters()->runtime_call_stats()->ExternalCallback,
-             isolate->counters()->runtime_call_stats()->current_timer()) {
+      previous_scope_(isolate->external_callback_scope()) {
 #ifdef USE_SIMULATOR
   scope_address_ = Simulator::current(isolate)->get_sp();
 #endif
   isolate_->set_external_callback_scope(this);
   if (FLAG_runtime_call_stats) {
-    isolate_->counters()->runtime_call_stats()->Enter(&timer_);
+    RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+    timer_.Initialize(&stats->ExternalCallback, stats->current_timer());
+    stats->Enter(&timer_);
   }
+  TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
+                     "V8.ExternalCallback");
 }
 
 ExternalCallbackScope::~ExternalCallbackScope() {
@@ -81,6 +77,8 @@
     isolate_->counters()->runtime_call_stats()->Leave(&timer_);
   }
   isolate_->set_external_callback_scope(previous_scope_);
+  TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
+                   "V8.ExternalCallback");
 }
 
 Address ExternalCallbackScope::scope_address() {
diff --git a/src/wasm/asm-wasm-builder.cc b/src/wasm/asm-wasm-builder.cc
index ee5427b..d16d3a8 100644
--- a/src/wasm/asm-wasm-builder.cc
+++ b/src/wasm/asm-wasm-builder.cc
@@ -4,6 +4,12 @@
 
 #include "src/v8.h"
 
+// Required to get M_E etc. in MSVC.
+#if defined(_WIN32)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+
 #include "src/wasm/asm-wasm-builder.h"
 #include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-opcodes.h"
@@ -28,7 +34,7 @@
 class AsmWasmBuilderImpl : public AstVisitor {
  public:
   AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
-                     Handle<Object> foreign)
+                     Handle<Object> foreign, AsmTyper* typer)
       : local_variables_(HashMap::PointersMatch,
                          ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
@@ -46,6 +52,7 @@
         isolate_(isolate),
         zone_(zone),
         foreign_(foreign),
+        typer_(typer),
         cache_(TypeCache::Get()),
         breakable_blocks_(zone),
         block_size_(0),
@@ -59,12 +66,10 @@
   }
 
   void InitializeInitFunction() {
-    unsigned char init[] = "__init__";
     init_function_index_ = builder_->AddFunction();
     current_function_builder_ = builder_->FunctionAt(init_function_index_);
-    current_function_builder_->SetName(init, 8);
     current_function_builder_->ReturnType(kAstStmt);
-    current_function_builder_->Exported(1);
+    builder_->MarkStartFunction(init_function_index_);
     current_function_builder_ = nullptr;
   }
 
@@ -133,13 +138,14 @@
         : builder_(builder) {
       builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
       builder_->current_function_builder_->Emit(opcode);
-      index_ = builder_->current_function_builder_->EmitEditableImmediate(0);
+      index_ =
+          builder_->current_function_builder_->EmitEditableVarIntImmediate();
       prev_block_size_ = builder_->block_size_;
       builder_->block_size_ = initial_block_size;
     }
     ~BlockVisitor() {
-      builder_->current_function_builder_->EditImmediate(index_,
-                                                         builder_->block_size_);
+      builder_->current_function_builder_->EditVarIntImmediate(
+          index_, builder_->block_size_);
       builder_->block_size_ = prev_block_size_;
       builder_->breakable_blocks_.pop_back();
     }
@@ -188,7 +194,7 @@
       }
     }
     DCHECK(i >= 0);
-    current_function_builder_->EmitWithU8(kExprBr, block_distance);
+    current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
     current_function_builder_->Emit(kExprNop);
   }
 
@@ -211,7 +217,7 @@
       }
     }
     DCHECK(i >= 0);
-    current_function_builder_->EmitWithU8(kExprBr, block_distance);
+    current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
     current_function_builder_->Emit(kExprNop);
   }
 
@@ -232,7 +238,8 @@
   void SetLocalTo(uint16_t index, int value) {
     current_function_builder_->Emit(kExprSetLocal);
     AddLeb128(index, true);
-    byte code[] = {WASM_I32(value)};
+    // TODO(bradnelson): variable size
+    byte code[] = {WASM_I32V(value)};
     current_function_builder_->EmitCode(code, sizeof(code));
     block_size_++;
   }
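The switch from one-byte immediates (EmitWithU8 / EmitEditableImmediate) to the VarInt variants reflects the wasm binary format's LEB128 variable-length integers. A generic self-contained sketch of unsigned LEB128 encoding for orientation (the textbook algorithm, not the V8 encoder itself):

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Unsigned LEB128: 7 payload bits per byte, high bit set on all but the last byte.
  std::vector<uint8_t> EncodeU32LEB(uint32_t value) {
    std::vector<uint8_t> out;
    do {
      uint8_t byte = value & 0x7f;
      value >>= 7;
      if (value != 0) byte |= 0x80;
      out.push_back(byte);
    } while (value != 0);
    return out;
  }

  int main() {
    for (uint8_t b : EncodeU32LEB(624485)) {
      std::printf("%02x ", static_cast<unsigned>(b));  // prints: e5 8e 26
    }
    std::printf("\n");
    return 0;
  }
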
@@ -286,7 +293,7 @@
     RECURSE(Visit(stmt->body()));
     current_function_builder_->Emit(kExprIf);
     RECURSE(Visit(stmt->cond()));
-    current_function_builder_->EmitWithU8(kExprBr, 0);
+    current_function_builder_->EmitWithVarInt(kExprBr, 0);
     current_function_builder_->Emit(kExprNop);
   }
 
@@ -296,7 +303,7 @@
                          1);
     current_function_builder_->Emit(kExprIf);
     RECURSE(Visit(stmt->cond()));
-    current_function_builder_->EmitWithU8(kExprBr, 0);
+    current_function_builder_->EmitWithVarInt(kExprBr, 0);
     RECURSE(Visit(stmt->body()));
   }
 
@@ -311,9 +318,9 @@
     if (stmt->cond() != nullptr) {
       block_size_++;
       current_function_builder_->Emit(kExprIf);
-      current_function_builder_->Emit(kExprBoolNot);
+      current_function_builder_->Emit(kExprI32Eqz);
       RECURSE(Visit(stmt->cond()));
-      current_function_builder_->EmitWithU8(kExprBr, 1);
+      current_function_builder_->EmitWithVarInt(kExprBr, 1);
       current_function_builder_->Emit(kExprNop);
     }
     if (stmt->body() != nullptr) {
@@ -325,7 +332,7 @@
       RECURSE(Visit(stmt->next()));
     }
     block_size_++;
-    current_function_builder_->EmitWithU8(kExprBr, 0);
+    current_function_builder_->EmitWithVarInt(kExprBr, 0);
     current_function_builder_->Emit(kExprNop);
   }
 
@@ -371,6 +378,58 @@
     RECURSE(Visit(expr->else_expression()));
   }
 
+  bool VisitStdlibConstant(Variable* var) {
+    AsmTyper::StandardMember standard_object =
+        typer_->VariableAsStandardMember(var);
+    double value;
+    switch (standard_object) {
+      case AsmTyper::kInfinity: {
+        value = std::numeric_limits<double>::infinity();
+        break;
+      }
+      case AsmTyper::kNaN: {
+        value = std::numeric_limits<double>::quiet_NaN();
+        break;
+      }
+      case AsmTyper::kMathE: {
+        value = M_E;
+        break;
+      }
+      case AsmTyper::kMathLN10: {
+        value = M_LN10;
+        break;
+      }
+      case AsmTyper::kMathLN2: {
+        value = M_LN2;
+        break;
+      }
+      case AsmTyper::kMathLOG10E: {
+        value = M_LOG10E;
+        break;
+      }
+      case AsmTyper::kMathLOG2E: {
+        value = M_LOG2E;
+        break;
+      }
+      case AsmTyper::kMathPI: {
+        value = M_PI;
+        break;
+      }
+      case AsmTyper::kMathSQRT1_2: {
+        value = M_SQRT1_2;
+        break;
+      }
+      case AsmTyper::kMathSQRT2: {
+        value = M_SQRT2;
+        break;
+      }
+      default: { return false; }
+    }
+    byte code[] = {WASM_F64(value)};
+    current_function_builder_->EmitCode(code, sizeof(code));
+    return true;
+  }
+
   void VisitVariableProxy(VariableProxy* expr) {
     if (in_function_) {
       Variable* var = expr->var();
@@ -382,6 +441,9 @@
         }
         is_set_op_ = false;
       } else {
+        if (VisitStdlibConstant(var)) {
+          return;
+        }
         if (var->IsContextSlot()) {
           current_function_builder_->Emit(kExprLoadGlobal);
         } else {
@@ -399,32 +461,32 @@
   }
 
   void VisitLiteral(Literal* expr) {
-    if (in_function_) {
-      if (expr->raw_value()->IsNumber()) {
-        LocalType type = TypeOf(expr);
-        switch (type) {
-          case kAstI32: {
-            int val = static_cast<int>(expr->raw_value()->AsNumber());
-            byte code[] = {WASM_I32(val)};
-            current_function_builder_->EmitCode(code, sizeof(code));
-            break;
-          }
-          case kAstF32: {
-            float val = static_cast<float>(expr->raw_value()->AsNumber());
-            byte code[] = {WASM_F32(val)};
-            current_function_builder_->EmitCode(code, sizeof(code));
-            break;
-          }
-          case kAstF64: {
-            double val = static_cast<double>(expr->raw_value()->AsNumber());
-            byte code[] = {WASM_F64(val)};
-            current_function_builder_->EmitCode(code, sizeof(code));
-            break;
-          }
-          default:
-            UNREACHABLE();
-        }
+    Handle<Object> value = expr->value();
+    if (!in_function_ || !value->IsNumber()) {
+      return;
+    }
+    Type* type = expr->bounds().upper;
+    if (type->Is(cache_.kAsmSigned)) {
+      int32_t i = 0;
+      if (!value->ToInt32(&i)) {
+        UNREACHABLE();
       }
+      byte code[] = {WASM_I32V(i)};
+      current_function_builder_->EmitCode(code, sizeof(code));
+    } else if (type->Is(cache_.kAsmUnsigned) || type->Is(cache_.kAsmFixnum)) {
+      uint32_t u = 0;
+      if (!value->ToUint32(&u)) {
+        UNREACHABLE();
+      }
+      int32_t i = static_cast<int32_t>(u);
+      byte code[] = {WASM_I32V(i)};
+      current_function_builder_->EmitCode(code, sizeof(code));
+    } else if (type->Is(cache_.kAsmDouble)) {
+      double val = expr->raw_value()->AsNumber();
+      byte code[] = {WASM_F64(val)};
+      current_function_builder_->EmitCode(code, sizeof(code));
+    } else {
+      UNREACHABLE();
     }
   }
 
@@ -589,29 +651,33 @@
         UnLoadInitFunction();
         return;
       }
-      // TODO(bradnelson): Get rid of this.
-      if (TypeOf(expr->value()) == kAstStmt) {
-        Property* prop = expr->value()->AsProperty();
-        if (prop != nullptr) {
-          VariableProxy* vp = prop->obj()->AsVariableProxy();
-          if (vp != nullptr && vp->var()->IsParameter() &&
-              vp->var()->index() == 1) {
-            VariableProxy* target = expr->target()->AsVariableProxy();
-            if (target->bounds().lower->Is(Type::Function())) {
-              const AstRawString* name =
-                  prop->key()->AsLiteral()->AsRawPropertyName();
-              imported_function_table_.AddImport(
-                  target->var(), name->raw_data(), name->length());
-            }
+      Property* prop = expr->value()->AsProperty();
+      if (prop != nullptr) {
+        VariableProxy* vp = prop->obj()->AsVariableProxy();
+        if (vp != nullptr && vp->var()->IsParameter() &&
+            vp->var()->index() == 1) {
+          VariableProxy* target = expr->target()->AsVariableProxy();
+          if (target->bounds().lower->Is(Type::Function())) {
+            const AstRawString* name =
+                prop->key()->AsLiteral()->AsRawPropertyName();
+            imported_function_table_.AddImport(target->var(), name->raw_data(),
+                                               name->length());
           }
         }
-        ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
-        if (funcs != nullptr &&
-            funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
-          VariableProxy* target = expr->target()->AsVariableProxy();
-          DCHECK_NOT_NULL(target);
-          AddFunctionTable(target, funcs);
-        }
+        // Property values in module scope don't emit code, so return.
+        return;
+      }
+      ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
+      if (funcs != nullptr &&
+          funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
+        VariableProxy* target = expr->target()->AsVariableProxy();
+        DCHECK_NOT_NULL(target);
+        AddFunctionTable(target, funcs);
+        // Only add to the function table. No init needed.
+        return;
+      }
+      if (expr->value()->IsCallNew()) {
+        // No init code to emit for CallNew nodes.
         return;
       }
       in_init = true;
@@ -630,6 +696,12 @@
     is_set_op_ = true;
     RECURSE(Visit(expr->target()));
     DCHECK(!is_set_op_);
+    // Assignment to a HEAPF32 view from a float64 value requires a conversion.
+    if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
+        expr->target()->AsProperty()->obj()->bounds().lower->Is(
+            cache_.kFloat32Array)) {
+      current_function_builder_->Emit(kExprF32ConvertF64);
+    }
     RECURSE(Visit(expr->value()));
     if (in_init) {
       UnLoadInitFunction();
@@ -672,7 +744,8 @@
             Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
             if (nvalue->IsNumber()) {
               int32_t val = static_cast<int32_t>(nvalue->Number());
-              byte code[] = {WASM_I32(val)};
+              // TODO(bradnelson): variable size
+              byte code[] = {WASM_I32V(val)};
               current_function_builder_->EmitCode(code, sizeof(code));
               return;
             }
@@ -684,7 +757,7 @@
       byte code[] = {WASM_F64(std::numeric_limits<double>::quiet_NaN())};
       current_function_builder_->EmitCode(code, sizeof(code));
     } else {
-      byte code[] = {WASM_I32(0)};
+      byte code[] = {WASM_I32V_1(0)};
       current_function_builder_->EmitCode(code, sizeof(code));
     }
   }
@@ -725,9 +798,9 @@
     } else {
       UNREACHABLE();
     }
-    current_function_builder_->EmitWithU8(
-        WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_),
-        WasmOpcodes::LoadStoreAccessOf(false));
+    // TODO(titzer): use special asm-compatibility opcodes?
+    current_function_builder_->EmitWithU8U8(
+        WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_), 0, 0);
     is_set_op_ = false;
     if (size == 1) {
       // Allow more general expression in byte arrays than the spec
@@ -742,7 +815,8 @@
         DCHECK(value->raw_value()->IsNumber());
         DCHECK_EQ(kAstI32, TypeOf(value));
         int val = static_cast<int>(value->raw_value()->AsNumber());
-        byte code[] = {WASM_I32(val * size)};
+        // TODO(bradnelson): variable size
+        byte code[] = {WASM_I32V(val * size)};
         current_function_builder_->EmitCode(code, sizeof(code));
         return;
       }
@@ -765,11 +839,209 @@
     UNREACHABLE();
   }
 
+  bool VisitStdlibFunction(Call* call, VariableProxy* expr) {
+    Variable* var = expr->var();
+    AsmTyper::StandardMember standard_object =
+        typer_->VariableAsStandardMember(var);
+    ZoneList<Expression*>* args = call->arguments();
+    LocalType call_type = TypeOf(call);
+    switch (standard_object) {
+      case AsmTyper::kNone: {
+        return false;
+      }
+      case AsmTyper::kMathAcos: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Acos);
+        break;
+      }
+      case AsmTyper::kMathAsin: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Asin);
+        break;
+      }
+      case AsmTyper::kMathAtan: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Atan);
+        break;
+      }
+      case AsmTyper::kMathCos: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Cos);
+        break;
+      }
+      case AsmTyper::kMathSin: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Sin);
+        break;
+      }
+      case AsmTyper::kMathTan: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Tan);
+        break;
+      }
+      case AsmTyper::kMathExp: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Exp);
+        break;
+      }
+      case AsmTyper::kMathLog: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Log);
+        break;
+      }
+      case AsmTyper::kMathCeil: {
+        if (call_type == kAstF32) {
+          current_function_builder_->Emit(kExprF32Ceil);
+        } else if (call_type == kAstF64) {
+          current_function_builder_->Emit(kExprF64Ceil);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+      }
+      case AsmTyper::kMathFloor: {
+        if (call_type == kAstF32) {
+          current_function_builder_->Emit(kExprF32Floor);
+        } else if (call_type == kAstF64) {
+          current_function_builder_->Emit(kExprF64Floor);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+      }
+      case AsmTyper::kMathSqrt: {
+        if (call_type == kAstF32) {
+          current_function_builder_->Emit(kExprF32Sqrt);
+        } else if (call_type == kAstF64) {
+          current_function_builder_->Emit(kExprF64Sqrt);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+      }
+      case AsmTyper::kMathAbs: {
+        // TODO(bradnelson): Should this be cast to float?
+        if (call_type == kAstI32) {
+          current_function_builder_->Emit(kExprIfElse);
+          current_function_builder_->Emit(kExprI32LtS);
+          Visit(args->at(0));
+          byte code[] = {WASM_I8(0)};
+          current_function_builder_->EmitCode(code, sizeof(code));
+          current_function_builder_->Emit(kExprI32Sub);
+          current_function_builder_->EmitCode(code, sizeof(code));
+          Visit(args->at(0));
+        } else if (call_type == kAstF32) {
+          current_function_builder_->Emit(kExprF32Abs);
+        } else if (call_type == kAstF64) {
+          current_function_builder_->Emit(kExprF64Abs);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+      }
+      case AsmTyper::kMathMin: {
+        // TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
+        if (call_type == kAstI32) {
+          current_function_builder_->Emit(kExprIfElse);
+          current_function_builder_->Emit(kExprI32LeS);
+          Visit(args->at(0));
+          Visit(args->at(1));
+        } else if (call_type == kAstF32) {
+          current_function_builder_->Emit(kExprF32Min);
+        } else if (call_type == kAstF64) {
+          current_function_builder_->Emit(kExprF64Min);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+      }
+      case AsmTyper::kMathMax: {
+        // TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
+        if (call_type == kAstI32) {
+          current_function_builder_->Emit(kExprIfElse);
+          current_function_builder_->Emit(kExprI32GtS);
+          Visit(args->at(0));
+          Visit(args->at(1));
+        } else if (call_type == kAstF32) {
+          current_function_builder_->Emit(kExprF32Max);
+        } else if (call_type == kAstF64) {
+          current_function_builder_->Emit(kExprF64Max);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+      }
+      case AsmTyper::kMathAtan2: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Atan2);
+        break;
+      }
+      case AsmTyper::kMathPow: {
+        DCHECK_EQ(kAstF64, call_type);
+        current_function_builder_->Emit(kExprF64Pow);
+        break;
+      }
+      case AsmTyper::kMathImul: {
+        current_function_builder_->Emit(kExprI32Mul);
+        break;
+      }
+      case AsmTyper::kMathFround: {
+        DCHECK(args->length() == 1);
+        Literal* literal = args->at(0)->AsLiteral();
+        if (literal != nullptr) {
+          if (literal->raw_value()->IsNumber()) {
+            float val = static_cast<float>(literal->raw_value()->AsNumber());
+            byte code[] = {WASM_F32(val)};
+            current_function_builder_->EmitCode(code, sizeof(code));
+            return true;
+          }
+        }
+        switch (TypeIndexOf(args->at(0))) {
+          case kInt32:
+          case kFixnum:
+            current_function_builder_->Emit(kExprF32SConvertI32);
+            break;
+          case kUint32:
+            current_function_builder_->Emit(kExprF32UConvertI32);
+            break;
+          case kFloat32:
+            break;
+          case kFloat64:
+            current_function_builder_->Emit(kExprF32ConvertF64);
+            break;
+          default:
+            UNREACHABLE();
+        }
+        break;
+      }
+      default: {
+        UNREACHABLE();
+        break;
+      }
+    }
+    VisitCallArgs(call);
+    return true;
+  }
+
+  void VisitCallArgs(Call* expr) {
+    ZoneList<Expression*>* args = expr->arguments();
+    for (int i = 0; i < args->length(); ++i) {
+      Expression* arg = args->at(i);
+      RECURSE(Visit(arg));
+    }
+  }
+
   void VisitCall(Call* expr) {
     Call::CallType call_type = expr->GetCallType(isolate_);
     switch (call_type) {
       case Call::OTHER_CALL: {
         DCHECK(in_function_);
+        VariableProxy* proxy = expr->expression()->AsVariableProxy();
+        if (proxy != nullptr) {
+          if (VisitStdlibFunction(expr, proxy)) {
+            return;
+          }
+        }
         uint16_t index;
         VariableProxy* vp = expr->expression()->AsVariableProxy();
         if (vp != nullptr &&
@@ -802,10 +1074,11 @@
         VariableProxy* var = p->obj()->AsVariableProxy();
         DCHECK_NOT_NULL(var);
         FunctionTableIndices* indices = LookupFunctionTable(var->var());
-        current_function_builder_->EmitWithU8(kExprCallIndirect,
-                                              indices->signature_index);
+        current_function_builder_->EmitWithVarInt(kExprCallIndirect,
+                                                  indices->signature_index);
         current_function_builder_->Emit(kExprI32Add);
-        byte code[] = {WASM_I32(indices->start_index)};
+        // TODO(bradnelson): variable size
+        byte code[] = {WASM_I32V(indices->start_index)};
         current_function_builder_->EmitCode(code, sizeof(code));
         RECURSE(Visit(p->key()));
         break;
@@ -813,11 +1086,7 @@
       default:
         UNREACHABLE();
     }
-    ZoneList<Expression*>* args = expr->arguments();
-    for (int i = 0; i < args->length(); ++i) {
-      Expression* arg = args->at(i);
-      RECURSE(Visit(arg));
-    }
+    VisitCallArgs(expr);
   }
 
   void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
@@ -828,7 +1097,7 @@
     switch (expr->op()) {
       case Token::NOT: {
         DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
-        current_function_builder_->Emit(kExprBoolNot);
+        current_function_builder_->Emit(kExprI32Eqz);
         break;
       }
       default:
@@ -1022,7 +1291,7 @@
           } else if (type == kUint32) {
             current_function_builder_->Emit(kExprI32RemU);
           } else if (type == kFloat64) {
-            ModF64(expr);
+            current_function_builder_->Emit(kExprF64Mod);
             return;
           } else {
             UNREACHABLE();
@@ -1030,7 +1299,7 @@
           break;
         }
         case Token::COMMA: {
-          current_function_builder_->EmitWithU8(kExprBlock, 2);
+          current_function_builder_->EmitWithVarInt(kExprBlock, 2);
           break;
         }
         default:
@@ -1041,32 +1310,6 @@
     }
   }
 
-  void ModF64(BinaryOperation* expr) {
-    current_function_builder_->EmitWithU8(kExprBlock, 3);
-    uint16_t index_0 = current_function_builder_->AddLocal(kAstF64);
-    uint16_t index_1 = current_function_builder_->AddLocal(kAstF64);
-    current_function_builder_->Emit(kExprSetLocal);
-    AddLeb128(index_0, true);
-    RECURSE(Visit(expr->left()));
-    current_function_builder_->Emit(kExprSetLocal);
-    AddLeb128(index_1, true);
-    RECURSE(Visit(expr->right()));
-    current_function_builder_->Emit(kExprF64Sub);
-    current_function_builder_->Emit(kExprGetLocal);
-    AddLeb128(index_0, true);
-    current_function_builder_->Emit(kExprF64Mul);
-    current_function_builder_->Emit(kExprGetLocal);
-    AddLeb128(index_1, true);
-    // Use trunc instead of two casts
-    current_function_builder_->Emit(kExprF64SConvertI32);
-    current_function_builder_->Emit(kExprI32SConvertF64);
-    current_function_builder_->Emit(kExprF64Div);
-    current_function_builder_->Emit(kExprGetLocal);
-    AddLeb128(index_0, true);
-    current_function_builder_->Emit(kExprGetLocal);
-    AddLeb128(index_1, true);
-  }
-
   void AddLeb128(uint32_t index, bool is_local) {
     std::vector<uint8_t> index_vec = UnsignedLEB128From(index);
     if (is_local) {
@@ -1262,6 +1505,7 @@
   Isolate* isolate_;
   Zone* zone_;
   Handle<Object> foreign_;
+  AsmTyper* typer_;
   TypeCache const& cache_;
   ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
   int block_size_;
@@ -1277,13 +1521,18 @@
 };
 
 AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
-                               FunctionLiteral* literal, Handle<Object> foreign)
-    : isolate_(isolate), zone_(zone), literal_(literal), foreign_(foreign) {}
+                               FunctionLiteral* literal, Handle<Object> foreign,
+                               AsmTyper* typer)
+    : isolate_(isolate),
+      zone_(zone),
+      literal_(literal),
+      foreign_(foreign),
+      typer_(typer) {}
 
 // TODO(aseemgarg): probably should take zone (to write wasm to) as input so
 // that zone in constructor may be thrown away once wasm module is written.
 WasmModuleIndex* AsmWasmBuilder::Run() {
-  AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_);
+  AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_, typer_);
   impl.Compile();
   WasmModuleWriter* writer = impl.builder_->Build(zone_);
   return writer->WriteTo(zone_);
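The asm-wasm-builder changes above replace fixed-width immediates (EmitWithU8, WASM_I32) with variable-length ones (EmitWithVarInt, WASM_I32V), i.e. LEB128 varints, matching what the rewritten decoder below consumes via checked_read_u32v / checked_read_i32v. A minimal sketch of the encoding for illustration only; the helper names are hypothetical, not V8's encoder API:

  #include <cstdint>
  #include <vector>

  // Unsigned LEB128: 7 payload bits per byte, high bit set while more follow.
  std::vector<uint8_t> EncodeU32Leb128(uint32_t value) {
    std::vector<uint8_t> out;
    do {
      uint8_t b = value & 0x7f;
      value >>= 7;
      if (value != 0) b |= 0x80;
      out.push_back(b);
    } while (value != 0);
    return out;
  }

  // Signed LEB128: stop once the remaining bits agree with the sign bit of
  // the current byte. Assumes arithmetic right shift for negative values.
  std::vector<uint8_t> EncodeI32Leb128(int32_t value) {
    std::vector<uint8_t> out;
    bool more = true;
    while (more) {
      uint8_t b = value & 0x7f;
      value >>= 7;
      if ((value == 0 && !(b & 0x40)) || (value == -1 && (b & 0x40))) {
        more = false;
      } else {
        b |= 0x80;
      }
      out.push_back(b);
    }
    return out;
  }

Small immediates such as WASM_I32V_1(0) still occupy a single byte, while large constants and table indices grow only as needed.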
diff --git a/src/wasm/asm-wasm-builder.h b/src/wasm/asm-wasm-builder.h
index 9b761f9..09645ee 100644
--- a/src/wasm/asm-wasm-builder.h
+++ b/src/wasm/asm-wasm-builder.h
@@ -7,6 +7,7 @@
 
 #include "src/allocation.h"
 #include "src/objects.h"
+#include "src/typing-asm.h"
 #include "src/wasm/encoder.h"
 #include "src/zone.h"
 
@@ -20,7 +21,7 @@
 class AsmWasmBuilder {
  public:
   explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
-                          Handle<Object> foreign);
+                          Handle<Object> foreign, AsmTyper* typer);
   WasmModuleIndex* Run();
 
  private:
@@ -28,6 +29,7 @@
   Zone* zone_;
   FunctionLiteral* literal_;
   Handle<Object> foreign_;
+  AsmTyper* typer_;
 };
 }  // namespace wasm
 }  // namespace internal
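The header change above threads the AsmTyper into the builder so that VisitStdlibConstant and VisitStdlibFunction can query VariableAsStandardMember() when lowering references to Math.* and the other stdlib members. A hypothetical call site, assuming {typer} is the AsmTyper that already validated {literal} and the remaining arguments are in scope:

  // Sketch only: the argument values are assumptions, not taken from this patch.
  wasm::AsmWasmBuilder builder(isolate, zone, literal, foreign, typer);
  wasm::WasmModuleIndex* module_index = builder.Run();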
diff --git a/src/wasm/ast-decoder.cc b/src/wasm/ast-decoder.cc
index c97c781..e2f6a04 100644
--- a/src/wasm/ast-decoder.cc
+++ b/src/wasm/ast-decoder.cc
@@ -2,7 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/base/platform/elapsed-timer.h"
 #include "src/signature.h"
 
 #include "src/bit-vector.h"
@@ -15,6 +14,8 @@
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
 
+#include "src/ostreams.h"
+
 #include "src/compiler/wasm-compiler.h"
 
 namespace v8 {
@@ -52,7 +53,6 @@
   Tree* last() const { return index > 0 ? tree->children[index - 1] : nullptr; }
 };
 
-
 // An SsaEnv environment carries the current local variable renaming
 // as well as the current effect and control dependency in the TF graph.
 // It maintains a control state that tracks whether the environment
@@ -74,14 +74,12 @@
   }
 };
 
-
 // An entry in the stack of blocks during decoding.
 struct Block {
   SsaEnv* ssa_env;  // SSA renaming environment.
   int stack_depth;  // production stack depth.
 };
 
-
 // An entry in the stack of ifs during decoding.
 struct IfEnv {
   SsaEnv* false_env;
@@ -89,27 +87,27 @@
   SsaEnv** case_envs;
 };
 
-
 // Macros that build nodes only if there is a graph and the current SSA
 // environment is reachable from start. This avoids problems with malformed
 // TF graphs when decoding inputs that have unreachable code.
 #define BUILD(func, ...) (build() ? builder_->func(__VA_ARGS__) : nullptr)
 #define BUILD0(func) (build() ? builder_->func() : nullptr)
 
-
 // Generic Wasm bytecode decoder with utilities for decoding operands,
 // lengths, etc.
 class WasmDecoder : public Decoder {
  public:
-  WasmDecoder() : Decoder(nullptr, nullptr), function_env_(nullptr) {}
-  WasmDecoder(FunctionEnv* env, const byte* start, const byte* end)
-      : Decoder(start, end), function_env_(env) {}
-  FunctionEnv* function_env_;
-
-  void Reset(FunctionEnv* function_env, const byte* start, const byte* end) {
-    Decoder::Reset(start, end);
-    function_env_ = function_env;
-  }
+  WasmDecoder(ModuleEnv* module, FunctionSig* sig, const byte* start,
+              const byte* end)
+      : Decoder(start, end),
+        module_(module),
+        sig_(sig),
+        total_locals_(0),
+        local_types_(nullptr) {}
+  ModuleEnv* module_;
+  FunctionSig* sig_;
+  size_t total_locals_;
+  ZoneVector<LocalType>* local_types_;
 
   byte ByteOperand(const byte* pc, const char* msg = "missing 1-byte operand") {
     if ((pc + sizeof(byte)) >= limit_) {
@@ -136,8 +134,12 @@
   }
 
   inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
-    if (operand.index < function_env_->total_locals) {
-      operand.type = function_env_->GetLocalType(operand.index);
+    if (operand.index < total_locals_) {
+      if (local_types_) {
+        operand.type = local_types_->at(operand.index);
+      } else {
+        operand.type = kAstStmt;
+      }
       return true;
     }
     error(pc, pc + 1, "invalid local index");
@@ -145,9 +147,9 @@
   }
 
   inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
-    ModuleEnv* m = function_env_->module;
-    if (m && m->module && operand.index < m->module->globals->size()) {
-      operand.machine_type = m->module->globals->at(operand.index).type;
+    ModuleEnv* m = module_;
+    if (m && m->module && operand.index < m->module->globals.size()) {
+      operand.machine_type = m->module->globals[operand.index].type;
       operand.type = WasmOpcodes::LocalTypeFor(operand.machine_type);
       return true;
     }
@@ -156,9 +158,9 @@
   }
 
   inline bool Validate(const byte* pc, FunctionIndexOperand& operand) {
-    ModuleEnv* m = function_env_->module;
-    if (m && m->module && operand.index < m->module->functions->size()) {
-      operand.sig = m->module->functions->at(operand.index).sig;
+    ModuleEnv* m = module_;
+    if (m && m->module && operand.index < m->module->functions.size()) {
+      operand.sig = m->module->functions[operand.index].sig;
       return true;
     }
     error(pc, pc + 1, "invalid function index");
@@ -166,9 +168,9 @@
   }
 
   inline bool Validate(const byte* pc, SignatureIndexOperand& operand) {
-    ModuleEnv* m = function_env_->module;
-    if (m && m->module && operand.index < m->module->signatures->size()) {
-      operand.sig = m->module->signatures->at(operand.index);
+    ModuleEnv* m = module_;
+    if (m && m->module && operand.index < m->module->signatures.size()) {
+      operand.sig = m->module->signatures[operand.index];
       return true;
     }
     error(pc, pc + 1, "invalid signature index");
@@ -176,9 +178,9 @@
   }
 
   inline bool Validate(const byte* pc, ImportIndexOperand& operand) {
-    ModuleEnv* m = function_env_->module;
-    if (m && m->module && operand.index < m->module->import_table->size()) {
-      operand.sig = m->module->import_table->at(operand.index).sig;
+    ModuleEnv* m = module_;
+    if (m && m->module && operand.index < m->module->import_table.size()) {
+      operand.sig = m->module->import_table[operand.index].sig;
       return true;
     }
     error(pc, pc + 1, "invalid signature index");
@@ -195,26 +197,14 @@
     return false;
   }
 
-  bool Validate(const byte* pc, TableSwitchOperand& operand,
+  bool Validate(const byte* pc, BranchTableOperand& operand,
                 size_t block_depth) {
-    if (operand.table_count == 0) {
-      error(pc, "tableswitch with 0 entries");
-      return false;
-    }
     // Verify table.
-    for (uint32_t i = 0; i < operand.table_count; i++) {
-      uint16_t target = operand.read_entry(this, i);
-      if (target >= 0x8000) {
-        size_t depth = target - 0x8000;
-        if (depth > block_depth) {
-          error(operand.table + i * 2, "improper branch in tableswitch");
-          return false;
-        }
-      } else {
-        if (target >= operand.case_count) {
-          error(operand.table + i * 2, "invalid case target in tableswitch");
-          return false;
-        }
+    for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+      uint32_t target = operand.read_entry(this, i);
+      if (target >= block_depth) {
+        error(operand.table + i * 2, "improper branch in br_table");
+        return false;
       }
     }
     return true;
@@ -262,27 +252,23 @@
       case kExprCallFunction: {
         FunctionIndexOperand operand(this, pc);
         return static_cast<int>(
-            function_env_->module->GetFunctionSignature(operand.index)
-                ->parameter_count());
+            module_->GetFunctionSignature(operand.index)->parameter_count());
       }
       case kExprCallIndirect: {
         SignatureIndexOperand operand(this, pc);
         return 1 + static_cast<int>(
-                       function_env_->module->GetSignature(operand.index)
-                           ->parameter_count());
+                       module_->GetSignature(operand.index)->parameter_count());
       }
       case kExprCallImport: {
         ImportIndexOperand operand(this, pc);
         return static_cast<int>(
-            function_env_->module->GetImportSignature(operand.index)
-                ->parameter_count());
+            module_->GetImportSignature(operand.index)->parameter_count());
       }
       case kExprReturn: {
-        return static_cast<int>(function_env_->sig->return_count());
+        return static_cast<int>(sig_->return_count());
       }
-      case kExprTableSwitch: {
-        TableSwitchOperand operand(this, pc);
-        return 1 + operand.case_count;
+      case kExprBrTable: {
+        return 1;
       }
 
 #define DECLARE_OPCODE_CASE(name, opcode, sig) \
@@ -293,10 +279,13 @@
         FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
         FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
         FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
+        FOREACH_ASMJS_COMPAT_OPCODE(DECLARE_OPCODE_CASE)
 #undef DECLARE_OPCODE_CASE
+      case kExprDeclLocals:
+      default:
+        UNREACHABLE();
+        return 0;
     }
-    UNREACHABLE();
-    return 0;
   }
 
   int OpcodeLength(const byte* pc) {
@@ -343,16 +332,22 @@
         LocalIndexOperand operand(this, pc);
         return 1 + operand.length;
       }
-      case kExprTableSwitch: {
-        TableSwitchOperand operand(this, pc);
+      case kExprBrTable: {
+        BranchTableOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprI32Const: {
+        ImmI32Operand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprI64Const: {
+        ImmI64Operand operand(this, pc);
         return 1 + operand.length;
       }
       case kExprI8Const:
         return 2;
-      case kExprI32Const:
       case kExprF32Const:
         return 5;
-      case kExprI64Const:
       case kExprF64Const:
         return 9;
 
@@ -365,35 +360,28 @@
 
 // A shift-reduce-parser strategy for decoding Wasm code that uses an explicit
 // shift-reduce strategy with multiple internal stacks.
-class LR_WasmDecoder : public WasmDecoder {
+class SR_WasmDecoder : public WasmDecoder {
  public:
-  LR_WasmDecoder(Zone* zone, TFBuilder* builder)
-      : zone_(zone),
+  SR_WasmDecoder(Zone* zone, TFBuilder* builder, FunctionBody& body)
+      : WasmDecoder(body.module, body.sig, body.start, body.end),
+        zone_(zone),
         builder_(builder),
+        base_(body.base),
+        local_type_vec_(zone),
         trees_(zone),
         stack_(zone),
         blocks_(zone),
-        ifs_(zone) {}
+        ifs_(zone) {
+    local_types_ = &local_type_vec_;
+  }
 
-  TreeResult Decode(FunctionEnv* function_env, const byte* base, const byte* pc,
-                    const byte* end) {
-    base::ElapsedTimer decode_timer;
-    if (FLAG_trace_wasm_decode_time) {
-      decode_timer.Start();
-    }
-    trees_.clear();
-    stack_.clear();
-    blocks_.clear();
-    ifs_.clear();
-
-    if (end < pc) {
-      error(pc, "function body end < start");
+  TreeResult Decode() {
+    if (end_ < pc_) {
+      error(pc_, "function body end < start");
       return result_;
     }
 
-    base_ = base;
-    Reset(function_env, pc, end);
-
+    DecodeLocalDecls();
     InitSsaEnv();
     DecodeFunctionBody();
 
@@ -401,12 +389,12 @@
     if (ok()) {
       if (ssa_env_->go()) {
         if (stack_.size() > 0) {
-          error(stack_.back().pc(), end, "fell off end of code");
+          error(stack_.back().pc(), end_, "fell off end of code");
         }
         AddImplicitReturnAtEnd();
       }
       if (trees_.size() == 0) {
-        if (function_env_->sig->return_count() > 0) {
+        if (sig_->return_count() > 0) {
           error(start_, "no trees created");
         }
       } else {
@@ -415,15 +403,7 @@
     }
 
     if (ok()) {
-      if (FLAG_trace_wasm_ast) {
-        PrintAst(function_env, pc, end);
-      }
-      if (FLAG_trace_wasm_decode_time) {
-        double ms = decode_timer.Elapsed().InMillisecondsF();
-        PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
-      } else {
-        TRACE("wasm-decode ok\n\n");
-      }
+      TRACE("wasm-decode ok\n");
     } else {
       TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
             startrel(error_pc_), error_msg_.get());
@@ -432,6 +412,36 @@
     return toResult(tree);
   }
 
+  bool DecodeLocalDecls(AstLocalDecls& decls) {
+    DecodeLocalDecls();
+    if (failed()) return false;
+    decls.decls_encoded_size = pc_offset();
+    decls.total_local_count = 0;
+    decls.local_types.reserve(local_type_vec_.size());
+    for (size_t pos = 0; pos < local_type_vec_.size();) {
+      uint32_t count = 0;
+      LocalType type = local_type_vec_[pos];
+      while (pos < local_type_vec_.size() && local_type_vec_[pos] == type) {
+        pos++;
+        count++;
+      }
+      decls.total_local_count += count;
+      decls.local_types.push_back(std::pair<LocalType, uint32_t>(type, count));
+    }
+    return true;
+  }
+
+  BitVector* AnalyzeLoopAssignmentForTesting(const byte* pc,
+                                             size_t num_locals) {
+    total_locals_ = num_locals;
+    local_type_vec_.reserve(num_locals);
+    if (num_locals > local_type_vec_.size()) {
+      local_type_vec_.insert(local_type_vec_.end(),
+                             num_locals - local_type_vec_.size(), kAstI32);
+    }
+    return AnalyzeLoopAssignment(pc);
+  }
+
  private:
   static const size_t kErrorMsgSize = 128;
 
@@ -442,6 +452,7 @@
 
   SsaEnv* ssa_env_;
 
+  ZoneVector<LocalType> local_type_vec_;
   ZoneVector<Tree*> trees_;
   ZoneVector<Production> stack_;
   ZoneVector<Block> blocks_;
@@ -450,8 +461,6 @@
   inline bool build() { return builder_ && ssa_env_->go(); }
 
   void InitSsaEnv() {
-    FunctionSig* sig = function_env_->sig;
-    int param_count = static_cast<int>(sig->parameter_count());
     TFNode* start = nullptr;
     SsaEnv* ssa_env = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
     size_t size = sizeof(TFNode*) * EnvironmentCount();
@@ -459,50 +468,46 @@
     ssa_env->locals =
         size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
 
-    int pos = 0;
     if (builder_) {
-      start = builder_->Start(param_count + 1);
-      // Initialize parameters.
-      for (int i = 0; i < param_count; i++) {
-        ssa_env->locals[pos++] = builder_->Param(i, sig->GetParam(i));
+      start = builder_->Start(static_cast<int>(sig_->parameter_count() + 1));
+      // Initialize local variables.
+      uint32_t index = 0;
+      while (index < sig_->parameter_count()) {
+        ssa_env->locals[index] = builder_->Param(index, local_type_vec_[index]);
+        index++;
       }
-      // Initialize int32 locals.
-      if (function_env_->local_i32_count > 0) {
-        TFNode* zero = builder_->Int32Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_i32_count; i++) {
-          ssa_env->locals[pos++] = zero;
+      while (index < local_type_vec_.size()) {
+        LocalType type = local_type_vec_[index];
+        TFNode* node = DefaultValue(type);
+        while (index < local_type_vec_.size() &&
+               local_type_vec_[index] == type) {
+          // Do a whole run of like-typed locals at a time.
+          ssa_env->locals[index++] = node;
         }
       }
-      // Initialize int64 locals.
-      if (function_env_->local_i64_count > 0) {
-        TFNode* zero = builder_->Int64Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_i64_count; i++) {
-          ssa_env->locals[pos++] = zero;
-        }
-      }
-      // Initialize float32 locals.
-      if (function_env_->local_f32_count > 0) {
-        TFNode* zero = builder_->Float32Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_f32_count; i++) {
-          ssa_env->locals[pos++] = zero;
-        }
-      }
-      // Initialize float64 locals.
-      if (function_env_->local_f64_count > 0) {
-        TFNode* zero = builder_->Float64Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_f64_count; i++) {
-          ssa_env->locals[pos++] = zero;
-        }
-      }
-      DCHECK_EQ(function_env_->total_locals, pos);
-      DCHECK_EQ(EnvironmentCount(), pos);
-      builder_->set_module(function_env_->module);
+      builder_->set_module(module_);
     }
     ssa_env->control = start;
     ssa_env->effect = start;
     SetEnv("initial", ssa_env);
   }
 
+  TFNode* DefaultValue(LocalType type) {
+    switch (type) {
+      case kAstI32:
+        return builder_->Int32Constant(0);
+      case kAstI64:
+        return builder_->Int64Constant(0);
+      case kAstF32:
+        return builder_->Float32Constant(0);
+      case kAstF64:
+        return builder_->Float64Constant(0);
+      default:
+        UNREACHABLE();
+        return nullptr;
+    }
+  }
+
   void Leaf(LocalType type, TFNode* node = nullptr) {
     size_t size = sizeof(Tree);
     Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
@@ -561,6 +566,45 @@
     return bytes;
   }
 
+  // Decodes the locals declarations, if any, populating {local_type_vec_}.
+  void DecodeLocalDecls() {
+    DCHECK_EQ(0, local_type_vec_.size());
+    // Initialize {local_type_vec_} from the signature.
+    if (sig_) {
+      local_type_vec_.reserve(sig_->parameter_count());
+      for (size_t i = 0; i < sig_->parameter_count(); i++) {
+        local_type_vec_.push_back(sig_->GetParam(i));
+      }
+    }
+    // Decode local declarations, if any.
+    int length;
+    uint32_t entries = consume_u32v(&length, "local decls count");
+    while (entries-- > 0 && pc_ < limit_) {
+      uint32_t count = consume_u32v(&length, "local count");
+      byte code = consume_u8("local type");
+      LocalType type;
+      switch (code) {
+        case kLocalI32:
+          type = kAstI32;
+          break;
+        case kLocalI64:
+          type = kAstI64;
+          break;
+        case kLocalF32:
+          type = kAstF32;
+          break;
+        case kLocalF64:
+          type = kAstF64;
+          break;
+        default:
+          error(pc_ - 1, "invalid local type");
+          return;
+      }
+      local_type_vec_.insert(local_type_vec_.end(), count, type);
+    }
+    total_locals_ = local_type_vec_.size();
+  }
+
   // Decodes the body of a function, producing reduced trees into {result}.
   void DecodeFunctionBody() {
     TRACE("wasm-decode %p...%p (%d bytes) %s\n",
@@ -621,7 +665,7 @@
             PushBlock(break_env);
             SsaEnv* cont_env = Steal(break_env);
             // The continue environment is the inner environment.
-            PrepareForLoop(cont_env);
+            PrepareForLoop(pc_, cont_env);
             SetEnv("loop:start", Split(cont_env));
             if (ssa_env_->go()) ssa_env_->state = SsaEnv::kReached;
             PushBlock(cont_env);
@@ -655,16 +699,16 @@
           len = 1 + operand.length;
           break;
         }
-        case kExprTableSwitch: {
-          TableSwitchOperand operand(this, pc_);
+        case kExprBrTable: {
+          BranchTableOperand operand(this, pc_);
           if (Validate(pc_, operand, blocks_.size())) {
-            Shift(kAstEnd, 1 + operand.case_count);
+            Shift(kAstEnd, 1);
           }
           len = 1 + operand.length;
           break;
         }
         case kExprReturn: {
-          int count = static_cast<int>(function_env_->sig->return_count());
+          int count = static_cast<int>(sig_->return_count());
           if (count == 0) {
             BUILD(Return, 0, builder_->Buffer(0));
             ssa_env_->Kill();
@@ -821,6 +865,7 @@
           len = 1 + operand.length;
           break;
         }
+        case kExprDeclLocals:
         default:
           error("Invalid opcode");
           return;
@@ -853,7 +898,7 @@
   }
 
   void AddImplicitReturnAtEnd() {
-    int retcount = static_cast<int>(function_env_->sig->return_count());
+    int retcount = static_cast<int>(sig_->return_count());
     if (retcount == 0) {
       BUILD0(ReturnVoid);
       return;
@@ -872,7 +917,7 @@
     for (int index = 0; index < retcount; index++) {
       Tree* tree = trees_[trees_.size() - 1 - index];
       if (buffer) buffer[index] = tree->node;
-      LocalType expected = function_env_->sig->GetReturn(index);
+      LocalType expected = sig_->GetReturn(index);
       if (tree->type != expected) {
         error(limit_, tree->pc,
               "ImplicitReturn[%d] expected type %s, found %s of type %s", index,
@@ -1043,73 +1088,42 @@
         }
         break;
       }
-      case kExprTableSwitch: {
+      case kExprBrTable: {
         if (p->index == 1) {
           // Switch key finished.
           TypeCheckLast(p, kAstI32);
           if (failed()) break;
 
-          TableSwitchOperand operand(this, p->pc());
+          BranchTableOperand operand(this, p->pc());
           DCHECK(Validate(p->pc(), operand, blocks_.size()));
 
-          // Build the switch only if it has more than just a default target.
-          bool build_switch = operand.table_count > 1;
+          // Build a switch only if it has more than just a default target.
+          bool build_switch = operand.table_count > 0;
           TFNode* sw = nullptr;
-          if (build_switch)
-            sw = BUILD(Switch, operand.table_count, p->last()->node);
-
-          // Allocate environments for each case.
-          SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(operand.case_count);
-          for (uint32_t i = 0; i < operand.case_count; i++) {
-            case_envs[i] = UnreachableEnv();
+          if (build_switch) {
+            sw = BUILD(Switch, operand.table_count + 1, p->last()->node);
           }
 
-          ifs_.push_back({nullptr, nullptr, case_envs});
-          SsaEnv* break_env = ssa_env_;
-          PushBlock(break_env);
-          SsaEnv* copy = Steal(break_env);
-          ssa_env_ = copy;
-
-          // Build the environments for each case based on the table.
-          for (uint32_t i = 0; i < operand.table_count; i++) {
-            uint16_t target = operand.read_entry(this, i);
+          // Process the targets of the break table.
+          SsaEnv* prev = ssa_env_;
+          SsaEnv* copy = Steal(prev);
+          for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+            uint32_t target = operand.read_entry(this, i);
             SsaEnv* env = copy;
             if (build_switch) {
-              env = Split(env);
-              env->control = (i == operand.table_count - 1)
-                                 ? BUILD(IfDefault, sw)
-                                 : BUILD(IfValue, i, sw);
+              ssa_env_ = env = Split(env);
+              env->control = i == operand.table_count ? BUILD(IfDefault, sw)
+                                                      : BUILD(IfValue, i, sw);
             }
-            if (target >= 0x8000) {
-              // Targets an outer block.
-              int depth = target - 0x8000;
-              SsaEnv* tenv = blocks_[blocks_.size() - depth - 1].ssa_env;
-              Goto(env, tenv);
-            } else {
-              // Targets a case.
-              Goto(env, case_envs[target]);
-            }
+            SsaEnv* tenv = blocks_[blocks_.size() - target - 1].ssa_env;
+            Goto(env, tenv);
           }
-        }
-
-        if (p->done()) {
-          // Last case. Fall through to the end.
-          Block* block = &blocks_.back();
-          if (p->index > 1) ReduceBreakToExprBlock(p, block);
-          SsaEnv* next = block->ssa_env;
-          blocks_.pop_back();
-          ifs_.pop_back();
-          SetEnv("switch:end", next);
-        } else {
-          // Interior case. Maybe fall through to the next case.
-          SsaEnv* next = ifs_.back().case_envs[p->index - 1];
-          if (p->index > 1 && ssa_env_->go()) Goto(ssa_env_, next);
-          SetEnv("switch:case", next);
+          ssa_env_ = prev;
         }
         break;
       }
       case kExprReturn: {
-        TypeCheckLast(p, function_env_->sig->GetReturn(p->index - 1));
+        TypeCheckLast(p, sig_->GetReturn(p->index - 1));
         if (p->done()) {
           if (build()) {
             int count = p->tree->count;
@@ -1346,6 +1360,7 @@
   }
 
   void SetEnv(const char* reason, SsaEnv* env) {
+#if DEBUG
     TRACE("  env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
           static_cast<int>(blocks_.size()), reason);
     if (FLAG_trace_wasm_decoder && env && env->control) {
@@ -1353,6 +1368,7 @@
       compiler::WasmGraphBuilder::PrintDebugName(env->control);
     }
     TRACE("\n");
+#endif
     ssa_env_ = env;
     if (builder_) {
       builder_->set_control_ptr(&env->control);
@@ -1389,8 +1405,7 @@
           TFNode* b = from->locals[i];
           if (a != b) {
             TFNode* vals[] = {a, b};
-            to->locals[i] =
-                builder_->Phi(function_env_->GetLocalType(i), 2, vals, merge);
+            to->locals[i] = builder_->Phi(local_type_vec_[i], 2, vals, merge);
           }
         }
         break;
@@ -1425,8 +1440,8 @@
               vals[j] = tnode;
             }
             vals[count - 1] = fnode;
-            to->locals[i] = builder_->Phi(function_env_->GetLocalType(i), count,
-                                          vals, merge);
+            to->locals[i] =
+                builder_->Phi(local_type_vec_[i], count, vals, merge);
           }
         }
         break;
@@ -1451,29 +1466,32 @@
     return tnode;
   }
 
-  void BuildInfiniteLoop() {
-    if (ssa_env_->go()) {
-      PrepareForLoop(ssa_env_);
-      SsaEnv* cont_env = ssa_env_;
-      ssa_env_ = Split(ssa_env_);
-      ssa_env_->state = SsaEnv::kReached;
-      Goto(ssa_env_, cont_env);
-    }
-  }
+  void PrepareForLoop(const byte* pc, SsaEnv* env) {
+    if (!env->go()) return;
+    env->state = SsaEnv::kMerged;
+    if (!builder_) return;
 
-  void PrepareForLoop(SsaEnv* env) {
-    if (env->go()) {
-      env->state = SsaEnv::kMerged;
-      if (builder_) {
-        env->control = builder_->Loop(env->control);
-        env->effect = builder_->EffectPhi(1, &env->effect, env->control);
-        builder_->Terminate(env->effect, env->control);
+    env->control = builder_->Loop(env->control);
+    env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+    builder_->Terminate(env->effect, env->control);
+    if (FLAG_wasm_loop_assignment_analysis) {
+      BitVector* assigned = AnalyzeLoopAssignment(pc);
+      if (assigned != nullptr) {
+        // Only introduce phis for variables assigned in this loop.
         for (int i = EnvironmentCount() - 1; i >= 0; i--) {
-          env->locals[i] = builder_->Phi(function_env_->GetLocalType(i), 1,
-                                         &env->locals[i], env->control);
+          if (!assigned->Contains(i)) continue;
+          env->locals[i] = builder_->Phi(local_type_vec_[i], 1, &env->locals[i],
+                                         env->control);
         }
+        return;
       }
     }
+
+    // Conservatively introduce phis for all local variables.
+    for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+      env->locals[i] =
+          builder_->Phi(local_type_vec_[i], 1, &env->locals[i], env->control);
+    }
   }
 
   // Create a complete copy of the {from}.
@@ -1524,7 +1542,7 @@
   }
 
   int EnvironmentCount() {
-    if (builder_) return static_cast<int>(function_env_->GetLocalCount());
+    if (builder_) return static_cast<int>(local_type_vec_.size());
     return 0;  // if we aren't building a graph, don't bother with SSA renaming.
   }
 
@@ -1560,23 +1578,84 @@
     PrintProduction(depth + 1);
   }
 #endif
+
+  BitVector* AnalyzeLoopAssignment(const byte* pc) {
+    if (pc >= limit_) return nullptr;
+    if (*pc != kExprLoop) return nullptr;
+
+    BitVector* assigned =
+        new (zone_) BitVector(static_cast<int>(total_locals_), zone_);
+    // Keep a stack to model the nesting of expressions.
+    std::vector<int> arity_stack;
+    arity_stack.push_back(OpcodeArity(pc));
+    pc += OpcodeLength(pc);
+
+    // Iteratively process all AST nodes nested inside the loop.
+    while (pc < limit_) {
+      WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+      int arity = 0;
+      int length = 1;
+      int assigned_index = -1;
+      if (opcode == kExprSetLocal) {
+        LocalIndexOperand operand(this, pc);
+        if (assigned->length() > 0 &&
+            static_cast<int>(operand.index) < assigned->length()) {
+          // Unverified code might have an out-of-bounds index.
+          // Ignore out-of-bounds indices, as the main verification will fail.
+          assigned->Add(operand.index);
+          assigned_index = operand.index;
+        }
+        arity = 1;
+        length = 1 + operand.length;
+      } else {
+        arity = OpcodeArity(pc);
+        length = OpcodeLength(pc);
+      }
+
+      TRACE("loop-assign module+%-6d %s func+%d: 0x%02x %s", baserel(pc),
+            indentation(), startrel(pc), opcode,
+            WasmOpcodes::OpcodeName(opcode));
+
+      if (assigned_index >= 0) {
+        TRACE(" (assigned local #%d)\n", assigned_index);
+      } else {
+        TRACE("\n");
+      }
+
+      pc += length;
+      arity_stack.push_back(arity);
+      while (arity_stack.back() == 0) {
+        arity_stack.pop_back();
+        if (arity_stack.empty()) return assigned;  // reached end of loop
+        arity_stack.back()--;
+      }
+    }
+    return assigned;
+  }
 };
 
+bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
+                      const byte* end) {
+  base::AccountingAllocator allocator;
+  Zone tmp(&allocator);
+  FunctionBody body = {nullptr, nullptr, nullptr, start, end};
+  SR_WasmDecoder decoder(&tmp, nullptr, body);
+  return decoder.DecodeLocalDecls(decls);
+}
 
-TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
-                          const byte* end) {
-  Zone zone;
-  LR_WasmDecoder decoder(&zone, nullptr);
-  TreeResult result = decoder.Decode(env, base, start, end);
+TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+                          FunctionBody& body) {
+  Zone zone(allocator);
+  SR_WasmDecoder decoder(&zone, nullptr, body);
+  TreeResult result = decoder.Decode();
   return result;
 }
 
-
-TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
-                        const byte* start, const byte* end) {
-  Zone zone;
-  LR_WasmDecoder decoder(&zone, builder);
-  TreeResult result = decoder.Decode(env, base, start, end);
+TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
+                        TFBuilder* builder, FunctionBody& body) {
+  Zone zone(allocator);
+  SR_WasmDecoder decoder(&zone, builder, body);
+  TreeResult result = decoder.Decode();
   return result;
 }
 
@@ -1608,20 +1687,49 @@
 }
 
 int OpcodeLength(const byte* pc, const byte* end) {
-  WasmDecoder decoder(nullptr, pc, end);
+  WasmDecoder decoder(nullptr, nullptr, pc, end);
   return decoder.OpcodeLength(pc);
 }
 
-int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end) {
-  WasmDecoder decoder(env, pc, end);
+int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
+                const byte* end) {
+  WasmDecoder decoder(module, sig, pc, end);
   return decoder.OpcodeArity(pc);
 }
 
-void PrintAst(FunctionEnv* env, const byte* start, const byte* end) {
-  WasmDecoder decoder(env, start, end);
-  const byte* pc = start;
+void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body) {
+  Zone zone(allocator);
+  SR_WasmDecoder decoder(&zone, nullptr, body);
+
+  OFStream os(stdout);
+
+  // Print the function signature.
+  if (body.sig) {
+    os << "// signature: " << *body.sig << std::endl;
+  }
+
+  // Print the local declarations.
+  AstLocalDecls decls(&zone);
+  decoder.DecodeLocalDecls(decls);
+  const byte* pc = decoder.pc();
+  if (body.start != decoder.pc()) {
+    printf("// locals:");
+    for (auto p : decls.local_types) {
+      LocalType type = p.first;
+      uint32_t count = p.second;
+      os << " " << count << " " << WasmOpcodes::TypeName(type);
+    }
+    os << std::endl;
+
+    for (const byte* locals = body.start; locals < pc; locals++) {
+      printf(" 0x%02x,", *locals);
+    }
+    printf("\n");
+  }
+
+  printf("// body: \n");
   std::vector<int> arity_stack;
-  while (pc < end) {
+  while (pc < body.end) {
     int arity = decoder.OpcodeArity(pc);
     size_t length = decoder.OpcodeLength(pc);
 
@@ -1636,6 +1744,35 @@
     for (size_t i = 1; i < length; i++) {
       printf(" 0x%02x,", pc[i]);
     }
+
+    if (body.module) {
+      switch (opcode) {
+        case kExprCallIndirect: {
+          SignatureIndexOperand operand(&decoder, pc);
+          if (decoder.Validate(pc, operand)) {
+            os << " // sig #" << operand.index << ": " << *operand.sig;
+          }
+          break;
+        }
+        case kExprCallImport: {
+          ImportIndexOperand operand(&decoder, pc);
+          if (decoder.Validate(pc, operand)) {
+            os << " // import #" << operand.index << ": " << *operand.sig;
+          }
+          break;
+        }
+        case kExprCallFunction: {
+          FunctionIndexOperand operand(&decoder, pc);
+          if (decoder.Validate(pc, operand)) {
+            os << " // function #" << operand.index << ": " << *operand.sig;
+          }
+          break;
+        }
+        default:
+          break;
+      }
+    }
+
     pc += length;
     printf("\n");
 
@@ -1648,65 +1785,11 @@
   }
 }
 
-// Analyzes loop bodies for static assignments to locals, which helps in
-// reducing the number of phis introduced at loop headers.
-class LoopAssignmentAnalyzer : public WasmDecoder {
- public:
-  LoopAssignmentAnalyzer(Zone* zone, FunctionEnv* function_env) : zone_(zone) {
-    function_env_ = function_env;
-  }
-
-  BitVector* Analyze(const byte* pc, const byte* limit) {
-    Decoder::Reset(pc, limit);
-    if (pc_ >= limit_) return nullptr;
-    if (*pc_ != kExprLoop) return nullptr;
-
-    BitVector* assigned =
-        new (zone_) BitVector(function_env_->total_locals, zone_);
-    // Keep a stack to model the nesting of expressions.
-    std::vector<int> arity_stack;
-    arity_stack.push_back(OpcodeArity(pc_));
-    pc_ += OpcodeLength(pc_);
-
-    // Iteratively process all AST nodes nested inside the loop.
-    while (pc_ < limit_) {
-      WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
-      int arity = 0;
-      int length = 1;
-      if (opcode == kExprSetLocal) {
-        LocalIndexOperand operand(this, pc_);
-        if (assigned->length() > 0 &&
-            static_cast<int>(operand.index) < assigned->length()) {
-          // Unverified code might have an out-of-bounds index.
-          assigned->Add(operand.index);
-        }
-        arity = 1;
-        length = 1 + operand.length;
-      } else {
-        arity = OpcodeArity(pc_);
-        length = OpcodeLength(pc_);
-      }
-
-      pc_ += length;
-      arity_stack.push_back(arity);
-      while (arity_stack.back() == 0) {
-        arity_stack.pop_back();
-        if (arity_stack.empty()) return assigned;  // reached end of loop
-        arity_stack.back()--;
-      }
-    }
-    return assigned;
-  }
-
- private:
-  Zone* zone_;
-};
-
-
-BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
                                            const byte* start, const byte* end) {
-  LoopAssignmentAnalyzer analyzer(zone, env);
-  return analyzer.Analyze(start, end);
+  FunctionBody body = {nullptr, nullptr, nullptr, start, end};
+  SR_WasmDecoder decoder(zone, nullptr, body);
+  return decoder.AnalyzeLoopAssignmentForTesting(start, num_locals);
 }
 
 }  // namespace wasm
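With FunctionEnv gone, the decoder above reads local declarations straight from the function body via DecodeLocalDecls: a varint count of entries, each entry being a varint repetition count followed by a local type byte, appended to local_type_vec_ after the parameter types. A sketch of such a prefix, assuming the kLocal* codes defined in the wasm opcode headers:

  // Declares three i32 locals followed by one f64 local.
  const uint8_t local_decls[] = {
      2,             // two (count, type) entries follow
      3, kLocalI32,  // three locals of type i32
      1, kLocalF64,  // one local of type f64
  };
  // DecodeLocalDecls appends {i32, i32, i32, f64} to local_type_vec_ after the
  // signature's parameter types and sets total_locals_ accordingly.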
diff --git a/src/wasm/ast-decoder.h b/src/wasm/ast-decoder.h
index 465baca..5376e7b 100644
--- a/src/wasm/ast-decoder.h
+++ b/src/wasm/ast-decoder.h
@@ -46,8 +46,7 @@
   int32_t value;
   int length;
   inline ImmI32Operand(Decoder* decoder, const byte* pc) {
-    value = bit_cast<int32_t>(decoder->checked_read_u32(pc, 1, "immi32"));
-    length = 4;
+    value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
   }
 };
 
@@ -55,8 +54,7 @@
   int64_t value;
   int length;
   inline ImmI64Operand(Decoder* decoder, const byte* pc) {
-    value = bit_cast<int64_t>(decoder->checked_read_u64(pc, 1, "immi64"));
-    length = 8;
+    value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
   }
 };
 
@@ -97,8 +95,7 @@
   Block* target;
   int length;
   inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
-    depth = decoder->checked_read_u8(pc, 1, "break depth");
-    length = 1;
+    depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
     target = nullptr;
   }
 };
@@ -107,8 +104,7 @@
   uint32_t count;
   int length;
   inline BlockCountOperand(Decoder* decoder, const byte* pc) {
-    count = decoder->checked_read_u8(pc, 1, "block count");
-    length = 1;
+    count = decoder->checked_read_u32v(pc, 1, &length, "block count");
   }
 };
 
@@ -142,103 +138,55 @@
   }
 };
 
-struct TableSwitchOperand {
-  uint32_t case_count;
+struct BranchTableOperand {
   uint32_t table_count;
   const byte* table;
   int length;
-  inline TableSwitchOperand(Decoder* decoder, const byte* pc) {
-    case_count = decoder->checked_read_u16(pc, 1, "expected #cases");
-    table_count = decoder->checked_read_u16(pc, 3, "expected #entries");
-    length = 4 + table_count * 2;
+  inline BranchTableOperand(Decoder* decoder, const byte* pc) {
+    int varint_length;
+    table_count =
+        decoder->checked_read_u32v(pc, 1, &varint_length, "expected #entries");
+    length = varint_length + (table_count + 1) * sizeof(uint32_t);
 
-    if (decoder->check(pc, 5, table_count * 2, "expected <table entries>")) {
-      table = pc + 5;
+    uint32_t table_start = 1 + varint_length;
+    if (decoder->check(pc, table_start, (table_count + 1) * sizeof(uint32_t),
+                       "expected <table entries>")) {
+      table = pc + table_start;
     } else {
       table = nullptr;
     }
   }
-  inline uint16_t read_entry(Decoder* decoder, int i) {
-    DCHECK(i >= 0 && static_cast<uint32_t>(i) < table_count);
-    return table ? decoder->read_u16(table + i * sizeof(uint16_t)) : 0;
+  inline uint32_t read_entry(Decoder* decoder, int i) {
+    DCHECK(i >= 0 && static_cast<uint32_t>(i) <= table_count);
+    return table ? decoder->read_u32(table + i * sizeof(uint32_t)) : 0;
   }
 };
 
 struct MemoryAccessOperand {
-  bool aligned;
+  uint32_t alignment;
   uint32_t offset;
   int length;
   inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
-    byte bitfield = decoder->checked_read_u8(pc, 1, "memory access byte");
-    aligned = MemoryAccess::AlignmentField::decode(bitfield);
-    if (MemoryAccess::OffsetField::decode(bitfield)) {
-      offset = decoder->checked_read_u32v(pc, 2, &length, "memory offset");
-      length++;
-    } else {
-      offset = 0;
-      length = 1;
-    }
+    int alignment_length;
+    alignment =
+        decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
+    int offset_length;
+    offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
+                                        &offset_length, "offset");
+    length = alignment_length + offset_length;
   }
 };
 
 typedef compiler::WasmGraphBuilder TFBuilder;
 struct ModuleEnv;  // forward declaration of module interface.
 
-// Interface the function environment during decoding, include the signature
-// and number of locals.
-struct FunctionEnv {
-  ModuleEnv* module;             // module environment
-  FunctionSig* sig;              // signature of this function
-  uint32_t local_i32_count;      // number of int32 locals
-  uint32_t local_i64_count;      // number of int64 locals
-  uint32_t local_f32_count;      // number of float32 locals
-  uint32_t local_f64_count;      // number of float64 locals
-  uint32_t total_locals;         // sum of parameters and all locals
-
-  uint32_t GetLocalCount() { return total_locals; }
-  LocalType GetLocalType(uint32_t index) {
-    if (index < static_cast<uint32_t>(sig->parameter_count())) {
-      return sig->GetParam(index);
-    }
-    index -= static_cast<uint32_t>(sig->parameter_count());
-    if (index < local_i32_count) return kAstI32;
-    index -= local_i32_count;
-    if (index < local_i64_count) return kAstI64;
-    index -= local_i64_count;
-    if (index < local_f32_count) return kAstF32;
-    index -= local_f32_count;
-    if (index < local_f64_count) return kAstF64;
-    return kAstStmt;
-  }
-
-  void AddLocals(LocalType type, uint32_t count) {
-    switch (type) {
-      case kAstI32:
-        local_i32_count += count;
-        break;
-      case kAstI64:
-        local_i64_count += count;
-        break;
-      case kAstF32:
-        local_f32_count += count;
-        break;
-      case kAstF64:
-        local_f64_count += count;
-        break;
-      default:
-        UNREACHABLE();
-    }
-    total_locals += count;
-    DCHECK_EQ(total_locals,
-              (sig->parameter_count() + local_i32_count + local_i64_count +
-               local_f32_count + local_f64_count));
-  }
-
-  void SumLocals() {
-    total_locals = static_cast<uint32_t>(sig->parameter_count()) +
-                   local_i32_count + local_i64_count + local_f32_count +
-                   local_f64_count;
-  }
+// All of the various data structures necessary to decode a function body.
+struct FunctionBody {
+  ModuleEnv* module;  // module environment
+  FunctionSig* sig;   // function signature
+  const byte* base;   // base of the module bytes, for error reporting
+  const byte* start;  // start of the function body
+  const byte* end;    // end of the function body
 };
 
 struct Tree;
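
For reference, BranchTableOperand above now expects a LEB128 entry count at pc + 1 followed by (table_count + 1) fixed-width little-endian uint32 targets, the extra entry being the default target. A minimal standalone sketch of producing bytes in that shape (illustrative only; EncodeBranchTable is not part of this change):

  #include <cstdint>
  #include <vector>

  // Illustrative helper: emit a branch-table operand in the layout
  // BranchTableOperand reads: LEB128 entry count, then (count + 1)
  // little-endian uint32 targets, the last one being the default.
  std::vector<uint8_t> EncodeBranchTable(const std::vector<uint32_t>& targets,
                                         uint32_t default_target) {
    std::vector<uint8_t> out;
    uint32_t count = static_cast<uint32_t>(targets.size());
    do {  // unsigned LEB128 of the entry count
      uint8_t b = count & 0x7f;
      count >>= 7;
      out.push_back(count != 0 ? (b | 0x80) : b);
    } while (count != 0);
    auto push_u32 = [&out](uint32_t v) {  // one fixed-width table entry
      for (int i = 0; i < 4; i++) out.push_back((v >> (8 * i)) & 0xff);
    };
    for (uint32_t t : targets) push_u32(t);
    push_u32(default_target);
    return out;
  }
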
@@ -246,21 +194,25 @@
 
 std::ostream& operator<<(std::ostream& os, const Tree& tree);
 
-TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
-                          const byte* end);
-TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
-                        const byte* start, const byte* end);
+TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+                          FunctionBody& body);
+TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
+                        TFBuilder* builder, FunctionBody& body);
+void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body);
 
-void PrintAst(FunctionEnv* env, const byte* start, const byte* end);
-
-inline TreeResult VerifyWasmCode(FunctionEnv* env, const byte* start,
-                                 const byte* end) {
-  return VerifyWasmCode(env, nullptr, start, end);
+inline TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+                                 ModuleEnv* module, FunctionSig* sig,
+                                 const byte* start, const byte* end) {
+  FunctionBody body = {module, sig, nullptr, start, end};
+  return VerifyWasmCode(allocator, body);
 }
 
-inline TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env,
-                               const byte* start, const byte* end) {
-  return BuildTFGraph(builder, env, nullptr, start, end);
+inline TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
+                               TFBuilder* builder, ModuleEnv* module,
+                               FunctionSig* sig, const byte* start,
+                               const byte* end) {
+  FunctionBody body = {module, sig, nullptr, start, end};
+  return BuildTFGraph(allocator, builder, body);
 }
 
 enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
@@ -268,14 +220,31 @@
 ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
                                                       int*, uint32_t*);
 
-BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+struct AstLocalDecls {
+  // The size of the encoded declarations.
+  uint32_t decls_encoded_size;  // size of encoded declarations
+
+  // Total number of locals.
+  uint32_t total_local_count;
+
+  // List of {local type, count} pairs.
+  ZoneVector<std::pair<LocalType, uint32_t>> local_types;
+
+  // Constructor initializes the vector.
+  explicit AstLocalDecls(Zone* zone)
+      : decls_encoded_size(0), total_local_count(0), local_types(zone) {}
+};
+
+bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start, const byte* end);
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
                                            const byte* start, const byte* end);
 
 // Computes the length of the opcode at the given address.
 int OpcodeLength(const byte* pc, const byte* end);
 
 // Computes the arity (number of sub-nodes) of the opcode at the given address.
-int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end);
+int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
+                const byte* end);
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/decoder.h b/src/wasm/decoder.h
index 0e88eda..f9de2e1 100644
--- a/src/wasm/decoder.h
+++ b/src/wasm/decoder.h
@@ -77,33 +77,44 @@
     return check(base, offset, 8, msg) ? read_u64(base + offset) : 0;
   }
 
+  // Reads a variable-length unsigned integer (little endian).
   uint32_t checked_read_u32v(const byte* base, int offset, int* length,
-                             const char* msg = "expected LEB128") {
-    if (!check(base, offset, 1, msg)) {
-      *length = 0;
-      return 0;
-    }
+                             const char* msg = "expected LEB32") {
+    return checked_read_leb<uint32_t, false>(base, offset, length, msg);
+  }
 
-    const ptrdiff_t kMaxDiff = 5;  // maximum 5 bytes.
-    const byte* ptr = base + offset;
-    const byte* end = ptr + kMaxDiff;
-    if (end > limit_) end = limit_;
-    int shift = 0;
-    byte b = 0;
-    uint32_t result = 0;
-    while (ptr < end) {
-      b = *ptr++;
-      result = result | ((b & 0x7F) << shift);
-      if ((b & 0x80) == 0) break;
-      shift += 7;
+  // Reads a variable-length signed integer (little endian).
+  int32_t checked_read_i32v(const byte* base, int offset, int* length,
+                            const char* msg = "expected SLEB32") {
+    uint32_t result =
+        checked_read_leb<uint32_t, true>(base, offset, length, msg);
+    if (*length == 5) return bit_cast<int32_t>(result);
+    if (*length > 0) {
+      int shift = 32 - 7 * *length;
+      // Perform sign extension.
+      return bit_cast<int32_t>(result << shift) >> shift;
     }
-    DCHECK_LE(ptr - (base + offset), kMaxDiff);
-    *length = static_cast<int>(ptr - (base + offset));
-    if (ptr == end && (b & 0x80)) {
-      error(base, ptr, msg);
-      return 0;
+    return 0;
+  }
+
+  // Reads a variable-length unsigned integer (little endian).
+  uint64_t checked_read_u64v(const byte* base, int offset, int* length,
+                             const char* msg = "expected LEB64") {
+    return checked_read_leb<uint64_t, false>(base, offset, length, msg);
+  }
+
+  // Reads a variable-length signed integer (little endian).
+  int64_t checked_read_i64v(const byte* base, int offset, int* length,
+                            const char* msg = "expected SLEB64") {
+    uint64_t result =
+        checked_read_leb<uint64_t, true>(base, offset, length, msg);
+    if (*length == 10) return bit_cast<int64_t>(result);
+    if (*length > 0) {
+      int shift = 64 - 7 * *length;
+      // Perform sign extension.
+      return bit_cast<int64_t>(result << shift) >> shift;
     }
-    return result;
+    return 0;
   }
 
   // Reads a single 16-bit unsigned integer (little endian).
@@ -214,6 +225,8 @@
       *length = static_cast<int>(pc_ - pos);
       if (pc_ == end && (b & 0x80)) {
         error(pc_ - 1, "varint too large");
+      } else if (*length == 0) {
+        error(pc_, "varint of length 0");
       } else {
         TRACE("= %u\n", result);
       }
@@ -222,9 +235,22 @@
     return traceOffEnd<uint32_t>();
   }
 
+  // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
+  void consume_bytes(int size) {
+    if (checkAvailable(size)) {
+      pc_ += size;
+    } else {
+      pc_ = limit_;
+    }
+  }
+
   // Check that at least {size} bytes exist between {pc_} and {limit_}.
   bool checkAvailable(int size) {
-    if (pc_ < start_ || (pc_ + size) > limit_) {
+    intptr_t pc_overflow_value = std::numeric_limits<intptr_t>::max() - size;
+    if (size < 0 || (intptr_t)pc_ > pc_overflow_value) {
+      error(pc_, nullptr, "reading %d bytes would underflow/overflow", size);
+      return false;
+    } else if (pc_ < start_ || limit_ < (pc_ + size)) {
       error(pc_, nullptr, "expected %d bytes, fell off end", size);
       return false;
     } else {
@@ -232,12 +258,6 @@
     }
   }
 
-  bool RangeOk(const byte* pc, int length) {
-    if (pc < start_ || pc_ >= limit_) return false;
-    if ((pc + length) >= limit_) return false;
-    return true;
-  }
-
   void error(const char* msg) { error(pc_, nullptr, msg); }
 
   void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
@@ -283,12 +303,13 @@
   Result<T> toResult(T val) {
     Result<T> result;
     if (error_pc_) {
+      TRACE("Result error: %s\n", error_msg_.get());
       result.error_code = kError;
       result.start = start_;
       result.error_pc = error_pc_;
       result.error_pt = error_pt_;
-      result.error_msg = error_msg_;
-      error_msg_.Reset(nullptr);
+      // Transfer ownership of the error to the result.
+      result.error_msg.Reset(error_msg_.Detach());
     } else {
       result.error_code = kSuccess;
     }
@@ -308,7 +329,12 @@
   }
 
   bool ok() const { return error_pc_ == nullptr; }
-  bool failed() const { return error_pc_ != nullptr; }
+  bool failed() const { return !error_msg_.is_empty(); }
+  bool more() const { return pc_ < limit_; }
+
+  const byte* start() { return start_; }
+  const byte* pc() { return pc_; }
+  uint32_t pc_offset() { return static_cast<uint32_t>(pc_ - start_); }
 
  protected:
   const byte* start_;
@@ -318,6 +344,60 @@
   const byte* error_pc_;
   const byte* error_pt_;
   base::SmartArrayPointer<char> error_msg_;
+
+ private:
+  template <typename IntType, bool is_signed>
+  IntType checked_read_leb(const byte* base, int offset, int* length,
+                           const char* msg) {
+    if (!check(base, offset, 1, msg)) {
+      *length = 0;
+      return 0;
+    }
+
+    const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
+    const byte* ptr = base + offset;
+    const byte* end = ptr + kMaxLength;
+    if (end > limit_) end = limit_;
+    int shift = 0;
+    byte b = 0;
+    IntType result = 0;
+    while (ptr < end) {
+      b = *ptr++;
+      result = result | (static_cast<IntType>(b & 0x7F) << shift);
+      if ((b & 0x80) == 0) break;
+      shift += 7;
+    }
+    DCHECK_LE(ptr - (base + offset), kMaxLength);
+    *length = static_cast<int>(ptr - (base + offset));
+    if (ptr == end) {
+      // Check there are no bits set beyond the bitwidth of {IntType}.
+      const int kExtraBits = (1 + kMaxLength * 7) - (sizeof(IntType) * 8);
+      const byte kExtraBitsMask =
+          static_cast<byte>((0xFF << (8 - kExtraBits)) & 0xFF);
+      int extra_bits_value;
+      if (is_signed) {
+        // A signed-LEB128 must sign-extend the final byte, excluding its
+        // most-significant bit. E.g., for a 32-bit LEB128:
+        //   kExtraBits = 4
+        //   kExtraBitsMask = 0xf0
+        // If b is 0x0f, the value is negative, so extra_bits_value is 0x70.
+        // If b is 0x03, the value is positive, so extra_bits_value is 0x00.
+        extra_bits_value = (static_cast<int8_t>(b << kExtraBits) >> 8) &
+                           kExtraBitsMask & ~0x80;
+      } else {
+        extra_bits_value = 0;
+      }
+      if (*length == kMaxLength && (b & kExtraBitsMask) != extra_bits_value) {
+        error(base, ptr, "extra bits in varint");
+        return 0;
+      }
+      if ((b & 0x80) != 0) {
+        error(base, ptr, msg);
+        return 0;
+      }
+    }
+    return result;
+  }
 };
 
 #undef TRACE
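
The new checked_read_i32v / checked_read_i64v above recover the sign by shifting the decoded bits up to the top of the word and arithmetically shifting them back down. A minimal standalone sketch of that decode-and-sign-extend step for SLEB32 (illustrative only; DecodeSleb32 is not V8 code and omits the decoder's error reporting):

  #include <cstdint>
  #include <cstring>

  // Illustrative only: decode an SLEB32 value and sign-extend it from the
  // last 7-bit group consumed, mirroring checked_read_i32v above.
  int32_t DecodeSleb32(const uint8_t* p, const uint8_t* end, int* length) {
    uint32_t result = 0;
    int shift = 0;
    const uint8_t* pos = p;
    while (p < end && shift < 35) {
      uint8_t b = *p++;
      result |= static_cast<uint32_t>(b & 0x7f) << shift;
      shift += 7;
      if ((b & 0x80) == 0) break;
    }
    *length = static_cast<int>(p - pos);
    if (*length == 0) return 0;     // ran off the end of the input
    int extend = 32 - 7 * *length;  // bits that were never written
    if (extend < 0) extend = 0;     // a full 5-byte encoding needs none
    uint32_t shifted = result << extend;
    int32_t value;
    std::memcpy(&value, &shifted, sizeof(value));  // bit_cast
    return value >> extend;  // arithmetic shift performs the sign extension
  }

For example, the single byte 0x7f decodes to -1 with *length == 1, and the pair 0xff 0x7f also decodes to -1 with *length == 2.
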
diff --git a/src/wasm/encoder.cc b/src/wasm/encoder.cc
index d80a275..92e6b11 100644
--- a/src/wasm/encoder.cc
+++ b/src/wasm/encoder.cc
@@ -10,11 +10,21 @@
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/encoder.h"
+#include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
 
 #include "src/v8memory.h"
 
+#if DEBUG
+#define TRACE(...)                                    \
+  do {                                                \
+    if (FLAG_trace_wasm_encoder) PrintF(__VA_ARGS__); \
+  } while (false)
+#else
+#define TRACE(...)
+#endif
+
 namespace v8 {
 namespace internal {
 namespace wasm {
@@ -40,6 +50,11 @@
   *b += 4;
 }
 
+// Every section starts with its size, but the size is not known up front.
+// We emit a fixed-width (padded) varint and fix it up once the size is known.
+//
+// TODO(jfb) Not strictly necessary since sizes are calculated ahead of time.
+const size_t padded_varint = 5;
 
 void EmitVarInt(byte** b, size_t val) {
   while (true) {
@@ -54,8 +69,47 @@
     }
   }
 }
-}  // namespace
 
+size_t SizeOfVarInt(size_t value) {
+  size_t size = 0;
+  do {
+    size++;
+    value = value >> 7;
+  } while (value > 0);
+  return size;
+}
+
+void FixupSection(byte* start, byte* end) {
+  // Same as EmitVarInt, but fixed-width with zeroes in the MSBs.
+  size_t val = end - start - padded_varint;
+  TRACE("  fixup %u\n", (unsigned)val);
+  for (size_t pos = 0; pos != padded_varint; ++pos) {
+    size_t next = val >> 7;
+    byte out = static_cast<byte>(val & 0x7f);
+    if (pos != padded_varint - 1) {
+      *(start++) = 0x80 | out;
+      val = next;
+    } else {
+      *(start++) = out;
+      // TODO(jfb) check that the pre-allocated fixup size isn't overflowed.
+    }
+  }
+}
+
+// Returns the start of the section, i.e. where its varint size placeholder is.
+byte* EmitSection(WasmSection::Code code, byte** b) {
+  byte* start = *b;
+  const char* name = WasmSection::getName(code);
+  size_t length = WasmSection::getNameLength(code);
+  TRACE("emit section: %s\n", name);
+  for (size_t padding = 0; padding != padded_varint; ++padding) {
+    EmitUint8(b, 0xff);  // Will get fixed up later.
+  }
+  EmitVarInt(b, length);  // Section name string size.
+  for (size_t i = 0; i != length; ++i) EmitUint8(b, name[i]);
+  return start;
+}
+}  // namespace
 
 struct WasmFunctionBuilder::Type {
   bool param_;
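
The padded_varint / FixupSection scheme above reserves a fixed five-byte LEB128 placeholder for each section size and patches it in place once the section has been fully emitted. A minimal standalone sketch of the same reserve-and-fix-up idea (illustrative only; BeginSection/EndSection are made-up names, not the ones in this change):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  constexpr size_t kPaddedVarint = 5;  // fixed placeholder width

  // Reserve a 5-byte placeholder for a section size; returns its offset.
  size_t BeginSection(std::vector<uint8_t>* buf) {
    size_t pos = buf->size();
    buf->insert(buf->end(), kPaddedVarint, 0xff);  // fixed up later
    return pos;
  }

  // Patch the placeholder with the number of bytes emitted after it.
  void EndSection(std::vector<uint8_t>* buf, size_t pos) {
    size_t val = buf->size() - pos - kPaddedVarint;
    for (size_t i = 0; i < kPaddedVarint; ++i) {
      uint8_t out = static_cast<uint8_t>(val & 0x7f);
      val >>= 7;
      // All but the last byte keep the continuation bit so the width stays 5.
      (*buf)[pos + i] = (i + 1 < kPaddedVarint) ? (out | 0x80) : out;
    }
  }

As in FixupSection, the patched size excludes the placeholder bytes themselves, so a caller would do: size_t s = BeginSection(&buf); /* emit name and payload */ EndSection(&buf, s);
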
@@ -120,16 +174,48 @@
   body_.push_back(immediate);
 }
 
+void WasmFunctionBuilder::EmitWithU8U8(WasmOpcode opcode, const byte imm1,
+                                       const byte imm2) {
+  body_.push_back(static_cast<byte>(opcode));
+  body_.push_back(imm1);
+  body_.push_back(imm2);
+}
 
-uint32_t WasmFunctionBuilder::EmitEditableImmediate(const byte immediate) {
-  body_.push_back(immediate);
+void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode,
+                                         uint32_t immediate) {
+  body_.push_back(static_cast<byte>(opcode));
+  size_t immediate_size = SizeOfVarInt(immediate);
+  body_.insert(body_.end(), immediate_size, 0);
+  byte* p = &body_[body_.size() - immediate_size];
+  EmitVarInt(&p, immediate);
+}
+
+uint32_t WasmFunctionBuilder::EmitEditableVarIntImmediate() {
+  // Guess that the immediate will be 1 byte. If it is more, we'll have to
+  // shift everything down.
+  body_.push_back(0);
   return static_cast<uint32_t>(body_.size()) - 1;
 }
 
+void WasmFunctionBuilder::EditVarIntImmediate(uint32_t offset,
+                                              const uint32_t immediate) {
+  uint32_t immediate_size = static_cast<uint32_t>(SizeOfVarInt(immediate));
+  // In EmitEditableVarIntImmediate, we guessed that we'd only need one byte.
+  // If we need more, shift everything down to make room for the larger
+  // immediate.
+  if (immediate_size > 1) {
+    uint32_t diff = immediate_size - 1;
+    body_.insert(body_.begin() + offset, diff, 0);
 
-void WasmFunctionBuilder::EditImmediate(uint32_t offset, const byte immediate) {
-  DCHECK(offset < body_.size());
-  body_[offset] = immediate;
+    for (size_t i = 0; i < local_indices_.size(); ++i) {
+      if (local_indices_[i] >= offset) {
+        local_indices_[i] += diff;
+      }
+    }
+  }
+  DCHECK(offset + immediate_size <= body_.size());
+  byte* p = &body_[offset];
+  EmitVarInt(&p, immediate);
 }
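
EmitEditableVarIntImmediate / EditVarIntImmediate above reserve a single placeholder byte for a varint immediate and, if the final value needs a longer LEB128 encoding, grow the body and shift the following bytes down before patching. A minimal standalone sketch of that reserve-then-patch step (illustrative only; ReserveVarint/PatchVarint are made-up names, and the real code must also fix up recorded offsets such as local_indices_):

  #include <cstdint>
  #include <vector>

  // Reserve a one-byte varint placeholder; returns its offset in the body.
  size_t ReserveVarint(std::vector<uint8_t>* body) {
    body->push_back(0);  // optimistic one-byte guess
    return body->size() - 1;
  }

  // Patch the real value in, growing the body if the LEB128 encoding of
  // {value} turns out to need more than the single guessed byte.
  void PatchVarint(std::vector<uint8_t>* body, size_t offset, uint32_t value) {
    size_t size = 1;
    for (uint32_t v = value >> 7; v != 0; v >>= 7) size++;
    if (size > 1) {
      // Shift everything at and after the placeholder down by (size - 1).
      body->insert(body->begin() + offset, size - 1, 0);
    }
    for (size_t i = 0; i < size; ++i) {
      uint8_t b = static_cast<uint8_t>(value & 0x7f);
      value >>= 7;
      (*body)[offset + i] = (i + 1 < size) ? (b | 0x80) : b;
    }
  }
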
 
 
@@ -144,7 +230,6 @@
     for (int i = 0; i < name_length; i++) {
       name_.push_back(*(name + i));
     }
-    name_.push_back('\0');
   }
 }
 
@@ -250,15 +335,25 @@
 
 uint32_t WasmFunctionEncoder::HeaderSize() const {
   uint32_t size = 3;
-  if (HasLocals()) size += 8;
   if (!external_) size += 2;
-  if (HasName()) size += 4;
+  if (HasName()) {
+    uint32_t name_size = NameSize();
+    size += static_cast<uint32_t>(SizeOfVarInt(name_size)) + name_size;
+  }
   return size;
 }
 
 
 uint32_t WasmFunctionEncoder::BodySize(void) const {
-  return external_ ? 0 : static_cast<uint32_t>(body_.size());
+  // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
+  LocalDeclEncoder local_decl;
+  local_decl.AddLocals(local_i32_count_, kAstI32);
+  local_decl.AddLocals(local_i64_count_, kAstI64);
+  local_decl.AddLocals(local_f32_count_, kAstF32);
+  local_decl.AddLocals(local_f64_count_, kAstF64);
+
+  return external_ ? 0
+                   : static_cast<uint32_t>(body_.size() + local_decl.Size());
 }
 
 
@@ -271,28 +366,29 @@
                                     byte** body) const {
   uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
                       (external_ ? kDeclFunctionImport : 0) |
-                      (HasLocals() ? kDeclFunctionLocals : 0) |
                       (HasName() ? kDeclFunctionName : 0);
 
   EmitUint8(header, decl_bits);
   EmitUint16(header, signature_index_);
 
   if (HasName()) {
-    uint32_t name_offset = static_cast<uint32_t>(*body - buffer);
-    EmitUint32(header, name_offset);
-    std::memcpy(*body, &name_[0], name_.size());
-    (*body) += name_.size();
+    EmitVarInt(header, NameSize());
+    for (size_t i = 0; i < name_.size(); ++i) {
+      EmitUint8(header, name_[i]);
+    }
   }
 
-  if (HasLocals()) {
-    EmitUint16(header, local_i32_count_);
-    EmitUint16(header, local_i64_count_);
-    EmitUint16(header, local_f32_count_);
-    EmitUint16(header, local_f64_count_);
-  }
 
   if (!external_) {
-    EmitUint16(header, static_cast<uint16_t>(body_.size()));
+    // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
+    LocalDeclEncoder local_decl;
+    local_decl.AddLocals(local_i32_count_, kAstI32);
+    local_decl.AddLocals(local_i64_count_, kAstI64);
+    local_decl.AddLocals(local_f32_count_, kAstF32);
+    local_decl.AddLocals(local_f64_count_, kAstF64);
+
+    EmitUint16(header, static_cast<uint16_t>(body_.size() + local_decl.Size()));
+    (*header) += local_decl.Emit(*header);
     if (body_.size() > 0) {
       std::memcpy(*header, &body_[0], body_.size());
       (*header) += body_.size();
@@ -323,17 +419,13 @@
 
 void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
                                        byte** body) const {
-  uint32_t body_offset = static_cast<uint32_t>(*body - buffer);
-  EmitUint32(header, dest_);
-  EmitUint32(header, body_offset);
-  EmitUint32(header, static_cast<uint32_t>(data_.size()));
-  EmitUint8(header, 1);  // init
+  EmitVarInt(header, dest_);
+  EmitVarInt(header, static_cast<uint32_t>(data_.size()));
 
-  std::memcpy(*body, &data_[0], data_.size());
-  (*body) += data_.size();
+  std::memcpy(*header, &data_[0], data_.size());
+  (*header) += data_.size();
 }
 
-
 WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
     : zone_(zone),
       signatures_(zone),
@@ -341,8 +433,8 @@
       data_segments_(zone),
       indirect_functions_(zone),
       globals_(zone),
-      signature_map_(zone) {}
-
+      signature_map_(zone),
+      start_function_index_(-1) {}
 
 uint16_t WasmModuleBuilder::AddFunction() {
   functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
@@ -399,6 +491,9 @@
   indirect_functions_.push_back(index);
 }
 
+void WasmModuleBuilder::MarkStartFunction(uint16_t index) {
+  start_function_index_ = index;
+}
 
 WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
   WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
@@ -417,6 +512,7 @@
   for (auto global : globals_) {
     writer->globals_.push_back(global);
   }
+  writer->start_function_index_ = start_function_index_;
   return writer;
 }
 
@@ -434,7 +530,6 @@
       indirect_functions_(zone),
       globals_(zone) {}
 
-
 struct Sizes {
   size_t header_size;
   size_t body_size;
@@ -446,80 +541,124 @@
     body_size += body;
   }
 
-  void AddSection(size_t size) {
-    if (size > 0) {
-      Add(1, 0);
-      while (size > 0) {
-        Add(1, 0);
-        size = size >> 7;
-      }
-    }
+  void AddSection(WasmSection::Code code, size_t other_size) {
+    Add(padded_varint + SizeOfVarInt(WasmSection::getNameLength(code)) +
+            WasmSection::getNameLength(code),
+        0);
+    if (other_size) Add(SizeOfVarInt(other_size), 0);
   }
 };
 
-
 WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
   Sizes sizes = {0, 0};
 
-  sizes.Add(1, 0);
+  sizes.Add(2 * sizeof(uint32_t), 0);  // header
+
+  sizes.AddSection(WasmSection::Code::Memory, 0);
   sizes.Add(kDeclMemorySize, 0);
+  TRACE("Size after memory: %u, %u\n", (unsigned)sizes.header_size,
+        (unsigned)sizes.body_size);
 
-  sizes.AddSection(signatures_.size());
-  for (auto sig : signatures_) {
-    sizes.Add(2 + sig->parameter_count(), 0);
-  }
-
-  sizes.AddSection(globals_.size());
   if (globals_.size() > 0) {
-    sizes.Add(kDeclGlobalSize * globals_.size(), 0);
+    sizes.AddSection(WasmSection::Code::Globals, globals_.size());
+    /* These globals never have names, so each one is always 3 bytes. */
+    sizes.Add(3 * globals_.size(), 0);
+    TRACE("Size after globals: %u, %u\n", (unsigned)sizes.header_size,
+          (unsigned)sizes.body_size);
   }
 
-  sizes.AddSection(functions_.size());
-  for (auto function : functions_) {
-    sizes.Add(function->HeaderSize() + function->BodySize(),
-              function->NameSize());
+  if (signatures_.size() > 0) {
+    sizes.AddSection(WasmSection::Code::Signatures, signatures_.size());
+    for (auto sig : signatures_) {
+      sizes.Add(
+          1 + SizeOfVarInt(sig->parameter_count()) + sig->parameter_count(), 0);
+    }
+    TRACE("Size after signatures: %u, %u\n", (unsigned)sizes.header_size,
+          (unsigned)sizes.body_size);
   }
 
-  sizes.AddSection(data_segments_.size());
-  for (auto segment : data_segments_) {
-    sizes.Add(segment->HeaderSize(), segment->BodySize());
+  if (functions_.size() > 0) {
+    sizes.AddSection(WasmSection::Code::Functions, functions_.size());
+    for (auto function : functions_) {
+      sizes.Add(function->HeaderSize() + function->BodySize(),
+                function->NameSize());
+    }
+    TRACE("Size after functions: %u, %u\n", (unsigned)sizes.header_size,
+          (unsigned)sizes.body_size);
   }
 
-  sizes.AddSection(indirect_functions_.size());
-  sizes.Add(2 * static_cast<uint32_t>(indirect_functions_.size()), 0);
+  if (start_function_index_ >= 0) {
+    sizes.AddSection(WasmSection::Code::StartFunction, 0);
+    sizes.Add(SizeOfVarInt(start_function_index_), 0);
+    TRACE("Size after start: %u, %u\n", (unsigned)sizes.header_size,
+          (unsigned)sizes.body_size);
+  }
 
-  if (sizes.body_size > 0) sizes.Add(1, 0);
+  if (data_segments_.size() > 0) {
+    sizes.AddSection(WasmSection::Code::DataSegments, data_segments_.size());
+    for (auto segment : data_segments_) {
+      sizes.Add(segment->HeaderSize(), segment->BodySize());
+    }
+    TRACE("Size after data segments: %u, %u\n", (unsigned)sizes.header_size,
+          (unsigned)sizes.body_size);
+  }
+
+  if (indirect_functions_.size() > 0) {
+    sizes.AddSection(WasmSection::Code::FunctionTable,
+                     indirect_functions_.size());
+    for (auto function_index : indirect_functions_) {
+      sizes.Add(SizeOfVarInt(function_index), 0);
+    }
+    TRACE("Size after indirect functions: %u, %u\n",
+          (unsigned)sizes.header_size, (unsigned)sizes.body_size);
+  }
+
+  if (sizes.body_size > 0) {
+    sizes.AddSection(WasmSection::Code::End, 0);
+    TRACE("Size after end: %u, %u\n", (unsigned)sizes.header_size,
+          (unsigned)sizes.body_size);
+  }
 
   ZoneVector<uint8_t> buffer_vector(sizes.total(), zone);
   byte* buffer = &buffer_vector[0];
   byte* header = buffer;
   byte* body = buffer + sizes.header_size;
 
+  // -- emit magic -------------------------------------------------------------
+  TRACE("emit magic\n");
+  EmitUint32(&header, kWasmMagic);
+  EmitUint32(&header, kWasmVersion);
+
   // -- emit memory declaration ------------------------------------------------
-  EmitUint8(&header, kDeclMemory);
-  EmitUint8(&header, 16);  // min memory size
-  EmitUint8(&header, 16);  // max memory size
-  EmitUint8(&header, 0);   // memory export
+  {
+    byte* section = EmitSection(WasmSection::Code::Memory, &header);
+    EmitVarInt(&header, 16);  // min memory size
+    EmitVarInt(&header, 16);  // max memory size
+    EmitUint8(&header, 0);    // memory export
+    static_assert(kDeclMemorySize == 3, "memory size must match emit above");
+    FixupSection(section, header);
+  }
 
   // -- emit globals -----------------------------------------------------------
   if (globals_.size() > 0) {
-    EmitUint8(&header, kDeclGlobals);
+    byte* section = EmitSection(WasmSection::Code::Globals, &header);
     EmitVarInt(&header, globals_.size());
 
     for (auto global : globals_) {
-      EmitUint32(&header, 0);
+      EmitVarInt(&header, 0);  // Length of the global name.
       EmitUint8(&header, WasmOpcodes::MemTypeCodeFor(global.first));
       EmitUint8(&header, global.second);
     }
+    FixupSection(section, header);
   }
 
   // -- emit signatures --------------------------------------------------------
   if (signatures_.size() > 0) {
-    EmitUint8(&header, kDeclSignatures);
+    byte* section = EmitSection(WasmSection::Code::Signatures, &header);
     EmitVarInt(&header, signatures_.size());
 
     for (FunctionSig* sig : signatures_) {
-      EmitUint8(&header, static_cast<byte>(sig->parameter_count()));
+      EmitVarInt(&header, sig->parameter_count());
       if (sig->return_count() > 0) {
         EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn()));
       } else {
@@ -529,39 +668,53 @@
         EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
       }
     }
+    FixupSection(section, header);
   }
 
   // -- emit functions ---------------------------------------------------------
   if (functions_.size() > 0) {
-    EmitUint8(&header, kDeclFunctions);
+    byte* section = EmitSection(WasmSection::Code::Functions, &header);
     EmitVarInt(&header, functions_.size());
 
     for (auto func : functions_) {
       func->Serialize(buffer, &header, &body);
     }
+    FixupSection(section, header);
+  }
+
+  // -- emit start function index ----------------------------------------------
+  if (start_function_index_ >= 0) {
+    byte* section = EmitSection(WasmSection::Code::StartFunction, &header);
+    EmitVarInt(&header, start_function_index_);
+    FixupSection(section, header);
   }
 
   // -- emit data segments -----------------------------------------------------
   if (data_segments_.size() > 0) {
-    EmitUint8(&header, kDeclDataSegments);
+    byte* section = EmitSection(WasmSection::Code::DataSegments, &header);
     EmitVarInt(&header, data_segments_.size());
 
     for (auto segment : data_segments_) {
       segment->Serialize(buffer, &header, &body);
     }
+    FixupSection(section, header);
   }
 
   // -- emit function table ----------------------------------------------------
   if (indirect_functions_.size() > 0) {
-    EmitUint8(&header, kDeclFunctionTable);
+    byte* section = EmitSection(WasmSection::Code::FunctionTable, &header);
     EmitVarInt(&header, indirect_functions_.size());
 
     for (auto index : indirect_functions_) {
-      EmitUint16(&header, index);
+      EmitVarInt(&header, index);
     }
+    FixupSection(section, header);
   }
 
-  if (sizes.body_size > 0) EmitUint8(&header, kDeclEnd);
+  if (sizes.body_size > 0) {
+    byte* section = EmitSection(WasmSection::Code::End, &header);
+    FixupSection(section, header);
+  }
 
   return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
 }
diff --git a/src/wasm/encoder.h b/src/wasm/encoder.h
index 7b651bf..49a7bf7 100644
--- a/src/wasm/encoder.h
+++ b/src/wasm/encoder.h
@@ -42,11 +42,6 @@
   ZoneVector<uint8_t> body_;
   ZoneVector<char> name_;
 
-  bool HasLocals() const {
-    return (local_i32_count_ + local_i64_count_ + local_f32_count_ +
-            local_f64_count_) > 0;
-  }
-
   bool HasName() const { return (exported_ || external_) && name_.size() > 0; }
 };
 
@@ -60,8 +55,10 @@
                 const uint32_t* local_indices, uint32_t indices_size);
   void Emit(WasmOpcode opcode);
   void EmitWithU8(WasmOpcode opcode, const byte immediate);
-  uint32_t EmitEditableImmediate(const byte immediate);
-  void EditImmediate(uint32_t offset, const byte immediate);
+  void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
+  void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
+  uint32_t EmitEditableVarIntImmediate();
+  void EditVarIntImmediate(uint32_t offset, const uint32_t immediate);
   void Exported(uint8_t flag);
   void External(uint8_t flag);
   void SetName(const unsigned char* name, int name_length);
@@ -120,6 +117,7 @@
   ZoneVector<FunctionSig*> signatures_;
   ZoneVector<uint16_t> indirect_functions_;
   ZoneVector<std::pair<MachineType, bool>> globals_;
+  int start_function_index_;
 };
 
 class WasmModuleBuilder : public ZoneObject {
@@ -131,6 +129,7 @@
   void AddDataSegment(WasmDataSegmentEncoder* data);
   uint16_t AddSignature(FunctionSig* sig);
   void AddIndirectFunction(uint16_t index);
+  void MarkStartFunction(uint16_t index);
   WasmModuleWriter* Build(Zone* zone);
 
   struct CompareFunctionSigs {
@@ -146,6 +145,7 @@
   ZoneVector<uint16_t> indirect_functions_;
   ZoneVector<std::pair<MachineType, bool>> globals_;
   SignatureMap signature_map_;
+  int start_function_index_;
 };
 
 std::vector<uint8_t> UnsignedLEB128From(uint32_t result);
diff --git a/src/wasm/module-decoder.cc b/src/wasm/module-decoder.cc
index 62b000d..3e85a1b 100644
--- a/src/wasm/module-decoder.cc
+++ b/src/wasm/module-decoder.cc
@@ -2,12 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/wasm/module-decoder.h"
+
+#include "src/base/functional.h"
+#include "src/base/platform/platform.h"
 #include "src/macro-assembler.h"
 #include "src/objects.h"
 #include "src/v8.h"
 
 #include "src/wasm/decoder.h"
-#include "src/wasm/module-decoder.h"
 
 namespace v8 {
 namespace internal {
@@ -27,8 +30,8 @@
 class ModuleDecoder : public Decoder {
  public:
   ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
-                bool asm_js)
-      : Decoder(module_start, module_end), module_zone(zone), asm_js_(asm_js) {
+                ModuleOrigin origin)
+      : Decoder(module_start, module_end), module_zone(zone), origin_(origin) {
     result_.start = start_;
     if (limit_ < start_) {
       error(start_, "end is less than start");
@@ -40,86 +43,196 @@
     pc_ = limit_;  // On error, terminate section decoding loop.
   }
 
+  static void DumpModule(WasmModule* module, ModuleResult result) {
+    std::string path;
+    if (FLAG_dump_wasm_module_path) {
+      path = FLAG_dump_wasm_module_path;
+      if (path.size() &&
+          !base::OS::isDirectorySeparator(path[path.size() - 1])) {
+        path += base::OS::DirectorySeparator();
+      }
+    }
+    // Files are named `HASH.{ok,failed}.wasm`.
+    size_t hash = base::hash_range(module->module_start, module->module_end);
+    char buf[32] = {'\0'};
+#if V8_OS_WIN && _MSC_VER < 1900
+#define snprintf sprintf_s
+#endif
+    snprintf(buf, sizeof(buf) - 1, "%016zx.%s.wasm", hash,
+             result.ok() ? "ok" : "failed");
+    std::string name(buf);
+    if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
+      fwrite(module->module_start, module->module_end - module->module_start, 1,
+             wasm_file);
+      fclose(wasm_file);
+    }
+  }
+
   // Decodes an entire module.
   ModuleResult DecodeModule(WasmModule* module, bool verify_functions = true) {
     pc_ = start_;
     module->module_start = start_;
     module->module_end = limit_;
-    module->min_mem_size_log2 = 0;
-    module->max_mem_size_log2 = 0;
+    module->min_mem_pages = 0;
+    module->max_mem_pages = 0;
     module->mem_export = false;
     module->mem_external = false;
-    module->globals = new std::vector<WasmGlobal>();
-    module->signatures = new std::vector<FunctionSig*>();
-    module->functions = new std::vector<WasmFunction>();
-    module->data_segments = new std::vector<WasmDataSegment>();
-    module->function_table = new std::vector<uint16_t>();
-    module->import_table = new std::vector<WasmImport>();
+    module->origin = origin_;
 
-    bool sections[kMaxModuleSectionCode];
-    memset(sections, 0, sizeof(sections));
+    bool sections[(size_t)WasmSection::Code::Max] = {false};
+
+    const byte* pos = pc_;
+    uint32_t magic_word = consume_u32("wasm magic");
+#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+    if (magic_word != kWasmMagic) {
+      error(pos, pos,
+            "expected magic word %02x %02x %02x %02x, "
+            "found %02x %02x %02x %02x",
+            BYTES(kWasmMagic), BYTES(magic_word));
+      goto done;
+    }
+
+    pos = pc_;
+    {
+      uint32_t magic_version = consume_u32("wasm version");
+      if (magic_version != kWasmVersion) {
+        error(pos, pos,
+              "expected version %02x %02x %02x %02x, "
+              "found %02x %02x %02x %02x",
+              BYTES(kWasmVersion), BYTES(magic_version));
+        goto done;
+      }
+    }
 
     // Decode the module sections.
     while (pc_ < limit_) {
       TRACE("DecodeSection\n");
-      WasmSectionDeclCode section =
-          static_cast<WasmSectionDeclCode>(consume_u8("section"));
-      // Each section should appear at most once.
-      if (section < kMaxModuleSectionCode) {
-        CheckForPreviousSection(sections, section, false);
-        sections[section] = true;
+      pos = pc_;
+
+      int length;
+      uint32_t section_length = consume_u32v(&length, "section size");
+
+      int section_string_leb_length = 0;
+      uint32_t section_string_length = 0;
+      WasmSection::Code section = consume_section_name(
+          &section_string_leb_length, &section_string_length);
+      uint32_t string_and_leb_length =
+          section_string_leb_length + section_string_length;
+      if (string_and_leb_length > section_length) {
+        error(pos, pos,
+              "section string of size %u longer than total section bytes %u",
+              string_and_leb_length, section_length);
+        break;
       }
 
+      if (section == WasmSection::Code::Max) {
+        // Skip unknown section.
+        uint32_t skip = section_length - string_and_leb_length;
+        TRACE("skipping %u bytes from unknown section\n", skip);
+        consume_bytes(skip);
+        continue;
+      }
+
+      // Each section should appear at most once.
+      CheckForPreviousSection(sections, section, false);
+      sections[(size_t)section] = true;
+
       switch (section) {
-        case kDeclEnd:
+        case WasmSection::Code::End:
           // Terminate section decoding.
           limit_ = pc_;
           break;
-        case kDeclMemory:
-          module->min_mem_size_log2 = consume_u8("min memory");
-          module->max_mem_size_log2 = consume_u8("max memory");
+        case WasmSection::Code::Memory:
+          int length;
+          module->min_mem_pages = consume_u32v(&length, "min memory");
+          module->max_mem_pages = consume_u32v(&length, "max memory");
           module->mem_export = consume_u8("export memory") != 0;
           break;
-        case kDeclSignatures: {
+        case WasmSection::Code::Signatures: {
           int length;
           uint32_t signatures_count = consume_u32v(&length, "signatures count");
-          module->signatures->reserve(SafeReserve(signatures_count));
+          module->signatures.reserve(SafeReserve(signatures_count));
           // Decode signatures.
           for (uint32_t i = 0; i < signatures_count; i++) {
             if (failed()) break;
             TRACE("DecodeSignature[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
             FunctionSig* s = consume_sig();  // read function sig.
-            module->signatures->push_back(s);
+            module->signatures.push_back(s);
           }
           break;
         }
-        case kDeclFunctions: {
+        case WasmSection::Code::FunctionSignatures: {
           // Functions require a signature table first.
-          CheckForPreviousSection(sections, kDeclSignatures, true);
+          CheckForPreviousSection(sections, WasmSection::Code::Signatures,
+                                  true);
           int length;
           uint32_t functions_count = consume_u32v(&length, "functions count");
-          module->functions->reserve(SafeReserve(functions_count));
+          module->functions.reserve(SafeReserve(functions_count));
+          for (uint32_t i = 0; i < functions_count; i++) {
+            module->functions.push_back(
+                {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
+            WasmFunction* function = &module->functions.back();
+            function->sig_index = consume_sig_index(module, &function->sig);
+          }
+          break;
+        }
+        case WasmSection::Code::FunctionBodies: {
+          // Function bodies should follow signatures.
+          CheckForPreviousSection(sections,
+                                  WasmSection::Code::FunctionSignatures, true);
+          int length;
+          const byte* pos = pc_;
+          uint32_t functions_count = consume_u32v(&length, "functions count");
+          if (functions_count != module->functions.size()) {
+            error(pos, pos, "function body count %u mismatch (%u expected)",
+                  functions_count,
+                  static_cast<uint32_t>(module->functions.size()));
+            break;
+          }
+          for (uint32_t i = 0; i < functions_count; i++) {
+            WasmFunction* function = &module->functions[i];
+            int length;
+            uint32_t size = consume_u32v(&length, "body size");
+            function->code_start_offset = pc_offset();
+            function->code_end_offset = pc_offset() + size;
+
+            TRACE("  +%d  %-20s: (%d bytes)\n", pc_offset(), "function body",
+                  size);
+            pc_ += size;
+            if (pc_ > limit_) {
+              error(pc_, "function body extends beyond end of file");
+            }
+          }
+          break;
+        }
+        case WasmSection::Code::Functions: {
+          // Functions require a signature table first.
+          CheckForPreviousSection(sections, WasmSection::Code::Signatures,
+                                  true);
+          int length;
+          uint32_t functions_count = consume_u32v(&length, "functions count");
+          module->functions.reserve(SafeReserve(functions_count));
           // Set up module environment for verification.
           ModuleEnv menv;
           menv.module = module;
           menv.instance = nullptr;
-          menv.asm_js = asm_js_;
+          menv.origin = origin_;
           // Decode functions.
           for (uint32_t i = 0; i < functions_count; i++) {
             if (failed()) break;
             TRACE("DecodeFunction[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
 
-            module->functions->push_back(
+            module->functions.push_back(
                 {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
-            WasmFunction* function = &module->functions->back();
+            WasmFunction* function = &module->functions.back();
             DecodeFunctionInModule(module, function, false);
           }
           if (ok() && verify_functions) {
             for (uint32_t i = 0; i < functions_count; i++) {
               if (failed()) break;
-              WasmFunction* function = &module->functions->at(i);
+              WasmFunction* function = &module->functions[i];
               if (!function->external) {
                 VerifyFunctionBody(i, &menv, function);
                 if (result_.failed())
@@ -129,132 +242,166 @@
           }
           break;
         }
-        case kDeclGlobals: {
+        case WasmSection::Code::Names: {
+          // Names correspond to functions.
+          CheckForPreviousSection(sections,
+                                  WasmSection::Code::FunctionSignatures, true);
+          int length;
+          const byte* pos = pc_;
+          uint32_t functions_count = consume_u32v(&length, "functions count");
+          if (functions_count != module->functions.size()) {
+            error(pos, pos, "function name count %u mismatch (%u expected)",
+                  functions_count,
+                  static_cast<uint32_t>(module->functions.size()));
+            break;
+          }
+
+          for (uint32_t i = 0; i < functions_count; i++) {
+            WasmFunction* function = &module->functions[i];
+            function->name_offset =
+                consume_string(&function->name_length, "function name");
+
+            uint32_t local_names_count =
+                consume_u32v(&length, "local names count");
+            for (uint32_t j = 0; j < local_names_count; j++) {
+              uint32_t unused = 0;
+              uint32_t offset = consume_string(&unused, "local name");
+              USE(unused);
+              USE(offset);
+            }
+          }
+          break;
+        }
+        case WasmSection::Code::Globals: {
           int length;
           uint32_t globals_count = consume_u32v(&length, "globals count");
-          module->globals->reserve(SafeReserve(globals_count));
+          module->globals.reserve(SafeReserve(globals_count));
           // Decode globals.
           for (uint32_t i = 0; i < globals_count; i++) {
             if (failed()) break;
             TRACE("DecodeGlobal[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            module->globals->push_back({0, MachineType::Int32(), 0, false});
-            WasmGlobal* global = &module->globals->back();
+            module->globals.push_back({0, 0, MachineType::Int32(), 0, false});
+            WasmGlobal* global = &module->globals.back();
             DecodeGlobalInModule(global);
           }
           break;
         }
-        case kDeclDataSegments: {
+        case WasmSection::Code::DataSegments: {
           int length;
           uint32_t data_segments_count =
               consume_u32v(&length, "data segments count");
-          module->data_segments->reserve(SafeReserve(data_segments_count));
+          module->data_segments.reserve(SafeReserve(data_segments_count));
           // Decode data segments.
           for (uint32_t i = 0; i < data_segments_count; i++) {
             if (failed()) break;
             TRACE("DecodeDataSegment[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            module->data_segments->push_back({0, 0, 0});
-            WasmDataSegment* segment = &module->data_segments->back();
+            module->data_segments.push_back({0, 0, 0});
+            WasmDataSegment* segment = &module->data_segments.back();
             DecodeDataSegmentInModule(module, segment);
           }
           break;
         }
-        case kDeclFunctionTable: {
+        case WasmSection::Code::FunctionTable: {
           // An indirect function table requires functions first.
-          CheckForPreviousSection(sections, kDeclFunctions, true);
+          CheckForFunctions(module, section);
           int length;
           uint32_t function_table_count =
               consume_u32v(&length, "function table count");
-          module->function_table->reserve(SafeReserve(function_table_count));
+          module->function_table.reserve(SafeReserve(function_table_count));
           // Decode function table.
           for (uint32_t i = 0; i < function_table_count; i++) {
             if (failed()) break;
             TRACE("DecodeFunctionTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            uint16_t index = consume_u16();
-            if (index >= module->functions->size()) {
+            uint16_t index = consume_u32v(&length);
+            if (index >= module->functions.size()) {
               error(pc_ - 2, "invalid function index");
               break;
             }
-            module->function_table->push_back(index);
+            module->function_table.push_back(index);
           }
           break;
         }
-        case kDeclStartFunction: {
+        case WasmSection::Code::StartFunction: {
           // Declares a start function for a module.
-          CheckForPreviousSection(sections, kDeclFunctions, true);
+          CheckForFunctions(module, section);
           if (module->start_function_index >= 0) {
             error("start function already declared");
             break;
           }
-          int length;
-          const byte* before = pc_;
-          uint32_t index = consume_u32v(&length, "start function index");
-          if (index >= module->functions->size()) {
-            error(before, "invalid start function index");
-            break;
-          }
-          module->start_function_index = static_cast<int>(index);
-          FunctionSig* sig =
-              module->signatures->at(module->functions->at(index).sig_index);
-          if (sig->parameter_count() > 0) {
-            error(before, "invalid start function: non-zero parameter count");
+          WasmFunction* func;
+          const byte* pos = pc_;
+          module->start_function_index = consume_func_index(module, &func);
+          if (func && func->sig->parameter_count() > 0) {
+            error(pos, "invalid start function: non-zero parameter count");
             break;
           }
           break;
         }
-        case kDeclImportTable: {
+        case WasmSection::Code::ImportTable: {
           // Declares an import table.
-          CheckForPreviousSection(sections, kDeclSignatures, true);
+          CheckForPreviousSection(sections, WasmSection::Code::Signatures,
+                                  true);
           int length;
           uint32_t import_table_count =
               consume_u32v(&length, "import table count");
-          module->import_table->reserve(SafeReserve(import_table_count));
+          module->import_table.reserve(SafeReserve(import_table_count));
           // Decode import table.
           for (uint32_t i = 0; i < import_table_count; i++) {
             if (failed()) break;
             TRACE("DecodeImportTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
 
-            module->import_table->push_back({nullptr, 0, 0});
-            WasmImport* import = &module->import_table->back();
+            module->import_table.push_back({nullptr, 0, 0});
+            WasmImport* import = &module->import_table.back();
 
-            const byte* sigpos = pc_;
-            import->sig_index = consume_u16("signature index");
-
-            if (import->sig_index >= module->signatures->size()) {
-              error(sigpos, "invalid signature index");
-            } else {
-              import->sig = module->signatures->at(import->sig_index);
+            import->sig_index = consume_sig_index(module, &import->sig);
+            const byte* pos = pc_;
+            import->module_name_offset = consume_string(
+                &import->module_name_length, "import module name");
+            if (import->module_name_length == 0) {
+              error(pos, "import module name cannot be NULL");
             }
-            import->module_name_offset = consume_string("import module name");
-            import->function_name_offset =
-                consume_string("import function name");
+            import->function_name_offset = consume_string(
+                &import->function_name_length, "import function name");
           }
           break;
         }
-        case kDeclWLL: {
-          // Reserved for experimentation by the Web Low-level Language project
-          // which is augmenting the binary encoding with source code meta
-          // information. This section does not affect the semantics of the code
-          // and can be ignored by the runtime. https://github.com/JSStats/wll
-          int length = 0;
-          uint32_t section_size = consume_u32v(&length, "section size");
-          if (pc_ + section_size > limit_ || pc_ + section_size < pc_) {
-            error(pc_ - length, "invalid section size");
-            break;
+        case WasmSection::Code::ExportTable: {
+          // Declares an export table.
+          CheckForFunctions(module, section);
+          int length;
+          uint32_t export_table_count =
+              consume_u32v(&length, "export table count");
+          module->export_table.reserve(SafeReserve(export_table_count));
+          // Decode export table.
+          for (uint32_t i = 0; i < export_table_count; i++) {
+            if (failed()) break;
+            TRACE("DecodeExportTable[%d] module+%d\n", i,
+                  static_cast<int>(pc_ - start_));
+
+            module->export_table.push_back({0, 0});
+            WasmExport* exp = &module->export_table.back();
+
+            WasmFunction* func;
+            exp->func_index = consume_func_index(module, &func);
+            exp->name_offset = consume_string(&exp->name_length, "export name");
           }
-          pc_ += section_size;
           break;
         }
-        default:
-          error(pc_ - 1, nullptr, "unrecognized section 0x%02x", section);
-          break;
+        case WasmSection::Code::Max:
+          UNREACHABLE();  // Already skipped unknown sections.
       }
     }
 
-    return toResult(module);
+  done:
+    ModuleResult result = toResult(module);
+    if (FLAG_dump_wasm_module) {
+      DumpModule(module, result);
+    }
+    return result;
   }
 
   uint32_t SafeReserve(uint32_t count) {
@@ -263,38 +410,23 @@
     return count < kMaxReserve ? count : kMaxReserve;
   }
 
-  void CheckForPreviousSection(bool* sections, WasmSectionDeclCode section,
-                               bool present) {
-    if (section >= kMaxModuleSectionCode) return;
-    if (sections[section] == present) return;
-    const char* name = "";
-    switch (section) {
-      case kDeclMemory:
-        name = "memory";
-        break;
-      case kDeclSignatures:
-        name = "signatures";
-        break;
-      case kDeclFunctions:
-        name = "function declaration";
-        break;
-      case kDeclGlobals:
-        name = "global variable";
-        break;
-      case kDeclDataSegments:
-        name = "data segment";
-        break;
-      case kDeclFunctionTable:
-        name = "function table";
-        break;
-      default:
-        name = "";
-        break;
+  void CheckForFunctions(WasmModule* module, WasmSection::Code section) {
+    if (module->functions.size() == 0) {
+      error(pc_ - 1, nullptr, "functions must appear before section %s",
+            WasmSection::getName(section));
     }
+  }
+
+  void CheckForPreviousSection(bool* sections, WasmSection::Code section,
+                               bool present) {
+    if (section >= WasmSection::Code::Max) return;
+    if (sections[(size_t)section] == present) return;
     if (present) {
-      error(pc_ - 1, nullptr, "required %s section missing", name);
+      error(pc_ - 1, nullptr, "required %s section missing",
+            WasmSection::getName(section));
     } else {
-      error(pc_ - 1, nullptr, "%s section already present", name);
+      error(pc_ - 1, nullptr, "%s section already present",
+            WasmSection::getName(section));
     }
   }
 
@@ -302,16 +434,13 @@
   FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
                                       WasmFunction* function) {
     pc_ = start_;
-    function->sig = consume_sig();                  // read signature
-    function->name_offset = 0;                   // ---- name
-    function->code_start_offset = off(pc_ + 8);  // ---- code start
-    function->code_end_offset = off(limit_);     // ---- code end
-    function->local_i32_count = consume_u16();      // read u16
-    function->local_i64_count = consume_u16();      // read u16
-    function->local_f32_count = consume_u16();      // read u16
-    function->local_f64_count = consume_u16();      // read u16
-    function->exported = false;                  // ---- exported
-    function->external = false;                  // ---- external
+    function->sig = consume_sig();            // read signature
+    function->name_offset = 0;                // ---- name
+    function->name_length = 0;                // ---- name length
+    function->code_start_offset = off(pc_);   // ---- code start
+    function->code_end_offset = off(limit_);  // ---- code end
+    function->exported = false;               // ---- exported
+    function->external = false;               // ---- external
 
     if (ok()) VerifyFunctionBody(0, module_env, function);
 
@@ -331,19 +460,20 @@
  private:
   Zone* module_zone;
   ModuleResult result_;
-  bool asm_js_;
+  ModuleOrigin origin_;
 
   uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
 
   // Decodes a single global entry inside a module starting at {pc_}.
   void DecodeGlobalInModule(WasmGlobal* global) {
-    global->name_offset = consume_string("global name");
+    global->name_offset = consume_string(&global->name_length, "global name");
     global->type = mem_type();
     global->offset = 0;
     global->exported = consume_u8("exported") != 0;
   }
 
   // Decodes a single function entry inside a module starting at {pc_}.
+  // TODO(titzer): legacy function body; remove
   void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
                               bool verify_body = true) {
     byte decl_bits = consume_u8("function decl");
@@ -351,10 +481,10 @@
     const byte* sigpos = pc_;
     function->sig_index = consume_u16("signature index");
 
-    if (function->sig_index >= module->signatures->size()) {
+    if (function->sig_index >= module->signatures.size()) {
       return error(sigpos, "invalid signature index");
     } else {
-      function->sig = module->signatures->at(function->sig_index);
+      function->sig = module->signatures[function->sig_index];
     }
 
     TRACE("  +%d  <function attributes:%s%s%s%s%s>\n",
@@ -366,7 +496,8 @@
           (decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
 
     if (decl_bits & kDeclFunctionName) {
-      function->name_offset = consume_string("function name");
+      function->name_offset =
+          consume_string(&function->name_length, "function name");
     }
 
     function->exported = decl_bits & kDeclFunctionExport;
@@ -406,25 +537,30 @@
 
   // Decodes a single data segment entry inside a module starting at {pc_}.
   void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
-    segment->dest_addr = consume_u32("destination");
-    segment->source_offset = consume_offset("source offset");
-    segment->source_size = consume_u32("source size");
-    segment->init = consume_u8("init");
+    const byte* start = pc_;
+    int length;
+    segment->dest_addr = consume_u32v(&length, "destination");
+    segment->source_size = consume_u32v(&length, "source size");
+    segment->source_offset = static_cast<uint32_t>(pc_ - start_);
+    segment->init = true;
 
     // Validate the data is in the module.
     uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
     if (!IsWithinLimit(module_limit, segment->source_offset,
                        segment->source_size)) {
-      error(pc_ - sizeof(uint32_t), "segment out of bounds of module");
+      error(start, "segment out of bounds of module");
     }
 
     // Validate that the segment will fit into the (minimum) memory.
     uint32_t memory_limit =
-        1 << (module ? module->min_mem_size_log2 : WasmModule::kMaxMemSize);
+        WasmModule::kPageSize * (module ? module->min_mem_pages
+                                        : WasmModule::kMaxMemPages);
     if (!IsWithinLimit(memory_limit, segment->dest_addr,
                        segment->source_size)) {
-      error(pc_ - sizeof(uint32_t), "segment out of bounds of memory");
+      error(start, "segment out of bounds of memory");
     }
+
+    consume_bytes(segment->source_size);
   }
 
   // Verifies the body (code) of a given function.
@@ -436,18 +572,10 @@
          << std::endl;
       os << std::endl;
     }
-    FunctionEnv fenv;
-    fenv.module = menv;
-    fenv.sig = function->sig;
-    fenv.local_i32_count = function->local_i32_count;
-    fenv.local_i64_count = function->local_i64_count;
-    fenv.local_f32_count = function->local_f32_count;
-    fenv.local_f64_count = function->local_f64_count;
-    fenv.SumLocals();
-
-    TreeResult result =
-        VerifyWasmCode(&fenv, start_, start_ + function->code_start_offset,
-                       start_ + function->code_end_offset);
+    FunctionBody body = {menv, function->sig, start_,
+                         start_ + function->code_start_offset,
+                         start_ + function->code_end_offset};
+    TreeResult result = VerifyWasmCode(module_zone->allocator(), body);
     if (result.failed()) {
       // Wrap the error message from the function decoder.
       std::ostringstream str;
@@ -476,11 +604,67 @@
     return offset;
   }
 
-  // Reads a single 32-bit unsigned integer interpreted as an offset into the
-  // data and validating the string there and advances.
-  uint32_t consume_string(const char* name = nullptr) {
-    // TODO(titzer): validate string
-    return consume_offset(name ? name : "string");
+  // Reads a length-prefixed string, checking that it is within bounds. Returns
+  // the offset of the string, and the length as an out parameter.
+  uint32_t consume_string(uint32_t* length, const char* name = nullptr) {
+    int varint_length;
+    *length = consume_u32v(&varint_length, "string length");
+    uint32_t offset = pc_offset();
+    TRACE("  +%u  %-20s: (%u bytes)\n", offset, "string", *length);
+    consume_bytes(*length);
+    return offset;
+  }
+
+  uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
+    const byte* pos = pc_;
+    int length;
+    uint32_t sig_index = consume_u32v(&length, "signature index");
+    if (sig_index >= module->signatures.size()) {
+      error(pos, pos, "signature index %u out of bounds (%d signatures)",
+            sig_index, static_cast<int>(module->signatures.size()));
+      *sig = nullptr;
+      return 0;
+    }
+    *sig = module->signatures[sig_index];
+    return sig_index;
+  }
+
+  uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
+    const byte* pos = pc_;
+    int length;
+    uint32_t func_index = consume_u32v(&length, "function index");
+    if (func_index >= module->functions.size()) {
+      error(pos, pos, "function index %u out of bounds (%d functions)",
+            func_index, static_cast<int>(module->functions.size()));
+      *func = nullptr;
+      return 0;
+    }
+    *func = &module->functions[func_index];
+    return func_index;
+  }
+
+  // Reads a section name.
+  WasmSection::Code consume_section_name(int* string_leb_length,
+                                         uint32_t* string_length) {
+    *string_length = consume_u32v(string_leb_length, "name length");
+    const byte* start = pc_;
+    consume_bytes(*string_length);
+    if (failed()) {
+      TRACE("Section name of length %u couldn't be read\n", *string_length);
+      return WasmSection::Code::Max;
+    }
+    // TODO(jfb) Linear search, it may be better to do a common-prefix search.
+    for (WasmSection::Code i = WasmSection::begin(); i != WasmSection::end();
+         i = WasmSection::next(i)) {
+      if (WasmSection::getNameLength(i) == *string_length &&
+          0 == memcmp(WasmSection::getName(i), start, *string_length)) {
+        return i;
+      }
+    }
+    TRACE("Unknown section: '");
+    for (uint32_t i = 0; i != *string_length; ++i) TRACE("%c", *(start + i));
+    TRACE("'\n");
+    return WasmSection::Code::Max;
   }
 
   // Reads a single 8-bit integer, interpreting it as a local type.
@@ -537,7 +721,8 @@
 
   // Parses an inline function signature.
   FunctionSig* consume_sig() {
-    byte count = consume_u8("param count");
+    int length;
+    byte count = consume_u32v(&length, "param count");
     LocalType ret = consume_local_type();
     FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
     if (ret != kAstStmt) builder.AddReturn(ret);
@@ -579,22 +764,21 @@
   }
 };
 
-
 ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
                               const byte* module_start, const byte* module_end,
-                              bool verify_functions, bool asm_js) {
+                              bool verify_functions, ModuleOrigin origin) {
   size_t size = module_end - module_start;
   if (module_start > module_end) return ModuleError("start > end");
   if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
   WasmModule* module = new WasmModule();
-  ModuleDecoder decoder(zone, module_start, module_end, asm_js);
+  ModuleDecoder decoder(zone, module_start, module_end, origin);
   return decoder.DecodeModule(module, verify_functions);
 }
 
 
 FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
                                            const byte* end) {
-  ModuleDecoder decoder(zone, start, end, false);
+  ModuleDecoder decoder(zone, start, end, kWasmOrigin);
   return decoder.DecodeFunctionSignature(start);
 }
 
@@ -608,7 +792,7 @@
   if (size > kMaxFunctionSize)
     return FunctionError("size > maximum function size");
   WasmFunction* function = new WasmFunction();
-  ModuleDecoder decoder(zone, function_start, function_end, false);
+  ModuleDecoder decoder(zone, function_start, function_end, kWasmOrigin);
   return decoder.DecodeSingleFunction(module_env, function);
 }
 }  // namespace wasm
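
The decoder above now reads names as length-prefixed byte strings: consume_string() first consumes a LEB128-encoded length, records the offset where the string bytes begin, and then skips past them, erroring out if they run off the end of the module. A minimal standalone sketch of that pattern, with illustrative names (read_leb_u32 and Name are not part of the patch):

#include <cstddef>
#include <cstdint>

struct Name { uint32_t offset; uint32_t length; bool ok; };

// Reads an unsigned LEB128 value from [*pc, end) and advances *pc.
static uint32_t read_leb_u32(const uint8_t** pc, const uint8_t* end, bool* ok) {
  uint32_t result = 0;
  for (int shift = 0; *pc < end && shift < 35; shift += 7) {
    uint8_t b = *(*pc)++;
    result |= static_cast<uint32_t>(b & 0x7f) << shift;
    if ((b & 0x80) == 0) return result;
  }
  *ok = false;  // ran off the end, or more than 5 bytes
  return 0;
}

// Mirrors consume_string(): a varint length prefix followed by the raw bytes.
static Name read_name(const uint8_t* module_start, const uint8_t** pc,
                      const uint8_t* end) {
  bool ok = true;
  uint32_t length = read_leb_u32(pc, end, &ok);
  Name name = {static_cast<uint32_t>(*pc - module_start), length, ok};
  if (!ok || static_cast<size_t>(end - *pc) < length) {
    name.ok = false;  // string out of bounds of the module
    return name;
  }
  *pc += length;  // skip the string bytes, like consume_bytes() does
  return name;
}
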
diff --git a/src/wasm/module-decoder.h b/src/wasm/module-decoder.h
index 3f469a5..00a9b87 100644
--- a/src/wasm/module-decoder.h
+++ b/src/wasm/module-decoder.h
@@ -14,7 +14,7 @@
 // Decodes the bytes of a WASM module between {module_start} and {module_end}.
 ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
                               const byte* module_start, const byte* module_end,
-                              bool verify_functions, bool asm_js);
+                              bool verify_functions, ModuleOrigin origin);
 
 // Exposed for testing. Decodes a single function signature, allocating it
 // in the given zone. Returns {nullptr} upon failure.
diff --git a/src/wasm/wasm-external-refs.h b/src/wasm/wasm-external-refs.h
new file mode 100644
index 0000000..4aa452b
--- /dev/null
+++ b/src/wasm/wasm-external-refs.h
@@ -0,0 +1,181 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WASM_EXTERNAL_REFS_H
+#define WASM_EXTERNAL_REFS_H
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+
+static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+
+static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+
+static void f32_nearest_int_wrapper(float* param) {
+  *param = nearbyintf(*param);
+}
+
+static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+
+static void f64_floor_wrapper(double* param) { *param = floor(*param); }
+
+static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+
+static void f64_nearest_int_wrapper(double* param) {
+  *param = nearbyint(*param);
+}
+
+static void int64_to_float32_wrapper(int64_t* input, float* output) {
+  *output = static_cast<float>(*input);
+}
+
+static void uint64_to_float32_wrapper(uint64_t* input, float* output) {
+#if V8_CC_MSVC
+  // With MSVC we use static_cast<float>(uint32_t) instead of
+  // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
+  // semantics. The idea is to calculate
+  // static_cast<float>(high_word) * 2^32 + static_cast<float>(low_word). To
+  // achieve proper rounding in all cases we have to adjust the high_word
+  // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
+  // the high_word if the low_word may affect the rounding of the high_word.
+  uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+  float shift = static_cast<float>(1ull << 32);
+  // If the MSB of the high_word is set, then we make space for a rounding bit.
+  if (high_word < 0x80000000) {
+    high_word <<= 1;
+    shift = static_cast<float>(1ull << 31);
+  }
+
+  if ((high_word & 0xfe000000) && low_word) {
+    // Set the rounding bit.
+    high_word |= 1;
+  }
+
+  float result = static_cast<float>(high_word);
+  result *= shift;
+  result += static_cast<float>(low_word);
+  *output = result;
+
+#else
+  *output = static_cast<float>(*input);
+#endif
+}
+
+static void int64_to_float64_wrapper(int64_t* input, double* output) {
+  *output = static_cast<double>(*input);
+}
+
+static void uint64_to_float64_wrapper(uint64_t* input, double* output) {
+#if V8_CC_MSVC
+  // With MSVC we use static_cast<double>(uint32_t) instead of
+  // static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
+  // semantics. The idea is to calculate
+  // static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
+  uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+  double shift = static_cast<double>(1ull << 32);
+
+  double result = static_cast<double>(high_word);
+  result *= shift;
+  result += static_cast<double>(low_word);
+  *output = result;
+
+#else
+  *output = static_cast<double>(*input);
+#endif
+}
+
+static int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within int64 range which are actually
+  // not within int64 range.
+  if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
+      *input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+    *output = static_cast<int64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+static int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within uint64 range which are actually
+  // not within uint64 range.
+  if (*input > -1.0 &&
+      *input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+    *output = static_cast<uint64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+static int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within int64 range which are actually
+  // not within int64 range.
+  if (*input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
+      *input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+    *output = static_cast<int64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+static int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within uint64 range which are actually
+  // not within uint64 range.
+  if (*input > -1.0 &&
+      *input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+    *output = static_cast<uint64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+static int32_t int64_div_wrapper(int64_t* dst, int64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  if (*src == -1 && *dst == std::numeric_limits<int64_t>::min()) {
+    return -1;
+  }
+  *dst /= *src;
+  return 1;
+}
+
+static int32_t int64_mod_wrapper(int64_t* dst, int64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  *dst %= *src;
+  return 1;
+}
+
+static int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  *dst /= *src;
+  return 1;
+}
+
+static int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  *dst %= *src;
+  return 1;
+}
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif
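
The float-to-integer wrappers above use "<" rather than "<=" for the upper bound on purpose: INT64_MAX (2^63 - 1) is not representable as a float, and the cast in the comparison rounds it up to exactly 2^63, a value that itself does not fit in int64_t. A small self-contained check of that reasoning (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  // The cast rounds 2^63 - 1 up to exactly 2^63.
  const float upper = static_cast<float>(std::numeric_limits<int64_t>::max());
  std::printf("float(INT64_MAX) == 2^63: %d\n",
              static_cast<double>(upper) == 9223372036854775808.0);

  // An input exactly equal to that bound must be rejected, since
  // static_cast<int64_t> of it would overflow.
  const float input = upper;
  std::printf("'<' accepts it: %d, '<=' would accept it: %d\n",
              input < upper, input <= upper);
  return 0;
}
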
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index 62a2676..83009d7 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -37,20 +37,43 @@
 
 RawBuffer GetRawBufferArgument(
     ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
-  // TODO(titzer): allow typed array views.
-  if (args.Length() < 1 || !args[0]->IsArrayBuffer()) {
+  if (args.Length() < 1) {
     thrower.Error("Argument 0 must be an array buffer");
     return {nullptr, nullptr};
   }
-  Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
-  ArrayBuffer::Contents contents = buffer->GetContents();
 
-  const byte* start = reinterpret_cast<const byte*>(contents.Data());
-  const byte* end = start + contents.ByteLength();
+  const byte* start = nullptr;
+  const byte* end = nullptr;
 
-  if (start == nullptr) {
-    thrower.Error("ArrayBuffer argument is empty");
+  if (args[0]->IsArrayBuffer()) {
+    // A raw array buffer was passed.
+    Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
+    ArrayBuffer::Contents contents = buffer->GetContents();
+
+    start = reinterpret_cast<const byte*>(contents.Data());
+    end = start + contents.ByteLength();
+
+    if (start == nullptr || end == start) {
+      thrower.Error("ArrayBuffer argument is empty");
+    }
+  } else if (args[0]->IsTypedArray()) {
+    // A TypedArray was passed.
+    Local<TypedArray> array = Local<TypedArray>::Cast(args[0]);
+    Local<ArrayBuffer> buffer = array->Buffer();
+
+    ArrayBuffer::Contents contents = buffer->GetContents();
+
+    start =
+        reinterpret_cast<const byte*>(contents.Data()) + array->ByteOffset();
+    end = start + array->ByteLength();
+
+    if (start == nullptr || end == start) {
+      thrower.Error("ArrayBuffer argument is empty");
+    }
+  } else {
+    thrower.Error("Argument 0 must be an ArrayBuffer or Uint8Array");
   }
+
   return {start, end};
 }
 
@@ -63,9 +86,10 @@
   RawBuffer buffer = GetRawBufferArgument(thrower, args);
   if (thrower.error()) return;
 
-  i::Zone zone;
-  internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
-      isolate, &zone, buffer.start, buffer.end, true, false);
+  i::Zone zone(isolate->allocator());
+  internal::wasm::ModuleResult result =
+      internal::wasm::DecodeWasmModule(isolate, &zone, buffer.start, buffer.end,
+                                       true, internal::wasm::kWasmOrigin);
 
   if (result.failed()) {
     thrower.Failed("", result);
@@ -87,7 +111,7 @@
   {
     // Verification of a single function shouldn't allocate.
     i::DisallowHeapAllocation no_allocation;
-    i::Zone zone;
+    i::Zone zone(isolate->allocator());
     result = internal::wasm::DecodeWasmFunction(isolate, &zone, nullptr,
                                                 buffer.start, buffer.end);
   }
@@ -123,25 +147,18 @@
     return nullptr;
   }
 
-  auto module = v8::internal::wasm::AsmWasmBuilder(
-                    info->isolate(), info->zone(), info->literal(), foreign)
-                    .Run();
-
-  if (i::FLAG_dump_asmjs_wasm) {
-    FILE* wasm_file = fopen(i::FLAG_asmjs_wasm_dumpfile, "wb");
-    if (wasm_file) {
-      fwrite(module->Begin(), module->End() - module->Begin(), 1, wasm_file);
-      fclose(wasm_file);
-    }
-  }
+  auto module =
+      v8::internal::wasm::AsmWasmBuilder(info->isolate(), info->zone(),
+                                         info->literal(), foreign, &typer)
+          .Run();
 
   return module;
 }
 
-
 void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
                              const byte* start, const byte* end,
-                             ErrorThrower* thrower, bool must_decode) {
+                             ErrorThrower* thrower,
+                             internal::wasm::ModuleOrigin origin) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
 
   i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
@@ -153,11 +170,11 @@
 
   // Decode but avoid a redundant pass over function bodies for verification.
   // Verification will happen during compilation.
-  i::Zone zone;
+  i::Zone zone(isolate->allocator());
   internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
-      isolate, &zone, start, end, false, false);
+      isolate, &zone, start, end, false, origin);
 
-  if (result.failed() && must_decode) {
+  if (result.failed() && origin == internal::wasm::kAsmJsOrigin) {
     thrower->Error("Asm.js converted module failed to decode");
   } else if (result.failed()) {
     thrower->Failed("", result);
@@ -192,7 +209,7 @@
   }
 
   i::Factory* factory = isolate->factory();
-  i::Zone zone;
+  i::Zone zone(isolate->allocator());
   Local<String> source = Local<String>::Cast(args[0]);
   i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
   i::ParseInfo info(&zone, script);
@@ -208,7 +225,8 @@
     return;
   }
 
-  InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower, true);
+  InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower,
+                          internal::wasm::kAsmJsOrigin);
 }
 
 
@@ -220,7 +238,8 @@
   RawBuffer buffer = GetRawBufferArgument(thrower, args);
   if (buffer.start == nullptr) return;
 
-  InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower, false);
+  InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower,
+                          internal::wasm::kWasmOrigin);
 }
 }  // namespace
 
@@ -260,7 +279,7 @@
 
   // Bind the WASM object.
   Factory* factory = isolate->factory();
-  Handle<String> name = v8_str(isolate, "_WASMEXP_");
+  Handle<String> name = v8_str(isolate, "Wasm");
   Handle<JSFunction> cons = factory->NewFunction(name);
   JSFunction::SetInstancePrototype(
       cons, Handle<Object>(context->initial_object_prototype(), isolate));
@@ -280,10 +299,26 @@
 
 void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
   if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
-    Handle<Map> wasm_function_map = isolate->factory()->NewMap(
-        JS_FUNCTION_TYPE, JSFunction::kSize + kPointerSize);
-    wasm_function_map->set_is_callable();
-    context->set_wasm_function_map(*wasm_function_map);
+    // TODO(titzer): Move this to bootstrapper.cc??
+    // TODO(titzer): Also make one for strict mode functions?
+    Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
+
+    InstanceType instance_type = prev_map->instance_type();
+    int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
+    CHECK_EQ(0, internal_fields);
+    int pre_allocated =
+        prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
+    int instance_size;
+    int in_object_properties;
+    JSFunction::CalculateInstanceSizeHelper(instance_type, internal_fields + 1,
+                                            0, &instance_size,
+                                            &in_object_properties);
+
+    int unused_property_fields = in_object_properties - pre_allocated;
+    Handle<Map> map = Map::CopyInitialMap(
+        prev_map, instance_size, in_object_properties, unused_property_fields);
+
+    context->set_wasm_function_map(*map);
   }
 }
 
diff --git a/src/wasm/wasm-macro-gen.h b/src/wasm/wasm-macro-gen.h
index dd653c1..d9199e8 100644
--- a/src/wasm/wasm-macro-gen.h
+++ b/src/wasm/wasm-macro-gen.h
@@ -7,6 +7,50 @@
 
 #include "src/wasm/wasm-opcodes.h"
 
+#define U32_LE(v)                                    \
+  static_cast<byte>(v), static_cast<byte>((v) >> 8), \
+      static_cast<byte>((v) >> 16), static_cast<byte>((v) >> 24)
+
+#define U16_LE(v) static_cast<byte>(v), static_cast<byte>((v) >> 8)
+
+#define WASM_MODULE_HEADER U32_LE(kWasmMagic), U32_LE(kWasmVersion)
+
+#define SIG_INDEX(v) U16_LE(v)
+// TODO(binji): make SIG_INDEX match this.
+#define IMPORT_SIG_INDEX(v) U32V_1(v)
+#define FUNC_INDEX(v) U32V_1(v)
+#define NO_NAME U32V_1(0)
+#define NAME_LENGTH(v) U32V_1(v)
+
+#define ZERO_ALIGNMENT 0
+#define ZERO_OFFSET 0
+
+#define BR_TARGET(v) U32_LE(v)
+
+#define MASK_7 ((1 << 7) - 1)
+#define MASK_14 ((1 << 14) - 1)
+#define MASK_21 ((1 << 21) - 1)
+#define MASK_28 ((1 << 28) - 1)
+
+#define U32V_1(x) static_cast<byte>((x)&MASK_7)
+#define U32V_2(x) \
+  static_cast<byte>(((x)&MASK_7) | 0x80), static_cast<byte>(((x) >> 7) & MASK_7)
+#define U32V_3(x)                                      \
+  static_cast<byte>((((x)) & MASK_7) | 0x80),          \
+      static_cast<byte>((((x) >> 7) & MASK_7) | 0x80), \
+      static_cast<byte>(((x) >> 14) & MASK_7)
+#define U32V_4(x)                                       \
+  static_cast<byte>(((x)&MASK_7) | 0x80),               \
+      static_cast<byte>((((x) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>((((x) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((x) >> 21) & MASK_7)
+#define U32V_5(x)                                       \
+  static_cast<byte>(((x)&MASK_7) | 0x80),               \
+      static_cast<byte>((((x) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>((((x) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>((((x) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>((((x) >> 28) & MASK_7))
+
 // Convenience macros for building Wasm bytecode directly into a byte array.
 
 //------------------------------------------------------------------------------
@@ -33,14 +77,8 @@
 #define WASM_RETURN(...) kExprReturn, __VA_ARGS__
 #define WASM_UNREACHABLE kExprUnreachable
 
-#define WASM_TABLESWITCH_OP(case_count, table_count, ...)                 \
-  kExprTableSwitch, static_cast<byte>(case_count),                        \
-      static_cast<byte>(case_count >> 8), static_cast<byte>(table_count), \
-      static_cast<byte>(table_count >> 8), __VA_ARGS__
-
-#define WASM_TABLESWITCH_BODY0(key) key
-
-#define WASM_TABLESWITCH_BODY(key, ...) key, __VA_ARGS__
+#define WASM_BR_TABLE(key, count, ...) \
+  kExprBrTable, U32V_1(count), __VA_ARGS__, key
 
 #define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
 #define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
@@ -52,18 +90,222 @@
 #define WASM_ZERO kExprI8Const, 0
 #define WASM_ONE kExprI8Const, 1
 #define WASM_I8(val) kExprI8Const, static_cast<byte>(val)
-#define WASM_I32(val)                                                 \
-  kExprI32Const, static_cast<byte>(val), static_cast<byte>(val >> 8), \
-      static_cast<byte>(val >> 16), static_cast<byte>(val >> 24)
-#define WASM_I64(val)                                           \
-  kExprI64Const, static_cast<byte>(static_cast<uint64_t>(val)), \
-      static_cast<byte>(static_cast<uint64_t>(val) >> 8),       \
-      static_cast<byte>(static_cast<uint64_t>(val) >> 16),      \
-      static_cast<byte>(static_cast<uint64_t>(val) >> 24),      \
-      static_cast<byte>(static_cast<uint64_t>(val) >> 32),      \
-      static_cast<byte>(static_cast<uint64_t>(val) >> 40),      \
-      static_cast<byte>(static_cast<uint64_t>(val) >> 48),      \
-      static_cast<byte>(static_cast<uint64_t>(val) >> 56)
+
+#define I32V_MIN(length) -(1 << (6 + (7 * ((length) - 1))))
+#define I32V_MAX(length) ((1 << (6 + (7 * ((length) - 1)))) - 1)
+#define I64V_MIN(length) -(1LL << (6 + (7 * ((length) - 1))))
+#define I64V_MAX(length) ((1LL << (6 + 7 * ((length) - 1))) - 1)
+
+#define I32V_IN_RANGE(value, length) \
+  ((value) >= I32V_MIN(length) && (value) <= I32V_MAX(length))
+#define I64V_IN_RANGE(value, length) \
+  ((value) >= I64V_MIN(length) && (value) <= I64V_MAX(length))
+
+#define WASM_NO_LOCALS 0
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+inline void CheckI32v(int32_t value, int length) {
+  DCHECK(length >= 1 && length <= 5);
+  DCHECK(length == 5 || I32V_IN_RANGE(value, length));
+}
+
+inline void CheckI64v(int64_t value, int length) {
+  DCHECK(length >= 1 && length <= 10);
+  DCHECK(length == 10 || I64V_IN_RANGE(value, length));
+}
+
+// A helper for encoding local declarations prepended to the body of a
+// function.
+class LocalDeclEncoder {
+ public:
+  // Prepend local declarations by creating a new buffer and copying data
+  // over. The new buffer must be delete[]'d by the caller.
+  void Prepend(const byte** start, const byte** end) const {
+    size_t size = (*end - *start);
+    byte* buffer = new byte[Size() + size];
+    size_t pos = Emit(buffer);
+    memcpy(buffer + pos, *start, size);
+    pos += size;
+    *start = buffer;
+    *end = buffer + pos;
+  }
+
+  size_t Emit(byte* buffer) const {
+    size_t pos = 0;
+    pos = WriteUint32v(buffer, pos, static_cast<uint32_t>(local_decls.size()));
+    for (size_t i = 0; i < local_decls.size(); i++) {
+      pos = WriteUint32v(buffer, pos, local_decls[i].first);
+      buffer[pos++] = WasmOpcodes::LocalTypeCodeFor(local_decls[i].second);
+    }
+    DCHECK_EQ(Size(), pos);
+    return pos;
+  }
+
+  // Add local declarations to this helper. Return the index of the newly added
+  // local(s), with an optional adjustment for the parameters.
+  uint32_t AddLocals(uint32_t count, LocalType type,
+                     FunctionSig* sig = nullptr) {
+    if (count == 0) {
+      return static_cast<uint32_t>((sig ? sig->parameter_count() : 0) +
+                                   local_decls.size());
+    }
+    size_t pos = local_decls.size();
+    if (local_decls.size() > 0 && local_decls.back().second == type) {
+      count += local_decls.back().first;
+      local_decls.pop_back();
+    }
+    local_decls.push_back(std::pair<uint32_t, LocalType>(count, type));
+    return static_cast<uint32_t>(pos + (sig ? sig->parameter_count() : 0));
+  }
+
+  size_t Size() const {
+    size_t size = SizeofUint32v(static_cast<uint32_t>(local_decls.size()));
+    for (auto p : local_decls) size += 1 + SizeofUint32v(p.first);
+    return size;
+  }
+
+ private:
+  std::vector<std::pair<uint32_t, LocalType>> local_decls;
+
+  size_t SizeofUint32v(uint32_t val) const {
+    size_t size = 1;
+    while (true) {
+      byte b = val & MASK_7;
+      if (b == val) return size;
+      size++;
+      val = val >> 7;
+    }
+  }
+
+  // TODO(titzer): lift encoding of u32v to a common place.
+  size_t WriteUint32v(byte* buffer, size_t pos, uint32_t val) const {
+    while (true) {
+      byte b = val & MASK_7;
+      if (b == val) {
+        buffer[pos++] = b;
+        break;
+      }
+      buffer[pos++] = 0x80 | b;
+      val = val >> 7;
+    }
+    return pos;
+  }
+};
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+//------------------------------------------------------------------------------
+// Int32 Const operations
+//------------------------------------------------------------------------------
+#define WASM_I32V(val) kExprI32Const, U32V_5(val)
+
+#define WASM_I32V_1(val) \
+  static_cast<byte>(CheckI32v((val), 1), kExprI32Const), U32V_1(val)
+#define WASM_I32V_2(val) \
+  static_cast<byte>(CheckI32v((val), 2), kExprI32Const), U32V_2(val)
+#define WASM_I32V_3(val) \
+  static_cast<byte>(CheckI32v((val), 3), kExprI32Const), U32V_3(val)
+#define WASM_I32V_4(val) \
+  static_cast<byte>(CheckI32v((val), 4), kExprI32Const), U32V_4(val)
+#define WASM_I32V_5(val) \
+  static_cast<byte>(CheckI32v((val), 5), kExprI32Const), U32V_5(val)
+
+//------------------------------------------------------------------------------
+// Int64 Const operations
+//------------------------------------------------------------------------------
+#define WASM_I64V(val)                                                        \
+  kExprI64Const,                                                              \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 49) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 56) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 63) & MASK_7)
+
+#define WASM_I64V_1(val)                                                     \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 1), kExprI64Const), \
+      static_cast<byte>(static_cast<int64_t>(val) & MASK_7)
+#define WASM_I64V_2(val)                                                     \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 2), kExprI64Const), \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),        \
+      static_cast<byte>((static_cast<int64_t>(val) >> 7) & MASK_7)
+#define WASM_I64V_3(val)                                                     \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 3), kExprI64Const), \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),        \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 14) & MASK_7)
+#define WASM_I64V_4(val)                                                      \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 4), kExprI64Const),  \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 21) & MASK_7)
+#define WASM_I64V_5(val)                                                      \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 5), kExprI64Const),  \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 28) & MASK_7)
+#define WASM_I64V_6(val)                                                      \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 6), kExprI64Const),  \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 35) & MASK_7)
+#define WASM_I64V_7(val)                                                      \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 7), kExprI64Const),  \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 42) & MASK_7)
+#define WASM_I64V_8(val)                                                      \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 8), kExprI64Const),  \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 49) & MASK_7)
+#define WASM_I64V_9(val)                                                      \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 9), kExprI64Const),  \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 49) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 56) & MASK_7)
+#define WASM_I64V_10(val)                                                     \
+  static_cast<byte>(CheckI64v(static_cast<int64_t>(val), 10), kExprI64Const), \
+      static_cast<byte>((static_cast<int64_t>(val) & MASK_7) | 0x80),         \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 7) & MASK_7) | 0x80),  \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 14) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 21) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 28) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 35) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 42) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 49) & MASK_7) | 0x80), \
+      static_cast<byte>(((static_cast<int64_t>(val) >> 56) & MASK_7) | 0x80), \
+      static_cast<byte>((static_cast<int64_t>(val) >> 63) & MASK_7)
+
 #define WASM_F32(val)                                                       \
   kExprF32Const,                                                            \
       static_cast<byte>(bit_cast<int32_t>(static_cast<float>(val))),        \
@@ -87,21 +329,19 @@
 #define WASM_LOAD_MEM(type, index)                                      \
   static_cast<byte>(                                                    \
       v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
-      v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index
+      ZERO_ALIGNMENT, ZERO_OFFSET, index
 #define WASM_STORE_MEM(type, index, val)                               \
   static_cast<byte>(                                                   \
       v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
-      v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index, val
+      ZERO_ALIGNMENT, ZERO_OFFSET, index, val
 #define WASM_LOAD_MEM_OFFSET(type, offset, index)                       \
   static_cast<byte>(                                                    \
       v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
-      v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true),         \
-      static_cast<byte>(offset), index
+      ZERO_ALIGNMENT, U32V_1(offset), index
 #define WASM_STORE_MEM_OFFSET(type, offset, index, val)                \
   static_cast<byte>(                                                   \
       v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
-      v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true),        \
-      static_cast<byte>(offset), index, val
+      ZERO_ALIGNMENT, U32V_1(offset), index, val
 #define WASM_CALL_FUNCTION(index, ...) \
   kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
 #define WASM_CALL_IMPORT(index, ...) \
@@ -112,7 +352,7 @@
 #define WASM_CALL_IMPORT0(index) kExprCallImport, static_cast<byte>(index)
 #define WASM_CALL_INDIRECT0(index, func) \
   kExprCallIndirect, static_cast<byte>(index), func
-#define WASM_NOT(x) kExprBoolNot, x
+#define WASM_NOT(x) kExprI32Eqz, x
 
 //------------------------------------------------------------------------------
 // Constructs that are composed of multiple bytecodes.
@@ -144,6 +384,8 @@
 #define WASM_I32_SHL(x, y) kExprI32Shl, x, y
 #define WASM_I32_SHR(x, y) kExprI32ShrU, x, y
 #define WASM_I32_SAR(x, y) kExprI32ShrS, x, y
+#define WASM_I32_ROR(x, y) kExprI32Ror, x, y
+#define WASM_I32_ROL(x, y) kExprI32Rol, x, y
 #define WASM_I32_EQ(x, y) kExprI32Eq, x, y
 #define WASM_I32_NE(x, y) kExprI32Ne, x, y
 #define WASM_I32_LTS(x, y) kExprI32LtS, x, y
@@ -157,6 +399,7 @@
 #define WASM_I32_CLZ(x) kExprI32Clz, x
 #define WASM_I32_CTZ(x) kExprI32Ctz, x
 #define WASM_I32_POPCNT(x) kExprI32Popcnt, x
+#define WASM_I32_EQZ(x) kExprI32Eqz, x
 
 //------------------------------------------------------------------------------
 // Int64 operations
@@ -174,6 +417,8 @@
 #define WASM_I64_SHL(x, y) kExprI64Shl, x, y
 #define WASM_I64_SHR(x, y) kExprI64ShrU, x, y
 #define WASM_I64_SAR(x, y) kExprI64ShrS, x, y
+#define WASM_I64_ROR(x, y) kExprI64Ror, x, y
+#define WASM_I64_ROL(x, y) kExprI64Rol, x, y
 #define WASM_I64_EQ(x, y) kExprI64Eq, x, y
 #define WASM_I64_NE(x, y) kExprI64Ne, x, y
 #define WASM_I64_LTS(x, y) kExprI64LtS, x, y
@@ -187,6 +432,7 @@
 #define WASM_I64_CLZ(x) kExprI64Clz, x
 #define WASM_I64_CTZ(x) kExprI64Ctz, x
 #define WASM_I64_POPCNT(x) kExprI64Popcnt, x
+#define WASM_I64_EQZ(x) kExprI64Eqz, x
 
 //------------------------------------------------------------------------------
 // Float32 operations
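
The U32V_n macros above spell out the unsigned LEB128 ("u32v") encoding byte by byte: each byte carries seven payload bits and the high bit marks that another byte follows, so U32V_2(300) expands to 0xAC, 0x02 (300 = 0x12C, low seven bits 0x2C with the continuation bit set, then 0x02). The same encoding is produced at runtime by LocalDeclEncoder::WriteUint32v; a standalone equivalent, with encode_u32v as an illustrative name, is:

#include <cstdint>
#include <vector>

// Appends the unsigned LEB128 encoding of |value| to |out|.
static void encode_u32v(std::vector<uint8_t>* out, uint32_t value) {
  while (true) {
    uint8_t b = value & 0x7f;   // low seven payload bits
    value >>= 7;
    if (value == 0) {
      out->push_back(b);        // last byte: continuation bit clear
      return;
    }
    out->push_back(b | 0x80);   // more bytes follow
  }
}

// encode_u32v(&bytes, 300) appends {0xAC, 0x02}; encode_u32v(&bytes, 5)
// appends {0x05}, the same bytes U32V_1(5) would place in a test module.
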
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index 02d197c..a1c2a7a 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -19,13 +19,43 @@
 namespace internal {
 namespace wasm {
 
+static const char* wasmSections[] = {
+#define F(enumerator, string) string,
+    FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+};
+
+static uint8_t wasmSectionsLengths[]{
+#define F(enumerator, string) sizeof(string) - 1,
+    FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+};
+
+static_assert(sizeof(wasmSections) / sizeof(wasmSections[0]) ==
+                  (size_t)WasmSection::Code::Max,
+              "expected enum WasmSection::Code to be monotonic from 0");
+
+WasmSection::Code WasmSection::begin() { return (WasmSection::Code)0; }
+WasmSection::Code WasmSection::end() { return WasmSection::Code::Max; }
+WasmSection::Code WasmSection::next(WasmSection::Code code) {
+  return (WasmSection::Code)(1 + (uint32_t)code);
+}
+
+const char* WasmSection::getName(WasmSection::Code code) {
+  return wasmSections[(size_t)code];
+}
+
+size_t WasmSection::getNameLength(WasmSection::Code code) {
+  return wasmSectionsLengths[(size_t)code];
+}
+
 std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
   os << "WASM module with ";
-  os << (1 << module.min_mem_size_log2) << " min mem";
-  os << (1 << module.max_mem_size_log2) << " max mem";
-  if (module.functions) os << module.functions->size() << " functions";
-  if (module.globals) os << module.functions->size() << " globals";
-  if (module.data_segments) os << module.functions->size() << " data segments";
+  os << (module.min_mem_pages * module.kPageSize) << " min mem";
+  os << (module.max_mem_pages * module.kPageSize) << " max mem";
+  os << module.functions.size() << " functions";
+  os << module.globals.size() << " globals";
+  os << module.data_segments.size() << " data segments";
   return os;
 }
 
@@ -48,7 +78,9 @@
   os << "#" << pair.function_->func_index << ":";
   if (pair.function_->name_offset > 0) {
     if (pair.module_) {
-      os << pair.module_->GetName(pair.function_->name_offset);
+      WasmName name = pair.module_->GetName(pair.function_->name_offset,
+                                            pair.function_->name_length);
+      os.write(name.name, name.length);
     } else {
       os << "+" << pair.function_->func_index;
     }
@@ -91,15 +123,15 @@
   }
 
   void Link(Handle<FixedArray> function_table,
-            std::vector<uint16_t>* functions) {
+            std::vector<uint16_t>& functions) {
     for (size_t i = 0; i < function_code_.size(); i++) {
       LinkFunction(function_code_[i]);
     }
-    if (functions && !function_table.is_null()) {
-      int table_size = static_cast<int>(functions->size());
+    if (!function_table.is_null()) {
+      int table_size = static_cast<int>(functions.size());
       DCHECK_EQ(function_table->length(), table_size * 2);
       for (int i = 0; i < table_size; i++) {
-        function_table->set(i + table_size, *function_code_[functions->at(i)]);
+        function_table->set(i + table_size, *function_code_[functions[i]]);
       }
     }
   }
@@ -151,11 +183,10 @@
 const int kWasmMemArrayBuffer = 2;
 const int kWasmGlobalsArrayBuffer = 3;
 
-
-size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>* globals) {
+size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>& globals) {
   uint32_t offset = 0;
-  if (!globals) return 0;
-  for (WasmGlobal& global : *globals) {
+  if (globals.size() == 0) return 0;
+  for (WasmGlobal& global : globals) {
     byte size = WasmOpcodes::MemSize(global.type);
     offset = (offset + size - 1) & ~(size - 1);  // align
     global.offset = offset;
@@ -166,8 +197,9 @@
 
 
 void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
-  for (const WasmDataSegment& segment : *module->data_segments) {
+  for (const WasmDataSegment& segment : module->data_segments) {
     if (!segment.init) continue;
+    if (!segment.source_size) continue;
     CHECK_LT(segment.dest_addr, mem_size);
     CHECK_LE(segment.source_size, mem_size);
     CHECK_LE(segment.dest_addr + segment.source_size, mem_size);
@@ -179,14 +211,13 @@
 
 
 Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
-  if (!module->function_table || module->function_table->size() == 0) {
+  if (module->function_table.size() == 0) {
     return Handle<FixedArray>::null();
   }
-  int table_size = static_cast<int>(module->function_table->size());
+  int table_size = static_cast<int>(module->function_table.size());
   Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
   for (int i = 0; i < table_size; i++) {
-    WasmFunction* function =
-        &module->functions->at(module->function_table->at(i));
+    WasmFunction* function = &module->functions[module->function_table[i]];
     fixed->set(i, Smi::FromInt(function->sig_index));
   }
   return fixed;
@@ -194,7 +225,7 @@
 
 Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
                                      byte** backing_store) {
-  if (size > (1 << WasmModule::kMaxMemSize)) {
+  if (size > (WasmModule::kMaxMemPages * WasmModule::kPageSize)) {
     // TODO(titzer): lift restriction on maximum memory allocated here.
     *backing_store = nullptr;
     return Handle<JSArrayBuffer>::null();
@@ -236,12 +267,11 @@
   DCHECK(instance->module);
   DCHECK(instance->mem_buffer.is_null());
 
-  if (instance->module->min_mem_size_log2 > WasmModule::kMaxMemSize) {
+  if (instance->module->min_mem_pages > WasmModule::kMaxMemPages) {
     thrower->Error("Out of memory: wasm memory too large");
     return false;
   }
-  instance->mem_size = static_cast<size_t>(1)
-                       << instance->module->min_mem_size_log2;
+  instance->mem_size = WasmModule::kPageSize * instance->module->min_mem_pages;
   instance->mem_buffer =
       NewArrayBuffer(isolate, instance->mem_size, &instance->mem_start);
   if (!instance->mem_start) {
@@ -273,50 +303,75 @@
     : shared_isolate(nullptr),
       module_start(nullptr),
       module_end(nullptr),
-      min_mem_size_log2(0),
-      max_mem_size_log2(0),
+      min_mem_pages(0),
+      max_mem_pages(0),
       mem_export(false),
       mem_external(false),
       start_function_index(-1),
-      globals(nullptr),
-      signatures(nullptr),
-      functions(nullptr),
-      data_segments(nullptr),
-      function_table(nullptr),
-      import_table(nullptr) {}
+      origin(kWasmOrigin) {}
 
-WasmModule::~WasmModule() {
-  if (globals) delete globals;
-  if (signatures) delete signatures;
-  if (functions) delete functions;
-  if (data_segments) delete data_segments;
-  if (function_table) delete function_table;
-  if (import_table) delete import_table;
+static MaybeHandle<JSFunction> ReportFFIError(ErrorThrower& thrower,
+                                              const char* error, uint32_t index,
+                                              wasm::WasmName module_name,
+                                              wasm::WasmName function_name) {
+  if (function_name.name) {
+    thrower.Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
+                  index, module_name.length, module_name.name,
+                  function_name.length, function_name.name, error);
+  } else {
+    thrower.Error("Import #%d module=\"%.*s\" error: %s", index,
+                  module_name.length, module_name.name, error);
+  }
+  thrower.Error("Import ");
+  return MaybeHandle<JSFunction>();
 }
 
-static MaybeHandle<JSFunction> LookupFunction(ErrorThrower& thrower,
-                                              Handle<JSObject> ffi,
-                                              uint32_t index,
-                                              Handle<String> name,
-                                              const char* cstr) {
-  if (!ffi.is_null()) {
-    MaybeHandle<Object> result = Object::GetProperty(ffi, name);
-    if (!result.is_null()) {
-      Handle<Object> obj = result.ToHandleChecked();
-      if (obj->IsJSFunction()) {
-        return Handle<JSFunction>::cast(obj);
-      } else {
-        thrower.Error("FFI function #%d:%s is not a JSFunction.", index, cstr);
-        return MaybeHandle<JSFunction>();
-      }
-    } else {
-      thrower.Error("FFI function #%d:%s not found.", index, cstr);
-      return MaybeHandle<JSFunction>();
-    }
-  } else {
-    thrower.Error("FFI table is not an object.");
-    return MaybeHandle<JSFunction>();
+static MaybeHandle<JSFunction> LookupFunction(
+    ErrorThrower& thrower, Factory* factory, Handle<JSObject> ffi,
+    uint32_t index, wasm::WasmName module_name, wasm::WasmName function_name) {
+  if (ffi.is_null()) {
+    return ReportFFIError(thrower, "FFI is not an object", index, module_name,
+                          function_name);
   }
+
+  // Look up the module first.
+  Handle<String> name = factory->InternalizeUtf8String(
+      Vector<const char>(module_name.name, module_name.length));
+  MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+  if (result.is_null()) {
+    return ReportFFIError(thrower, "module not found", index, module_name,
+                          function_name);
+  }
+
+  Handle<Object> module = result.ToHandleChecked();
+
+  if (!module->IsJSReceiver()) {
+    return ReportFFIError(thrower, "module is not an object or function", index,
+                          module_name, function_name);
+  }
+
+  Handle<Object> function;
+  if (function_name.name) {
+    // Look up the function in the module.
+    Handle<String> name = factory->InternalizeUtf8String(
+        Vector<const char>(function_name.name, function_name.length));
+    MaybeHandle<Object> result = Object::GetProperty(module, name);
+    if (result.is_null()) {
+      return ReportFFIError(thrower, "function not found", index, module_name,
+                            function_name);
+    }
+    function = result.ToHandleChecked();
+  } else {
+    // No function specified. Use the "default export".
+    function = module;
+  }
+
+  if (!function->IsJSFunction()) {
+    return ReportFFIError(thrower, "not a function", index, module_name,
+                          function_name);
+  }
+
+  return Handle<JSFunction>::cast(function);
 }
 
 // Instantiates a wasm module as a JSObject.
@@ -338,11 +393,10 @@
       JS_OBJECT_TYPE,
       JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
   WasmModuleInstance instance(this);
-  std::vector<Handle<Code>> import_code;
   instance.context = isolate->native_context();
   instance.js_object = factory->NewJSObjectFromMap(map, TENURED);
   Handle<FixedArray> code_table =
-      factory->NewFixedArray(static_cast<int>(functions->size()), TENURED);
+      factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
   instance.js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
 
   //-------------------------------------------------------------------------
@@ -359,13 +413,6 @@
                                        *instance.mem_buffer);
   LoadDataSegments(this, instance.mem_start, instance.mem_size);
 
-  if (mem_export) {
-    // Export the memory as a named property.
-    Handle<String> name = factory->InternalizeUtf8String("memory");
-    JSObject::AddProperty(instance.js_object, name, instance.mem_buffer,
-                          READ_ONLY);
-  }
-
   //-------------------------------------------------------------------------
   // Allocate the globals area if necessary.
   //-------------------------------------------------------------------------
@@ -382,25 +429,27 @@
   //-------------------------------------------------------------------------
   uint32_t index = 0;
   instance.function_table = BuildFunctionTable(isolate, this);
-  WasmLinker linker(isolate, functions->size());
+  WasmLinker linker(isolate, functions.size());
   ModuleEnv module_env;
   module_env.module = this;
   module_env.instance = &instance;
   module_env.linker = &linker;
-  module_env.asm_js = false;
+  module_env.origin = origin;
 
-  if (import_table->size() > 0) {
-    instance.import_code = &import_code;
-    instance.import_code->reserve(import_table->size());
-    for (const WasmImport& import : *import_table) {
-      const char* cstr = GetName(import.function_name_offset);
-      Handle<String> name = factory->InternalizeUtf8String(cstr);
-      MaybeHandle<JSFunction> function =
-          LookupFunction(thrower, ffi, index, name, cstr);
+  if (import_table.size() > 0) {
+    instance.import_code.reserve(import_table.size());
+    for (const WasmImport& import : import_table) {
+      WasmName module_name =
+          GetNameOrNull(import.module_name_offset, import.module_name_length);
+      WasmName function_name = GetNameOrNull(import.function_name_offset,
+                                             import.function_name_length);
+      MaybeHandle<JSFunction> function = LookupFunction(
+          thrower, factory, ffi, index, module_name, function_name);
       if (function.is_null()) return MaybeHandle<JSObject>();
       Handle<Code> code = compiler::CompileWasmToJSWrapper(
-          isolate, &module_env, function.ToHandleChecked(), import.sig, cstr);
-      instance.import_code->push_back(code);
+          isolate, &module_env, function.ToHandleChecked(), import.sig,
+          module_name, function_name);
+      instance.import_code.push_back(code);
       index++;
     }
   }
@@ -410,27 +459,32 @@
   //-------------------------------------------------------------------------
 
   // First pass: compile each function and initialize the code table.
-  index = 0;
-  for (const WasmFunction& func : *functions) {
+  index = FLAG_skip_compiling_wasm_funcs;
+  while (index < functions.size()) {
+    const WasmFunction& func = functions[index];
     if (thrower.error()) break;
     DCHECK_EQ(index, func.func_index);
 
-    const char* cstr = GetName(func.name_offset);
-    Handle<String> name = factory->InternalizeUtf8String(cstr);
+    WasmName str = GetName(func.name_offset, func.name_length);
+    WasmName str_null = {nullptr, 0};
+    Handle<String> name = factory->InternalizeUtf8String(
+        Vector<const char>(str.name, str.length));
     Handle<Code> code = Handle<Code>::null();
     Handle<JSFunction> function = Handle<JSFunction>::null();
     if (func.external) {
       // Lookup external function in FFI object.
       MaybeHandle<JSFunction> function =
-          LookupFunction(thrower, ffi, index, name, cstr);
+          LookupFunction(thrower, factory, ffi, index, str, str_null);
       if (function.is_null()) return MaybeHandle<JSObject>();
-      code = compiler::CompileWasmToJSWrapper(
-          isolate, &module_env, function.ToHandleChecked(), func.sig, cstr);
+      code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
+                                              function.ToHandleChecked(),
+                                              func.sig, str, str_null);
     } else {
       // Compile the function.
       code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
       if (code.is_null()) {
-        thrower.Error("Compilation of #%d:%s failed.", index, cstr);
+        thrower.Error("Compilation of #%d:%.*s failed.", index, str.length,
+                      str.name);
         return MaybeHandle<JSObject>();
       }
       if (func.exported) {
@@ -455,6 +509,40 @@
   instance.js_object->SetInternalField(kWasmModuleFunctionTable,
                                        Smi::FromInt(0));
 
+  //-------------------------------------------------------------------------
+  // Create and populate the exports object.
+  //-------------------------------------------------------------------------
+  if (export_table.size() > 0 || mem_export) {
+    index = 0;
+    // Create the "exports" object.
+    Handle<JSFunction> object_function = Handle<JSFunction>(
+        isolate->native_context()->object_function(), isolate);
+    Handle<JSObject> exports_object =
+        factory->NewJSObject(object_function, TENURED);
+    Handle<String> exports_name = factory->InternalizeUtf8String("exports");
+    JSObject::AddProperty(instance.js_object, exports_name, exports_object,
+                          READ_ONLY);
+
+    // Compile wrappers and add them to the exports object.
+    for (const WasmExport& exp : export_table) {
+      if (thrower.error()) break;
+      WasmName str = GetName(exp.name_offset, exp.name_length);
+      Handle<String> name = factory->InternalizeUtf8String(
+          Vector<const char>(str.name, str.length));
+      Handle<Code> code = linker.GetFunctionCode(exp.func_index);
+      Handle<JSFunction> function = compiler::CompileJSToWasmWrapper(
+          isolate, &module_env, name, code, instance.js_object, exp.func_index);
+      JSObject::AddProperty(exports_object, name, function, READ_ONLY);
+    }
+
+    if (mem_export) {
+      // Export the memory as a named property.
+      Handle<String> name = factory->InternalizeUtf8String("memory");
+      JSObject::AddProperty(exports_object, name, instance.mem_buffer,
+                            READ_ONLY);
+    }
+  }
+
   // Run the start function if one was specified.
   if (this->start_function_index >= 0) {
     HandleScope scope(isolate);
@@ -480,18 +568,12 @@
 Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
   DCHECK(IsValidFunction(index));
   if (linker) return linker->GetFunctionCode(index);
-  if (instance && instance->function_code) {
-    return instance->function_code->at(index);
-  }
-  return Handle<Code>::null();
+  return instance ? instance->function_code[index] : Handle<Code>::null();
 }
 
 Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
   DCHECK(IsValidImport(index));
-  if (instance && instance->import_code) {
-    return instance->import_code->at(index);
-  }
-  return Handle<Code>::null();
+  return instance ? instance->import_code[index] : Handle<Code>::null();
 }
 
 compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
@@ -499,7 +581,7 @@
   DCHECK(IsValidFunction(index));
   // Always make a direct call to whatever is in the table at that location.
   // A wrapper will be generated for FFI calls.
-  WasmFunction* function = &module->functions->at(index);
+  WasmFunction* function = &module->functions[index];
   return GetWasmCallDescriptor(zone, function->sig);
 }
 
@@ -507,12 +589,15 @@
 int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
                                 const byte* module_end, bool asm_js) {
   HandleScope scope(isolate);
-  Zone zone;
+  Zone zone(isolate->allocator());
   // Decode the module, but don't verify function bodies, since we'll
   // be compiling them anyway.
-  ModuleResult result =
-      DecodeWasmModule(isolate, &zone, module_start, module_end, false, false);
+  ModuleResult result = DecodeWasmModule(isolate, &zone, module_start,
+                                         module_end, false, kWasmOrigin);
   if (result.failed()) {
+    if (result.val) {
+      delete result.val;
+    }
     // Module verification failed. throw.
     std::ostringstream str;
     str << "WASM.compileRun() failed: " << result;
@@ -546,18 +631,18 @@
   instance.function_table = BuildFunctionTable(isolate, module);
 
   // Create module environment.
-  WasmLinker linker(isolate, module->functions->size());
+  WasmLinker linker(isolate, module->functions.size());
   ModuleEnv module_env;
   module_env.module = module;
   module_env.instance = &instance;
   module_env.linker = &linker;
-  module_env.asm_js = false;
+  module_env.origin = module->origin;
 
   // Compile all functions.
   Handle<Code> main_code = Handle<Code>::null();  // record last code.
   uint32_t index = 0;
   int main_index = 0;
-  for (const WasmFunction& func : *module->functions) {
+  for (const WasmFunction& func : module->functions) {
     DCHECK_EQ(index, func.func_index);
     if (!func.external) {
       // Compile the function and install it in the code table.
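
Names in the module bytes are now carried as (offset, length) pairs instead of NUL-terminated C strings, which is why the thrower.Error() call above switches to the "%.*s" precision form. A minimal standalone C++ sketch of that formatting; the buffer contents and indices are invented for illustration:

// Print a non-NUL-terminated name slice with an explicit length.
#include <cstdint>
#include <cstdio>

int main() {
  // Pretend this is a slice of the wasm module; "main" is not NUL-terminated.
  const char module_bytes[] = {'m', 'a', 'i', 'n', 0x01, 0x02};
  uint32_t name_offset = 0;
  uint32_t name_length = 4;

  // "%.*s" takes an int precision before the pointer, so at most
  // name_length bytes are read from the (unterminated) buffer.
  std::printf("Compilation of #%d:%.*s failed.\n", 7,
              static_cast<int>(name_length), module_bytes + name_offset);
  return 0;
}
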
diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h
index 5f5777c..4e5aa78 100644
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
@@ -22,22 +22,81 @@
 const size_t kMaxModuleSize = 1024 * 1024 * 1024;
 const size_t kMaxFunctionSize = 128 * 1024;
 const size_t kMaxStringSize = 256;
+const uint32_t kWasmMagic = 0x6d736100;
+const uint32_t kWasmVersion = 0x0a;
 
-enum WasmSectionDeclCode {
-  kDeclMemory = 0x00,
-  kDeclSignatures = 0x01,
-  kDeclFunctions = 0x02,
-  kDeclGlobals = 0x03,
-  kDeclDataSegments = 0x04,
-  kDeclFunctionTable = 0x05,
-  kDeclEnd = 0x06,
-  kDeclStartFunction = 0x07,
-  kDeclImportTable = 0x08,
-  kDeclWLL = 0x11,
+// WebAssembly sections are named as strings in the binary format, but
+// internally V8 uses an enum to handle them.
+//
+// Entries have the form F(enumerator, string).
+#define FOR_EACH_WASM_SECTION_TYPE(F)          \
+  F(Memory, "memory")                          \
+  F(Signatures, "signatures")                  \
+  F(Functions, "functions")                    \
+  F(Globals, "globals")                        \
+  F(DataSegments, "data_segments")             \
+  F(FunctionTable, "function_table")           \
+  F(End, "end")                                \
+  F(StartFunction, "start_function")           \
+  F(ImportTable, "import_table")               \
+  F(ExportTable, "export_table")               \
+  F(FunctionSignatures, "function_signatures") \
+  F(FunctionBodies, "function_bodies")         \
+  F(Names, "names")
+
+// Constants for the above section types: {LEB128 length, characters...}.
+#define WASM_SECTION_MEMORY 6, 'm', 'e', 'm', 'o', 'r', 'y'
+#define WASM_SECTION_SIGNATURES \
+  10, 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', 's'
+#define WASM_SECTION_FUNCTIONS 9, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', 's'
+#define WASM_SECTION_GLOBALS 7, 'g', 'l', 'o', 'b', 'a', 'l', 's'
+#define WASM_SECTION_DATA_SEGMENTS \
+  13, 'd', 'a', 't', 'a', '_', 's', 'e', 'g', 'm', 'e', 'n', 't', 's'
+#define WASM_SECTION_FUNCTION_TABLE \
+  14, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_END 3, 'e', 'n', 'd'
+#define WASM_SECTION_START_FUNCTION \
+  14, 's', 't', 'a', 'r', 't', '_', 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
+#define WASM_SECTION_IMPORT_TABLE \
+  12, 'i', 'm', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_EXPORT_TABLE \
+  12, 'e', 'x', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_FUNCTION_SIGNATURES                                    \
+  19, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 's', 'i', 'g', 'n', 'a', \
+      't', 'u', 'r', 'e', 's'
+#define WASM_SECTION_FUNCTION_BODIES \
+  15, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 'b', 'o', 'd', 'i', 'e', 's'
+#define WASM_SECTION_NAMES 5, 'n', 'a', 'm', 'e', 's'
+
+// Constants for the above section headers' size (LEB128 + characters).
+#define WASM_SECTION_MEMORY_SIZE ((size_t)7)
+#define WASM_SECTION_SIGNATURES_SIZE ((size_t)11)
+#define WASM_SECTION_FUNCTIONS_SIZE ((size_t)10)
+#define WASM_SECTION_GLOBALS_SIZE ((size_t)8)
+#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)14)
+#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)15)
+#define WASM_SECTION_END_SIZE ((size_t)4)
+#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)15)
+#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)13)
+#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)13)
+#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)20)
+#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)16)
+#define WASM_SECTION_NAMES_SIZE ((size_t)6)
+
+struct WasmSection {
+  enum class Code : uint32_t {
+#define F(enumerator, string) enumerator,
+    FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+        Max
+  };
+  static WasmSection::Code begin();
+  static WasmSection::Code end();
+  static WasmSection::Code next(WasmSection::Code code);
+  static const char* getName(Code code);
+  static size_t getNameLength(Code code);
 };
 
-static const int kMaxModuleSectionCode = 0x11;
-
 enum WasmFunctionDeclBit {
   kDeclFunctionName = 0x01,
   kDeclFunctionImport = 0x02,
@@ -47,15 +106,15 @@
 
 // Constants for fixed-size elements within a module.
 static const size_t kDeclMemorySize = 3;
-static const size_t kDeclGlobalSize = 6;
 static const size_t kDeclDataSegmentSize = 13;
 
 // Static representation of a WASM function.
 struct WasmFunction {
   FunctionSig* sig;      // signature of the function.
   uint32_t func_index;   // index into the function table.
-  uint16_t sig_index;    // index into the signature table.
+  uint32_t sig_index;    // index into the signature table.
   uint32_t name_offset;  // offset in the module bytes of the name, if any.
+  uint32_t name_length;  // length in bytes of the name.
   uint32_t code_start_offset;    // offset in the module bytes of code start.
   uint32_t code_end_offset;      // offset in the module bytes of code end.
   uint16_t local_i32_count;      // number of i32 local variables.
@@ -69,14 +128,24 @@
 // Static representation of an imported WASM function.
 struct WasmImport {
   FunctionSig* sig;               // signature of the function.
-  uint16_t sig_index;             // index into the signature table.
+  uint32_t sig_index;             // index into the signature table.
   uint32_t module_name_offset;    // offset in module bytes of the module name.
+  uint32_t module_name_length;    // length in bytes of the module name.
   uint32_t function_name_offset;  // offset in module bytes of the import name.
+  uint32_t function_name_length;  // length in bytes of the import name.
+};
+
+// Static representation of an exported WASM function.
+struct WasmExport {
+  uint32_t func_index;   // index into the function table.
+  uint32_t name_offset;  // offset in module bytes of the name to export.
+  uint32_t name_length;  // length in bytes of the exported name.
 };
 
 // Static representation of a wasm global variable.
 struct WasmGlobal {
   uint32_t name_offset;  // offset in the module bytes of the name, if any.
+  uint32_t name_length;  // length in bytes of the global name.
   MachineType type;      // type of the global.
   uint32_t offset;       // offset from beginning of globals area.
   bool exported;         // true if this global is exported.
@@ -90,35 +159,46 @@
   bool init;               // true if loaded upon instantiation.
 };
 
+enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
+
 // Static representation of a module.
 struct WasmModule {
-  static const uint8_t kMinMemSize = 12;  // Minimum memory size = 4kb
-  static const uint8_t kMaxMemSize = 30;  // Maximum memory size = 1gb
+  static const uint32_t kPageSize = 0x10000;    // Page size, 64kb.
+  static const uint32_t kMinMemPages = 1;       // Minimum memory size = 64kb
+  static const uint32_t kMaxMemPages = 16384;   // Maximum memory size =  1gb
 
   Isolate* shared_isolate;    // isolate for storing shared code.
   const byte* module_start;   // starting address for the module bytes.
   const byte* module_end;     // end address for the module bytes.
-  uint8_t min_mem_size_log2;  // minimum size of the memory (log base 2).
-  uint8_t max_mem_size_log2;  // maximum size of the memory (log base 2).
+  uint32_t min_mem_pages;     // minimum size of the memory in 64k pages.
+  uint32_t max_mem_pages;     // maximum size of the memory in 64k pages.
   bool mem_export;            // true if the memory is exported.
   bool mem_external;          // true if the memory is external.
   int start_function_index;   // start function, if any.
+  ModuleOrigin origin;        // origin of the module
 
-  std::vector<WasmGlobal>* globals;             // globals in this module.
-  std::vector<FunctionSig*>* signatures;        // signatures in this module.
-  std::vector<WasmFunction>* functions;         // functions in this module.
-  std::vector<WasmDataSegment>* data_segments;  // data segments in this module.
-  std::vector<uint16_t>* function_table;        // function table.
-  std::vector<WasmImport>* import_table;        // import table.
+  std::vector<WasmGlobal> globals;             // globals in this module.
+  std::vector<FunctionSig*> signatures;        // signatures in this module.
+  std::vector<WasmFunction> functions;         // functions in this module.
+  std::vector<WasmDataSegment> data_segments;  // data segments in this module.
+  std::vector<uint16_t> function_table;        // function table.
+  std::vector<WasmImport> import_table;        // import table.
+  std::vector<WasmExport> export_table;        // export table.
 
   WasmModule();
-  ~WasmModule();
 
-  // Get a pointer to a string stored in the module bytes representing a name.
-  const char* GetName(uint32_t offset) const {
-    if (offset == 0) return "<?>";  // no name.
-    CHECK(BoundsCheck(offset, offset + 1));
-    return reinterpret_cast<const char*>(module_start + offset);
+  // Get a string stored in the module bytes representing a name.
+  WasmName GetName(uint32_t offset, uint32_t length) const {
+    if (length == 0) return {"<?>", 3};  // no name.
+    CHECK(BoundsCheck(offset, offset + length));
+    return {reinterpret_cast<const char*>(module_start + offset), length};
+  }
+
+  // Get a string stored in the module bytes representing a name.
+  WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
+    if (length == 0) return {NULL, 0};  // no name.
+    CHECK(BoundsCheck(offset, offset + length));
+    return {reinterpret_cast<const char*>(module_start + offset), length};
   }
 
   // Checks the given offset range is contained within the module bytes.
@@ -141,8 +221,8 @@
   Handle<JSArrayBuffer> mem_buffer;      // Handle to array buffer of memory.
   Handle<JSArrayBuffer> globals_buffer;  // Handle to array buffer of globals.
   Handle<FixedArray> function_table;     // indirect function table.
-  std::vector<Handle<Code>>* function_code;  // code objects for each function.
-  std::vector<Handle<Code>>* import_code;   // code objects for each import.
+  std::vector<Handle<Code>> function_code;  // code objects for each function.
+  std::vector<Handle<Code>> import_code;    // code objects for each import.
   // -- raw memory ------------------------------------------------------------
   byte* mem_start;  // start of linear memory.
   size_t mem_size;  // size of the linear memory.
@@ -152,7 +232,6 @@
 
   explicit WasmModuleInstance(WasmModule* m)
       : module(m),
-        function_code(nullptr),
         mem_start(nullptr),
         mem_size(0),
         globals_start(nullptr),
@@ -168,41 +247,42 @@
   WasmModule* module;
   WasmModuleInstance* instance;
   WasmLinker* linker;
-  bool asm_js;  // true if the module originated from asm.js.
+  ModuleOrigin origin;
 
   bool IsValidGlobal(uint32_t index) {
-    return module && index < module->globals->size();
+    return module && index < module->globals.size();
   }
   bool IsValidFunction(uint32_t index) {
-    return module && index < module->functions->size();
+    return module && index < module->functions.size();
   }
   bool IsValidSignature(uint32_t index) {
-    return module && index < module->signatures->size();
+    return module && index < module->signatures.size();
   }
   bool IsValidImport(uint32_t index) {
-    return module && index < module->import_table->size();
+    return module && index < module->import_table.size();
   }
   MachineType GetGlobalType(uint32_t index) {
     DCHECK(IsValidGlobal(index));
-    return module->globals->at(index).type;
+    return module->globals[index].type;
   }
   FunctionSig* GetFunctionSignature(uint32_t index) {
     DCHECK(IsValidFunction(index));
-    return module->functions->at(index).sig;
+    return module->functions[index].sig;
   }
   FunctionSig* GetImportSignature(uint32_t index) {
     DCHECK(IsValidImport(index));
-    return module->import_table->at(index).sig;
+    return module->import_table[index].sig;
   }
   FunctionSig* GetSignature(uint32_t index) {
     DCHECK(IsValidSignature(index));
-    return module->signatures->at(index);
+    return module->signatures[index];
   }
   size_t FunctionTableSize() {
-    return module && module->function_table ? module->function_table->size()
-                                            : 0;
+    return module ? module->function_table.size() : 0;
   }
 
+  bool asm_js() { return origin == kAsmJsOrigin; }
+
   Handle<Code> GetFunctionCode(uint32_t index);
   Handle<Code> GetImportCode(uint32_t index);
   Handle<FixedArray> GetFunctionTable();
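
The WASM_SECTION_* macros above spell each section name as an LEB128 length byte followed by its ASCII characters, and the matching *_SIZE constants count both. A small standalone check of that relationship, using only the memory section as an example (the section name bytes mirror the macro above; everything else is illustrative):

#include <cstdint>

// Mirrors WASM_SECTION_MEMORY: {LEB128 length, characters...}.
constexpr uint8_t kMemorySectionName[] = {6, 'm', 'e', 'm', 'o', 'r', 'y'};

// Mirrors WASM_SECTION_MEMORY_SIZE: one length byte plus six characters.
static_assert(sizeof(kMemorySectionName) == 7,
              "section header size = LEB128 length byte + name bytes");

int main() {
  // Names shorter than 128 bytes fit in a single LEB128 byte, so the prefix
  // is simply the character count.
  return kMemorySectionName[0] == 6 ? 0 : 1;
}
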
diff --git a/src/wasm/wasm-opcodes.cc b/src/wasm/wasm-opcodes.cc
index a609e03..736c4d9 100644
--- a/src/wasm/wasm-opcodes.cc
+++ b/src/wasm/wasm-opcodes.cc
@@ -66,6 +66,7 @@
 #define SET_SIG_TABLE(name, opcode, sig) \
   kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
   FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
+  FOREACH_ASMJS_COMPAT_OPCODE(SET_SIG_TABLE);
 #undef SET_SIG_TABLE
 }
 
@@ -103,6 +104,8 @@
     case kExprI64Shl:
     case kExprI64ShrU:
     case kExprI64ShrS:
+    case kExprI64Ror:
+    case kExprI64Rol:
     case kExprI64Eq:
     case kExprI64Ne:
     case kExprI64LtS:
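
The hunk above extends the existing X-macro trick: SET_SIG_TABLE is expanded once per entry of each opcode list, so adding FOREACH_ASMJS_COMPAT_OPCODE to the expansion gives the asm.js compatibility opcodes signature entries too. A minimal standalone sketch of the pattern; the opcode lists, values, and "arity" payload here are made up and are not V8's:

#include <cstdio>

#define FOREACH_SIMPLE_OP(V) \
  V(Add, 0x40, 2)            \
  V(Neg, 0x41, 1)

#define FOREACH_EXTRA_OP(V) \
  V(Sqrt, 0x50, 1)

static int sig_table[256];  // zero-initialized; 0 means "no signature".

static void InitSigTable() {
  // The same table-filling entry is expanded over both lists.
#define SET_SIG(name, opcode, arity) sig_table[opcode] = (arity);
  FOREACH_SIMPLE_OP(SET_SIG)
  FOREACH_EXTRA_OP(SET_SIG)
#undef SET_SIG
}

int main() {
  InitSigTable();
  std::printf("0x50 -> arity %d\n", sig_table[0x50]);  // prints "0x50 -> arity 1"
  return 0;
}
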
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index 7cb9c00..52f85aa 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -46,28 +46,14 @@
 // We use kTagged here because kNone is already used by kAstStmt.
 const LocalType kAstEnd = MachineRepresentation::kTagged;
 
-// Functionality related to encoding memory accesses.
-struct MemoryAccess {
-  // Atomicity annotations for access to the memory and globals.
-  enum Atomicity {
-    kNone = 0,        // non-atomic
-    kSequential = 1,  // sequential consistency
-    kAcquire = 2,     // acquire semantics
-    kRelease = 3      // release semantics
-  };
-
-  // Alignment annotations for memory accesses.
-  enum Alignment { kAligned = 0, kUnaligned = 1 };
-
-  // Bitfields for the various annotations for memory accesses.
-  typedef BitField<Alignment, 7, 1> AlignmentField;
-  typedef BitField<Atomicity, 5, 2> AtomicityField;
-  typedef BitField<bool, 4, 1> OffsetField;
-};
-
 typedef Signature<LocalType> FunctionSig;
 std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
 
+struct WasmName {
+  const char* name;
+  uint32_t length;
+};
+
 // TODO(titzer): Renumber all the opcodes to fill in holes.
 
 // Control expressions and blocks.
@@ -80,7 +66,7 @@
   V(Select, 0x05, _)              \
   V(Br, 0x06, _)                  \
   V(BrIf, 0x07, _)                \
-  V(TableSwitch, 0x08, _)         \
+  V(BrTable, 0x08, _)             \
   V(Return, 0x14, _)              \
   V(Unreachable, 0x15, _)
 
@@ -97,7 +83,8 @@
   V(StoreGlobal, 0x11, _)      \
   V(CallFunction, 0x12, _)     \
   V(CallIndirect, 0x13, _)     \
-  V(CallImport, 0x1F, _)
+  V(CallImport, 0x1F, _)       \
+  V(DeclLocals, 0x1E, _)
 
 // Load memory expressions.
 #define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -161,7 +148,7 @@
   V(I32Clz, 0x57, i_i)            \
   V(I32Ctz, 0x58, i_i)            \
   V(I32Popcnt, 0x59, i_i)         \
-  V(BoolNot, 0x5a, i_i)           \
+  V(I32Eqz, 0x5a, i_i)            \
   V(I64Add, 0x5b, l_ll)           \
   V(I64Sub, 0x5c, l_ll)           \
   V(I64Mul, 0x5d, l_ll)           \
@@ -188,6 +175,7 @@
   V(I64Clz, 0x72, l_l)            \
   V(I64Ctz, 0x73, l_l)            \
   V(I64Popcnt, 0x74, l_l)         \
+  V(I64Eqz, 0xba, i_l)            \
   V(F32Add, 0x75, f_ff)           \
   V(F32Sub, 0x76, f_ff)           \
   V(F32Mul, 0x77, f_ff)           \
@@ -252,7 +240,47 @@
   V(F64ConvertF32, 0xb2, d_f)     \
   V(F64ReinterpretI64, 0xb3, d_l) \
   V(I32ReinterpretF32, 0xb4, i_f) \
-  V(I64ReinterpretF64, 0xb5, l_d)
+  V(I64ReinterpretF64, 0xb5, l_d) \
+  V(I32Ror, 0xb6, i_ii)           \
+  V(I32Rol, 0xb7, i_ii)           \
+  V(I64Ror, 0xb8, l_ll)           \
+  V(I64Rol, 0xb9, l_ll)
+
+// For compatibility with Asm.js.
+#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
+  V(F64Acos, 0xc0, d_d)                \
+  V(F64Asin, 0xc1, d_d)                \
+  V(F64Atan, 0xc2, d_d)                \
+  V(F64Cos, 0xc3, d_d)                 \
+  V(F64Sin, 0xc4, d_d)                 \
+  V(F64Tan, 0xc5, d_d)                 \
+  V(F64Exp, 0xc6, d_d)                 \
+  V(F64Log, 0xc7, d_d)                 \
+  V(F64Atan2, 0xc8, d_dd)              \
+  V(F64Pow, 0xc9, d_dd)                \
+  V(F64Mod, 0xca, d_dd)
+
+// TODO(titzer): sketch of asm-js compatibility bytecodes
+/* V(I32AsmjsDivS, 0xd0, i_ii)          \ */
+/* V(I32AsmjsDivU, 0xd1, i_ii)          \ */
+/* V(I32AsmjsRemS, 0xd2, i_ii)          \ */
+/* V(I32AsmjsRemU, 0xd3, i_ii)          \ */
+/* V(I32AsmjsLoad8S, 0xd4, i_i)         \ */
+/* V(I32AsmjsLoad8U, 0xd5, i_i)         \ */
+/* V(I32AsmjsLoad16S, 0xd6, i_i)        \ */
+/* V(I32AsmjsLoad16U, 0xd7, i_i)        \ */
+/* V(I32AsmjsLoad, 0xd8, i_i)           \ */
+/* V(F32AsmjsLoad, 0xd9, f_i)           \ */
+/* V(F64AsmjsLoad, 0xda, d_i)           \ */
+/* V(I32AsmjsStore8, 0xdb, i_i)         \ */
+/* V(I32AsmjsStore16, 0xdc, i_i)        \ */
+/* V(I32AsmjsStore, 0xdd, i_ii)         \ */
+/* V(F32AsmjsStore, 0xde, i_if)         \ */
+/* V(F64AsmjsStore, 0xdf, i_id)         \ */
+/* V(I32SAsmjsConvertF32, 0xe0, i_f)    \ */
+/* V(I32UAsmjsConvertF32, 0xe1, i_f)    \ */
+/* V(I32SAsmjsConvertF64, 0xe2, i_d)    \ */
+/* V(I32UAsmjsConvertF64, 0xe3, i_d) */
 
 // All opcodes.
 #define FOREACH_OPCODE(V)     \
@@ -261,7 +289,8 @@
   FOREACH_SIMPLE_OPCODE(V)    \
   FOREACH_STORE_MEM_OPCODE(V) \
   FOREACH_LOAD_MEM_OPCODE(V)  \
-  FOREACH_MISC_MEM_OPCODE(V)
+  FOREACH_MISC_MEM_OPCODE(V)  \
+  FOREACH_ASMJS_COMPAT_OPCODE(V)
 
 // All signatures.
 #define FOREACH_SIGNATURE(V)         \
@@ -300,6 +329,19 @@
 #undef DECLARE_NAMED_ENUM
 };
 
+// The reason for a trap.
+enum TrapReason {
+  kTrapUnreachable,
+  kTrapMemOutOfBounds,
+  kTrapDivByZero,
+  kTrapDivUnrepresentable,
+  kTrapRemByZero,
+  kTrapFloatUnrepresentable,
+  kTrapFuncInvalid,
+  kTrapFuncSigMismatch,
+  kTrapCount
+};
+
 // A collection of opcode-related static methods.
 class WasmOpcodes {
  public:
@@ -428,10 +470,6 @@
     }
   }
 
-  static byte LoadStoreAccessOf(bool with_offset) {
-    return MemoryAccess::OffsetField::encode(with_offset);
-  }
-
   static char ShortNameOf(LocalType type) {
     switch (type) {
       case kAstI32:
@@ -470,6 +508,29 @@
         return "<unknown>";
     }
   }
+
+  static const char* TrapReasonName(TrapReason reason) {
+    switch (reason) {
+      case kTrapUnreachable:
+        return "unreachable";
+      case kTrapMemOutOfBounds:
+        return "memory access out of bounds";
+      case kTrapDivByZero:
+        return "divide by zero";
+      case kTrapDivUnrepresentable:
+        return "divide result unrepresentable";
+      case kTrapRemByZero:
+        return "remainder by zero";
+      case kTrapFloatUnrepresentable:
+        return "integer result unrepresentable";
+      case kTrapFuncInvalid:
+        return "invalid function";
+      case kTrapFuncSigMismatch:
+        return "function signature mismatch";
+      default:
+        return "<?>";
+    }
+  }
 };
 }  // namespace wasm
 }  // namespace internal
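
WasmName, introduced above, is a non-owning (pointer, length) view into the module bytes, handed out by WasmModule::GetName() only after a bounds check. A minimal standalone sketch of the same idea; the NameView/GetName names and the byte buffer are placeholders, not V8 API:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct NameView {
  const char* name;
  uint32_t length;
};

NameView GetName(const uint8_t* start, const uint8_t* end, uint32_t offset,
                 uint32_t length) {
  if (length == 0) return {"<?>", 3};      // unnamed entity
  assert(start + offset + length <= end);  // stand-in for CHECK(BoundsCheck(...))
  return {reinterpret_cast<const char*>(start + offset), length};
}

int main() {
  const uint8_t bytes[] = {'f', 'o', 'o', 0xFF};
  NameView v = GetName(bytes, bytes + sizeof(bytes), 0, 3);
  std::printf("%.*s\n", static_cast<int>(v.length), v.name);  // prints "foo"
  return 0;
}
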
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 9a0fc7c..f32f407 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -326,6 +326,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Memory::Address_at(pc_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -364,6 +368,20 @@
   }
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Memory::Address_at(pc_) = updated_reference;
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+  }
+}
 
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -410,8 +428,8 @@
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
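
RelocInfo::update_wasm_memory_reference() above rebases an absolute memory address baked into generated code when the backing store moves or is resized: the reference keeps its offset relative to the memory base. The arithmetic, sketched standalone with plain integers in place of code patching and icache flushing (all concrete addresses are invented):

#include <cassert>
#include <cstdint>
#include <cstdio>

uintptr_t Rebase(uintptr_t old_ref, uintptr_t old_base, size_t old_size,
                 uintptr_t new_base, size_t new_size) {
  // The old reference must lie inside the old memory region...
  assert(old_base <= old_ref && old_ref < old_base + old_size);
  // ...and keeps its offset relative to the base after the move.
  uintptr_t updated = new_base + (old_ref - old_base);
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}

int main() {
  // Memory grew from 64 KiB at 0x1000 to 128 KiB at 0x9000; a reference to
  // offset 0x20 inside it moves accordingly.
  uintptr_t updated = Rebase(0x1020, 0x1000, 0x10000, 0x9000, 0x20000);
  std::printf("0x%zx\n", static_cast<size_t>(updated));  // prints 0x9020
  return 0;
}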
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 3cf3398..214b786 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -523,8 +523,9 @@
 void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
   if (!reg.is_byte_register()) {
-    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
-    emit_rex_32(reg);
+    emit_rex_32(reg, op);
+  } else {
+    emit_optional_rex_32(reg, op);
   }
   emit(opcode);
   emit_operand(reg, op);
@@ -1469,17 +1470,18 @@
   emitp(value, rmode);
 }
 
-
-void Assembler::movq(Register dst, int64_t value) {
+void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   emit_rex_64(dst);
   emit(0xB8 | dst.low_bits());
+  if (!RelocInfo::IsNone(rmode)) {
+    RecordRelocInfo(rmode, value);
+  }
   emitq(value);
 }
 
-
-void Assembler::movq(Register dst, uint64_t value) {
-  movq(dst, static_cast<int64_t>(value));
+void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
+  movq(dst, static_cast<int64_t>(value), rmode);
 }
 
 
@@ -2014,6 +2016,50 @@
   emit_operand(reg, op);
 }
 
+void Assembler::testw(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  if (src.low_bits() == 4) {
+    emit_rex_32(src, dst);
+  }
+  emit(0x85);
+  emit_modrm(src, dst);
+}
+
+void Assembler::testw(Register reg, Immediate mask) {
+  DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  if (reg.is(rax)) {
+    emit(0xA9);
+    emit(mask.value_);
+  } else {
+    if (reg.low_bits() == 4) {
+      emit_rex_32(reg);
+    }
+    emit(0xF7);
+    emit_modrm(0x0, reg);
+    emit(mask.value_);
+  }
+}
+
+void Assembler::testw(const Operand& op, Immediate mask) {
+  DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(rax, op);
+  emit(0xF7);
+  emit_operand(rax, op);
+  emit(mask.value_);
+}
+
+void Assembler::testw(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg, op);
+  emit(0x85);
+  emit_operand(reg, op);
+}
 
 void Assembler::emit_test(Register dst, Register src, int size) {
   EnsureSpace ensure_space(this);
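
Every new testw() emitter above starts with emit(0x66): in long mode TEST shares its opcodes between 32- and 16-bit operand sizes, and the 0x66 operand-size prefix selects the 16-bit form. A hand-assembled illustration of the accumulator short form, `test ax, 0x1234`, with the vector standing in for the code buffer:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint8_t> code;
  code.push_back(0x66);  // operand-size override: 16-bit operands
  code.push_back(0xA9);  // TEST AX, imm16 (short form for the accumulator)
  code.push_back(0x34);  // imm16 low byte (little-endian)
  code.push_back(0x12);  // imm16 high byte
  for (uint8_t b : code) std::printf("%02x ", b);
  std::printf("\n");  // prints: 66 a9 34 12
  return 0;
}
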
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 2847ff2..e48f358 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -699,8 +699,10 @@
   void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
 
   // Loads a 64-bit immediate into a register.
-  void movq(Register dst, int64_t value);
-  void movq(Register dst, uint64_t value);
+  void movq(Register dst, int64_t value,
+            RelocInfo::Mode rmode = RelocInfo::NONE64);
+  void movq(Register dst, uint64_t value,
+            RelocInfo::Mode rmode = RelocInfo::NONE64);
 
   void movsxbl(Register dst, Register src);
   void movsxbl(Register dst, const Operand& src);
@@ -771,6 +773,10 @@
     arithmetic_op_16(0x39, src, dst);
   }
 
+  void testb(Register reg, const Operand& op) { testb(op, reg); }
+
+  void testw(Register reg, const Operand& op) { testw(op, reg); }
+
   void andb(Register dst, Immediate src) {
     immediate_arithmetic_op_8(0x4, dst, src);
   }
@@ -846,6 +852,11 @@
   void testb(const Operand& op, Immediate mask);
   void testb(const Operand& op, Register reg);
 
+  void testw(Register dst, Register src);
+  void testw(Register reg, Immediate mask);
+  void testw(const Operand& op, Immediate mask);
+  void testw(const Operand& op, Register reg);
+
   // Bit operations.
   void bt(const Operand& dst, Register src);
   void bts(const Operand& dst, Register src);
@@ -1695,7 +1706,9 @@
   void dp(uintptr_t data) { dq(data); }
   void dq(Label* label);
 
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
 
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
@@ -2178,8 +2191,8 @@
 
   List< Handle<Code> > code_targets_;
 
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
 };
 
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 6c4419e..3163783 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -124,6 +124,7 @@
                                            bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- rax: number of arguments
+  //  -- rsi: context
   //  -- rdi: constructor function
   //  -- rbx: allocation site or undefined
   //  -- rdx: new target
@@ -135,6 +136,7 @@
 
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(rbx);
+    __ Push(rsi);
     __ Push(rbx);
     __ Integer32ToSmi(rcx, rax);
     __ Push(rcx);
@@ -200,7 +202,7 @@
     }
 
     // Restore context from the frame.
-    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -351,9 +353,6 @@
     // r9         : argc
     // [rsp+0x20] : argv
 
-    // Clear the context before we push it when entering the internal frame.
-    __ Set(rsi, 0);
-
     // Enter an internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -1010,6 +1009,28 @@
   }
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax     : argc
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : first argument (left-hand side)
+  //  -- rsp[16] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ movp(InstanceOfDescriptor::LeftRegister(),
+            Operand(rbp, 2 * kPointerSize));  // Load left-hand side.
+    __ movp(InstanceOfDescriptor::RightRegister(),
+            Operand(rbp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ ret(2 * kPointerSize);
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1810,40 +1831,6 @@
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
 
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ movp(kScratchRegister,
-            FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-    __ testb(FieldOperand(kScratchRegister,
-                          SharedFunctionInfo::kStrongModeByteOffset),
-             Immediate(1 << SharedFunctionInfo::kStrongModeBitWithinByte));
-    __ j(equal, &no_strong_error, Label::kNear);
-
-    // What we really care about is the required number of arguments.
-
-    if (kPointerSize == kInt32Size) {
-      __ movp(
-          kScratchRegister,
-          FieldOperand(kScratchRegister, SharedFunctionInfo::kLengthOffset));
-      __ SmiToInteger32(kScratchRegister, kScratchRegister);
-    } else {
-      // See comment near kLengthOffset in src/objects.h
-      __ movsxlq(
-          kScratchRegister,
-          FieldOperand(kScratchRegister, SharedFunctionInfo::kLengthOffset));
-      __ shrq(kScratchRegister, Immediate(1));
-    }
-
-    __ cmpp(rax, kScratchRegister);
-    __ j(greater_equal, &no_strong_error, Label::kNear);
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentsAdaptorStackCheck(masm, &stack_overflow);
 
@@ -1870,7 +1857,7 @@
     __ j(less, &fill);
 
     // Restore function pointer.
-    __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+    __ movp(rdi, Operand(rbp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
   }
 
   // Call the entry point.
@@ -2065,18 +2052,19 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is active.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ Move(kScratchRegister, debug_is_active);
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ Move(kScratchRegister, is_tail_call_elimination_enabled);
   __ cmpb(Operand(kScratchRegister, 0), Immediate(0));
-  __ j(not_equal, &done);
+  __ j(equal, &done);
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ Cmp(Operand(rbp, StandardFrameConstants::kMarkerOffset),
+    __ Cmp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
            Smi::FromInt(StackFrame::STUB));
     __ j(not_equal, &no_interpreter_frame, Label::kNear);
     __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2084,16 +2072,18 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+  __ Cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ movp(rbp, scratch2);
   __ SmiToInteger32(
-      scratch1, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+      caller_args_count_reg,
+      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ jmp(&formal_parameter_count_loaded, Label::kNear);
 
   __ bind(&no_arguments_adaptor);
@@ -2102,55 +2092,14 @@
   __ movp(scratch1,
           FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
   __ LoadSharedFunctionInfoSpecialField(
-      scratch1, scratch1, SharedFunctionInfo::kFormalParameterCountOffset);
+      caller_args_count_reg, scratch1,
+      SharedFunctionInfo::kFormalParameterCountOffset);
 
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the destination address where we will put the return address
-  // after we drop current frame.
-  Register new_sp_reg = scratch2;
-  __ subp(scratch1, args_reg);
-  __ leap(new_sp_reg, Operand(rbp, scratch1, times_pointer_size,
-                              StandardFrameConstants::kCallerPCOffset));
-
-  if (FLAG_debug_code) {
-    __ cmpp(rsp, new_sp_reg);
-    __ Check(below, kStackAccessBelowStackPointer);
-  }
-
-  // Copy receiver and return address as well.
-  Register count_reg = scratch1;
-  __ leap(count_reg, Operand(args_reg, 2));
-
-  // Copy return address from caller's frame to current frame's return address
-  // to avoid its trashing and let the following loop copy it to the right
-  // place.
-  Register tmp_reg = scratch3;
-  __ movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
-  __ movp(Operand(rsp, 0), tmp_reg);
-
-  // Restore caller's frame pointer now as it could be overwritten by
-  // the copying loop.
-  __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
-  Operand src(rsp, count_reg, times_pointer_size, 0);
-  Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-  Label loop, entry;
-  __ jmp(&entry, Label::kNear);
-  __ bind(&loop);
-  __ decp(count_reg);
-  __ movp(tmp_reg, src);
-  __ movp(dst, tmp_reg);
-  __ bind(&entry);
-  __ cmpp(count_reg, Immediate(0));
-  __ j(not_equal, &loop, Label::kNear);
-
-  // Leave current frame.
-  __ movp(rsp, new_sp_reg);
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack);
   __ bind(&done);
 }
 }  // namespace
@@ -2727,23 +2676,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &ok);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
-         RelocInfo::CODE_TARGET);
-
-  __ bind(&ok);
-  __ ret(0);
-}
-
-
 #undef __
 
 }  // namespace internal
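
The removed block in PrepareForTailCall computed the destination stack pointer inline before that logic moved into MacroAssembler::PrepareForTailCall: the current frame is dropped by landing the return address at rbp + (caller_argc - callee_argc) * kPointerSize + kCallerPCOffset. A standalone sketch of that arithmetic; the frame-constant value and all concrete numbers are assumptions for illustration:

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kPointerSize = 8;
  const intptr_t kCallerPCOffset = 1 * kPointerSize;  // assumed x64 frame layout

  intptr_t rbp = 0x7fff0000;  // pretend frame pointer
  intptr_t caller_argc = 5;   // arguments of the frame being dropped
  intptr_t callee_argc = 2;   // arguments of the tail-called function

  // Matches the removed subp/leap pair: adjust the argument area by the
  // difference in argument counts, then point at the saved return address.
  intptr_t new_sp =
      rbp + (caller_argc - callee_argc) * kPointerSize + kCallerPCOffset;
  std::printf("new sp = %#zx\n", static_cast<size_t>(new_sp));
  return 0;
}
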
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index f314b9c..e737801 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -4,8 +4,9 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/api-arguments.h"
+#include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
@@ -75,6 +76,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -534,34 +539,6 @@
 }
 
 
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  Register scratch = rax;
-  DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
-  // Check that the key is an array index, that is Uint32.
-  STATIC_ASSERT(kSmiValueSize <= 32);
-  __ JumpUnlessNonNegativeSmi(key, &slow);
-
-  // Everything is fine, call runtime.
-  __ PopReturnAddressTo(scratch);
-  __ Push(receiver);  // receiver
-  __ Push(key);       // key
-  __ PushReturnAddressFrom(scratch);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   // Return address is on the stack.
   Label miss;
@@ -1269,7 +1246,7 @@
     // Not strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label return_unequal, undetectable;
+    Label return_equal, return_unequal, undetectable;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -1283,10 +1260,10 @@
     __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
     __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(not_zero, &undetectable);
+    __ j(not_zero, &undetectable, Label::kNear);
     __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(not_zero, &return_unequal);
+    __ j(not_zero, &return_unequal, Label::kNear);
 
     __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
     __ j(below, &runtime_call, Label::kNear);
@@ -1300,7 +1277,17 @@
     __ bind(&undetectable);
     __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
+
+    // If both sides are JSReceivers, then the result is false according to
+    // the HTML specification, which says that only comparisons with null or
+    // undefined are affected by special casing for document.all.
+    __ CmpInstanceType(rbx, ODDBALL_TYPE);
+    __ j(zero, &return_equal, Label::kNear);
+    __ CmpInstanceType(rcx, ODDBALL_TYPE);
+    __ j(not_zero, &return_unequal, Label::kNear);
+
+    __ bind(&return_equal);
     __ Set(rax, EQUAL);
     __ ret(0);
   }
@@ -1877,14 +1864,12 @@
     __ pushq(rbp);
     __ movp(rbp, rsp);
 
-    // Push the stack frame type marker twice.
+    // Push the stack frame type.
     int marker = type();
-    // Scratch register is neither callee-save, nor an argument register on any
-    // platform. It's free to use at this point.
-    // Cannot use smi-register for loading yet.
-    __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
-    __ Push(kScratchRegister);  // context slot
-    __ Push(kScratchRegister);  // function slot
+    __ Push(Smi::FromInt(marker));  // context slot
+    ExternalReference context_address(Isolate::kContextAddress, isolate());
+    __ Load(kScratchRegister, context_address);
+    __ Push(kScratchRegister);  // context
     // Save callee-saved registers (X64/X32/Win64 calling conventions).
     __ pushq(r12);
     __ pushq(r13);
@@ -2069,6 +2054,11 @@
   __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
   __ j(not_equal, &slow_case);
 
+  // Go to the runtime if the function is not a constructor.
+  __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &slow_case);
+
   // Ensure that {function} has an instance prototype.
   __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
            Immediate(1 << Map::kHasNonInstancePrototype));
@@ -2137,7 +2127,8 @@
   __ Push(object);
   __ Push(function);
   __ PushReturnAddressFrom(kScratchRegister);
-  __ TailCallRuntime(Runtime::kInstanceOf);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -2544,23 +2535,21 @@
   __ Ret();
   __ bind(&not_heap_number);
 
-  Label not_string, slow_string;
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in rax.
+  __ AssertNotNumber(rax);
+
+  Label not_string;
   __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
   // rax: object
   // rdi: object map
   __ j(above_equal, &not_string, Label::kNear);
-  // Check if string has a cached array index.
-  __ testl(FieldOperand(rax, String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(not_zero, &slow_string, Label::kNear);
-  __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
-  __ IndexFromHash(rax, rax);
-  __ Ret();
-  __ bind(&slow_string);
-  __ PopReturnAddressTo(rcx);     // Pop return address.
-  __ Push(rax);                   // Push argument.
-  __ PushReturnAddressFrom(rcx);  // Push return address.
-  __ TailCallRuntime(Runtime::kStringToNumber);
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
   __ bind(&not_string);
 
   Label not_oddball;
@@ -2576,26 +2565,26 @@
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in rax.
+  __ AssertString(rax);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes on argument in rax.
-  Label not_smi, positive_smi;
-  __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ testp(rax, rax);
-  __ j(greater_equal, &positive_smi, Label::kNear);
-  __ xorl(rax, rax);
-  __ bind(&positive_smi);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ testl(FieldOperand(rax, String::kHashFieldOffset),
+           Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &runtime, Label::kNear);
+  __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+  __ IndexFromHash(rax, rax);
   __ Ret();
-  __ bind(&not_smi);
 
+  __ bind(&runtime);
   __ PopReturnAddressTo(rcx);     // Pop return address.
   __ Push(rax);                   // Push argument.
   __ PushReturnAddressFrom(rcx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in rax.
   Label is_number;
@@ -2803,45 +2792,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rdx    : left string
-  //  -- rax    : right string
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  __ AssertString(rdx);
-  __ AssertString(rax);
-
-  // Check for identity.
-  Label not_same;
-  __ cmpp(rdx, rax);
-  __ j(not_equal, &not_same, Label::kNear);
-  __ Move(rax, Smi::FromInt(EQUAL));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
-
-  // Inline comparison of one-byte strings.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
-                                                  r8);
-
-  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
-  // tagged as a small integer.
-  __ bind(&runtime);
-  __ PopReturnAddressTo(rcx);
-  __ Push(rdx);
-  __ Push(rax);
-  __ PushReturnAddressFrom(rcx);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rdx    : left
@@ -3158,13 +3108,21 @@
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
-  __ PopReturnAddressTo(tmp1);
-  __ Push(left);
-  __ Push(right);
-  __ PushReturnAddressFrom(tmp1);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left);
+      __ Push(right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ LoadRoot(rdx, Heap::kTrueValueRootIndex);
+    __ subp(rax, rdx);
+    __ Ret();
   } else {
+    __ PopReturnAddressTo(tmp1);
+    __ Push(left);
+    __ Push(right);
+    __ PushReturnAddressFrom(tmp1);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3641,7 +3599,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ movp(rbx, MemOperand(rbp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ PopReturnAddressTo(rcx);
@@ -4610,7 +4568,7 @@
     __ bind(&loop);
     __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
     __ j(not_equal, &loop);
   }
 
@@ -4618,7 +4576,7 @@
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
   __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+  __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &no_rest_parameters, Label::kNear);
 
@@ -4774,7 +4732,7 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
+  __ movp(r8, Operand(rax, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor_frame);
 
@@ -4980,14 +4938,14 @@
     __ bind(&loop);
     __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
     __ j(not_equal, &loop);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+  __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &arguments_adaptor, Label::kNear);
   {
@@ -5415,10 +5373,7 @@
   __ jmp(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rdi                 : callee
   //  -- rbx                 : call_data
@@ -5451,8 +5406,6 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || rax.is(argc.reg()));
-
   __ PopReturnAddressTo(return_address);
 
   // context save
@@ -5464,7 +5417,7 @@
   // call data
   __ Push(call_data);
   Register scratch = call_data;
-  if (!call_data_undefined) {
+  if (!this->call_data_undefined()) {
     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   }
   // return value
@@ -5481,7 +5434,7 @@
   // Push return address back on stack.
   __ PushReturnAddressFrom(return_address);
 
-  if (!is_lazy) {
+  if (!this->is_lazy()) {
     // load context from callee
     __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
   }
@@ -5493,28 +5446,15 @@
   PrepareCallApiFunction(masm, kApiStackSpace);
 
   // FunctionCallbackInfo::implicit_args_.
+  int argc = this->argc();
   __ movp(StackSpaceOperand(0), scratch);
-  if (argc.is_immediate()) {
-    __ addp(scratch, Immediate((argc.immediate() + FCA::kArgsLength - 1) *
-                               kPointerSize));
-    // FunctionCallbackInfo::values_.
-    __ movp(StackSpaceOperand(1), scratch);
-    // FunctionCallbackInfo::length_.
-    __ Set(StackSpaceOperand(2), argc.immediate());
-    // FunctionCallbackInfo::is_construct_call_.
-    __ Set(StackSpaceOperand(3), 0);
-  } else {
-    __ leap(scratch, Operand(scratch, argc.reg(), times_pointer_size,
-                             (FCA::kArgsLength - 1) * kPointerSize));
-    // FunctionCallbackInfo::values_.
-    __ movp(StackSpaceOperand(1), scratch);
-    // FunctionCallbackInfo::length_.
-    __ movp(StackSpaceOperand(2), argc.reg());
-    // FunctionCallbackInfo::is_construct_call_.
-    __ leap(argc.reg(), Operand(argc.reg(), times_pointer_size,
-                                (FCA::kArgsLength + 1) * kPointerSize));
-    __ movp(StackSpaceOperand(3), argc.reg());
-  }
+  __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+  // FunctionCallbackInfo::values_.
+  __ movp(StackSpaceOperand(1), scratch);
+  // FunctionCallbackInfo::length_.
+  __ Set(StackSpaceOperand(2), argc);
+  // FunctionCallbackInfo::is_construct_call_.
+  __ Set(StackSpaceOperand(3), 0);
 
 #if defined(__MINGW64__) || defined(_WIN64)
   Register arguments_arg = rcx;
@@ -5541,36 +5481,17 @@
       FCA::kArgsLength - FCA::kContextSaveIndex);
   Operand is_construct_call_operand = StackSpaceOperand(3);
   Operand return_value_operand = args_from_rbp.GetArgumentOperand(
-      return_first_arg ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
+      this->is_store() ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
   int stack_space = 0;
   Operand* stack_space_operand = &is_construct_call_operand;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_operand = nullptr;
-  }
+  stack_space = argc + FCA::kArgsLength + 1;
+  stack_space_operand = nullptr;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
                            stack_space, stack_space_operand,
                            return_value_operand, &context_restore_operand);
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(rax), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rsp[0]                          : return address
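
With argc now a compile-time immediate, the rewritten CallApiCallbackStub::Generate() above derives the FunctionCallbackInfo fields and the stack space to drop from constants. A standalone sketch of those two computations; kArgsLength mirrors FCA::kArgsLength from the stub, the concrete argc is invented, and reading the trailing +1 slot as the receiver is an assumption:

#include <cstdio>

int main() {
  const int kPointerSize = 8;
  const int kArgsLength = 7;  // implicit FunctionCallbackArguments slots
  const int argc = 3;         // example call with three JS arguments

  // values_ is implicit_args_ advanced by (argc + kArgsLength - 1) slots,
  // expressed here as a byte offset (matches the addp above).
  int values_offset = (argc + kArgsLength - 1) * kPointerSize;

  // Slots dropped on return: JS arguments + implicit slots + one more
  // (assumed to be the receiver).
  int stack_space = argc + kArgsLength + 1;

  std::printf("values_ offset = %d bytes, stack_space = %d slots\n",
              values_offset, stack_space);
  return 0;
}
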
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 81c1a69..33e987e 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -288,6 +288,7 @@
   __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
   __ j(equal, &only_change_map);
 
+  __ Push(rsi);
   __ Push(rax);
 
   __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -326,7 +327,7 @@
   // Call into runtime if GC is required.
   __ bind(&gc_required);
   __ Pop(rax);
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ Pop(rsi);
   __ jmp(fail);
 
   // Box doubles into heap numbers.
@@ -380,7 +381,7 @@
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ Pop(rax);
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ Pop(rsi);
 
   __ bind(&only_change_map);
   // Set transitioned map.
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index ddf59eb..9d70c32 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -105,12 +105,6 @@
   }
 }
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  // There is no dynamic alignment padding on x64 in the input frame.
-  return false;
-}
-
-
 #define __ masm()->
 
 void Deoptimizer::TableEntryGenerator::Generate() {
@@ -162,7 +156,12 @@
 
   // Allocate a new deoptimizer object.
   __ PrepareCallCFunction(6);
+  __ movp(rax, Immediate(0));
+  Label context_check;
+  __ movp(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(rdi, &context_check);
   __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
   __ movp(arg_reg_1, rax);
   __ Set(arg_reg_2, type());
   // Args 3 and 4 are already in the right registers.
@@ -232,7 +231,9 @@
   }
   __ popq(rax);
 
-  // Replace the current frame with the output frames.
+  __ movp(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
+
+  // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
   // Outer loop state: rax = current FrameDescription**, rdx = one past the
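
The new context_check above uses JumpIfSmi to tell a typed frame (whose marker slot holds a Smi) from a JavaScript frame (whose slot holds a context, i.e. a heap object). A minimal sketch of why that is a single bit test on x64; the sample words are invented:

#include <cstdint>
#include <cstdio>

// kSmiTag == 0 and kHeapObjectTag == 1: Smis have the low bit clear, heap
// object pointers have it set.
bool IsSmi(uintptr_t word) { return (word & 1) == 0; }

int main() {
  uintptr_t frame_type_marker = uintptr_t{5} << 32;  // Smi-style: low bit clear
  uintptr_t context_pointer = 0x00007f1234567891;    // heap object: low bit set
  std::printf("%d %d\n", IsSmi(frame_type_marker), IsSmi(context_pointer));
  return 0;
}
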
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index d213ecb..5aad54a 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -39,13 +39,11 @@
   static const int kArgvOffset     = 6 * kPointerSize;
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize       = 2 * kPointerSize;
-
-  static const int kCodeOffset      = -2 * kPointerSize;
-  static const int kSPOffset        = -1 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 
   static const int kCallerFPOffset  = +0 * kPointerSize;
   static const int kCallerPCOffset  = kFPOnStackSize;
@@ -63,7 +61,7 @@
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
   static const int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 
   // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index 0913d1c..b10b522 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -114,36 +114,8 @@
 }
 
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // ToNumberStub invokes a function, and therefore needs a context.
-  Register registers[] = {rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return rax; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return rax; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return rax; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return rax; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
 
 void FastCloneRegExpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -263,6 +235,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -307,6 +286,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (arg count)
+  Register registers[] = {rax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -315,20 +300,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {rdx, rax};
@@ -388,21 +359,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {
-      rdi,  // callee
-      rbx,  // call_data
-      rcx,  // holder
-      rdx,  // api_function_address
-      rax,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       rdi,  // callee
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index e72d40b..566091d 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -215,25 +215,27 @@
     bind(&ok);
   }
   // Load store buffer top.
-  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  movp(scratch, ExternalOperand(store_buffer));
   // Store pointer to buffer.
   movp(Operand(scratch, 0), addr);
   // Increment buffer top.
   addp(scratch, Immediate(kPointerSize));
   // Write back new top of buffer.
-  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  movp(ExternalOperand(store_buffer), scratch);
   // Call stub on end of buffer.
   Label done;
   // Check for end of buffer.
-  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  testp(scratch, Immediate(StoreBuffer::kStoreBufferMask));
   if (and_then == kReturnAtEnd) {
     Label buffer_overflowed;
-    j(not_equal, &buffer_overflowed, Label::kNear);
+    j(equal, &buffer_overflowed, Label::kNear);
     ret(0);
     bind(&buffer_overflowed);
   } else {
     DCHECK(and_then == kFallThroughAtEnd);
-    j(equal, &done, Label::kNear);
+    j(not_equal, &done, Label::kNear);
   }
   StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
   CallStub(&store_buffer_overflow);
@@ -1114,6 +1116,14 @@
   }
 }
 
+void MacroAssembler::Set(Register dst, int64_t x, RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::WASM_MEMORY_REFERENCE) {
+    DCHECK(x != 0);
+    movq(dst, x, rmode);
+  } else {
+    DCHECK(RelocInfo::IsNone(rmode));
+  }
+}
 
 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
   if (kPointerSize == kInt64Size) {
@@ -3867,6 +3877,15 @@
   }
 }
 
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    Condition is_smi = CheckSmi(object);
+    Check(NegateCondition(is_smi), kOperandIsANumber);
+    Cmp(FieldOperand(object, HeapObject::kMapOffset),
+        isolate()->factory()->heap_number_map());
+    Check(not_equal, kOperandIsANumber);
+  }
+}
 
 void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
@@ -4094,6 +4113,77 @@
   Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
 }
 
+void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+                                        Register caller_args_count_reg,
+                                        Register scratch0, Register scratch1,
+                                        ReturnAddressState ra_state) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+#endif
+
+  // Calculate the destination address where we will put the return address
+  // after we drop current frame.
+  Register new_sp_reg = scratch0;
+  if (callee_args_count.is_reg()) {
+    subp(caller_args_count_reg, callee_args_count.reg());
+    leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
+                             StandardFrameConstants::kCallerPCOffset));
+  } else {
+    leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
+                             StandardFrameConstants::kCallerPCOffset -
+                                 callee_args_count.immediate() * kPointerSize));
+  }
+
+  if (FLAG_debug_code) {
+    cmpp(rsp, new_sp_reg);
+    Check(below, kStackAccessBelowStackPointer);
+  }
+
+  // Copy return address from caller's frame to current frame's return address
+  // to avoid its trashing and let the following loop copy it to the right
+  // place.
+  Register tmp_reg = scratch1;
+  if (ra_state == ReturnAddressState::kOnStack) {
+    movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+    movp(Operand(rsp, 0), tmp_reg);
+  } else {
+    DCHECK(ReturnAddressState::kNotOnStack == ra_state);
+    Push(Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+  }
+
+  // Restore caller's frame pointer now as it could be overwritten by
+  // the copying loop.
+  movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+  // +2 here is to copy both receiver and return address.
+  Register count_reg = caller_args_count_reg;
+  if (callee_args_count.is_reg()) {
+    leap(count_reg, Operand(callee_args_count.reg(), 2));
+  } else {
+    movp(count_reg, Immediate(callee_args_count.immediate() + 2));
+    // TODO(ishell): Unroll copying loop for small immediate values.
+  }
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+  Label loop, entry;
+  jmp(&entry, Label::kNear);
+  bind(&loop);
+  decp(count_reg);
+  movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
+  movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+  bind(&entry);
+  cmpp(count_reg, Immediate(0));
+  j(not_equal, &loop, Label::kNear);
+
+  // Leave current frame.
+  movp(rsp, new_sp_reg);
+}
 
 void MacroAssembler::InvokeFunction(Register function,
                                     Register new_target,
@@ -4285,15 +4375,12 @@
   bind(&skip_flooding);
 }
 
-
-void MacroAssembler::StubPrologue() {
-    pushq(rbp);  // Caller's frame pointer.
-    movp(rbp, rsp);
-    Push(rsi);  // Callee's context.
-    Push(Smi::FromInt(StackFrame::STUB));
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
+  pushq(rbp);  // Caller's frame pointer.
+  movp(rbp, rsp);
+  Push(Smi::FromInt(type));
 }
 
-
 void MacroAssembler::Prologue(bool code_pre_aging) {
   PredictableCodeSizeScope predictible_code_size_scope(this,
       kNoCodeAgeSequenceLength);
@@ -4328,10 +4415,11 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   pushq(rbp);
   movp(rbp, rsp);
-  Push(rsi);  // Context.
   Push(Smi::FromInt(type));
-  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
-  Push(kScratchRegister);
+  if (type == StackFrame::INTERNAL) {
+    Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+    Push(kScratchRegister);
+  }
   if (emit_debug_code()) {
     Move(kScratchRegister,
          isolate()->factory()->undefined_value(),
@@ -4345,7 +4433,8 @@
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   if (emit_debug_code()) {
     Move(kScratchRegister, Smi::FromInt(type));
-    cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+    cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+         kScratchRegister);
     Check(equal, kStackFrameTypesMustMatch);
   }
   movp(rsp, rbp);
@@ -4356,15 +4445,16 @@
 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
   // Set up the frame structure on the stack.
   // All constants are relative to the frame pointer of the exit frame.
-  DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
-         kFPOnStackSize + kPCOnStackSize);
-  DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
-  DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+  DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
+            ExitFrameConstants::kCallerSPDisplacement);
+  DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
+  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   pushq(rbp);
   movp(rbp, rsp);
 
   // Reserve room for entry stack pointer and push the code object.
-  DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+  Push(Smi::FromInt(StackFrame::EXIT));
+  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
   Push(Immediate(0));  // Saved entry sp, patched before call.
   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   Push(kScratchRegister);  // Accessed from EditFrame::code_slot.
@@ -4391,7 +4481,7 @@
     int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
                 arg_stack_space * kRegisterSize;
     subp(rsp, Immediate(space));
-    int offset = -2 * kPointerSize;
+    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
     const RegisterConfiguration* config =
         RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -4438,7 +4528,7 @@
   // Registers:
   // r15 : argv
   if (save_doubles) {
-    int offset = -2 * kPointerSize;
+    int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
     const RegisterConfiguration* config =
         RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
     for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -4501,8 +4591,22 @@
 
   DCHECK(!holder_reg.is(scratch));
   DCHECK(!scratch.is(kScratchRegister));
-  // Load current lexical context from the stack frame.
-  movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  movp(scratch, rbp);
+  bind(&load_context);
+  DCHECK(SmiValuesAre32Bits());
+  // This is "JumpIfNotSmi" but without loading the value into a register.
+  cmpl(MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset),
+       Immediate(0));
+  j(not_equal, &has_context);
+  movp(scratch, MemOperand(scratch, CommonFrameConstants::kCallerFPOffset));
+  jmp(&load_context);
+  bind(&has_context);
+  movp(scratch,
+       MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
 
   // When generating debug code, make sure the lexical context is set.
   if (emit_debug_code()) {
@@ -5513,19 +5617,39 @@
     Register receiver_reg,
     Register scratch_reg,
     Label* no_memento_found) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
 
-  leap(scratch_reg, Operand(receiver_reg,
-      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
-  Move(kScratchRegister, new_space_start);
-  cmpp(scratch_reg, kScratchRegister);
-  j(less, no_memento_found);
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  xorp(scratch_reg, ExternalOperand(new_space_allocation_top));
+  testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+  j(zero, &top_check);
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  xorp(scratch_reg, receiver_reg);
+  testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+  j(not_zero, no_memento_found);
+  // Continue with the actual map check.
+  jmp(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
   cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
   j(greater, no_memento_found);
-  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
+  // Memento map check.
+  bind(&map_check);
+  CompareRoot(MemOperand(receiver_reg, kMementoMapOffset),
               Heap::kAllocationMementoMapRootIndex);
 }
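
For reference, the reworked allocation-memento check in the hunk above can be summarized by the following minimal C++ sketch. It is illustrative only and not part of this patch: the constants and the SamePage/CanCheckMementoMap names are placeholders standing in for Page::kPageAlignmentMask, JSArray::kSize and AllocationMemento::kSize, and the sketch assumes the object already passed the JumpIfNotInNewSpace bailout.

#include <cstdint>
using std::uintptr_t;

// Placeholder constants; the real values come from V8's page and object
// layout definitions.
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed page size
constexpr uintptr_t kJSArraySize = 32;   // stands in for JSArray::kSize
constexpr uintptr_t kMementoSize = 16;   // stands in for AllocationMemento::kSize

bool SamePage(uintptr_t a, uintptr_t b) {
  return ((a ^ b) & ~kPageAlignmentMask) == 0;
}

// Returns true when the memento map check is both safe and necessary,
// mirroring the top_check / map_check branch structure above.
bool CanCheckMementoMap(uintptr_t receiver, uintptr_t new_space_top) {
  uintptr_t memento_end = receiver + kJSArraySize + kMementoSize;
  if (SamePage(memento_end, new_space_top)) {
    // Same page as the allocation top: a memento can only sit below top.
    return memento_end <= new_space_top;
  }
  // Different page than the allocation top: only touch the slot after the
  // object if it does not straddle a page boundary.
  return SamePage(memento_end, receiver);
}
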
 
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 9c0b796..af3dd03 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -56,6 +56,8 @@
   kBailoutOnOverflow = 1 << 2
 };
 
+enum class ReturnAddressState { kOnStack, kNotOnStack };
+
 typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;
 
 DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
@@ -326,7 +328,7 @@
   void DebugBreak();
 
   // Generates function and stub prologue code.
-  void StubPrologue();
+  void StubPrologue(StackFrame::Type type);
   void Prologue(bool code_pre_aging);
 
   // Enter specific kind of exit frame; either in normal or
@@ -370,6 +372,16 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Removes current frame and its arguments from the stack preserving
+  // the arguments and a return address pushed to the stack for the next call.
+  // |ra_state| defines whether return address is already pushed to stack or
+  // not. Both |callee_args_count| and |caller_args_count_reg| do not include
+  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+  // is trashed.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1, ReturnAddressState ra_state);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeFunctionCode(Register function, Register new_target,
                           const ParameterCount& expected,
@@ -806,6 +818,7 @@
 
   // Load a register with a long value as efficiently as possible.
   void Set(Register dst, int64_t x);
+  void Set(Register dst, int64_t x, RelocInfo::Mode rmode);
   void Set(const Operand& dst, intptr_t x);
 
   void Cvtss2sd(XMMRegister dst, XMMRegister src);
@@ -1014,12 +1027,6 @@
     return (target.requires_rex() ? 2 : 1) + target.operand_size();
   }
 
-  // Emit call to the code we are currently generating.
-  void CallSelf() {
-    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
-    Call(self, RelocInfo::CODE_TARGET);
-  }
-
   // Non-SSE2 instructions.
   void Pextrd(Register dst, XMMRegister src, int8_t imm8);
   void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
@@ -1191,6 +1198,7 @@
 
   // Abort execution if argument is not a number, enabled via --debug-code.
   void AssertNumber(Register object);
+  void AssertNotNumber(Register object);
 
   // Abort execution if argument is a smi, enabled via --debug-code.
   void AssertNotSmi(Register object);
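
The PrepareForTailCall contract documented above amounts to a backwards copy of the callee's arguments, plus receiver and return address, over the frame being dropped. A rough, self-contained C++ model of that copy follows; the name and the plain-array stack are invented for illustration, since the real code works on the machine stack through rbp/rsp (ebp/esp on ia32/x87).

#include <cstddef>
#include <cstdint>

// Rough model only: "slots" index pointer-sized stack slots starting at the
// current stack pointer and at the computed new stack pointer.
void SlideArgumentsForTailCall(std::uintptr_t* current_sp,
                               std::uintptr_t* new_sp,
                               std::size_t callee_arg_count) {
  // +2 copies the receiver and the return address as well, matching the
  // "+2 here is to copy both receiver and return address" comment.
  std::size_t count = callee_arg_count + 2;
  // Copy backwards so that overlapping source and destination areas do not
  // corrupt the callee arguments.
  while (count-- > 0) {
    new_sp[count] = current_sp[count];
  }
}
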
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index 7af1d02..802c80f 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -81,6 +81,10 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Memory::Address_at(pc_);
+}
 
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -116,6 +120,20 @@
   }
 }
 
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, size_t old_size, size_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK(old_base <= wasm_memory_reference() &&
+         wasm_memory_reference() < old_base + old_size);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  DCHECK(new_base <= updated_reference &&
+         updated_reference < new_base + new_size);
+  Memory::Address_at(pc_) = updated_reference;
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+  }
+}
 
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@@ -140,8 +158,8 @@
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
 
@@ -322,6 +340,10 @@
   rmode_ = RelocInfo::NONE32;
 }
 
+Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
+  x_ = reinterpret_cast<int32_t>(x);
+  rmode_ = rmode;
+}
 
 Immediate::Immediate(const ExternalReference& ext) {
   x_ = reinterpret_cast<int32_t>(ext.address());
@@ -430,6 +452,11 @@
   }
 }
 
+void Assembler::emit_b(Immediate x) {
+  DCHECK(x.is_int8() || x.is_uint8());
+  uint8_t value = static_cast<uint8_t>(x.x_);
+  *pc_++ = value;
+}
 
 void Assembler::emit_w(const Immediate& x) {
   DCHECK(RelocInfo::IsNone(x.rmode_));
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index 66fda57..e74d770 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -617,8 +617,8 @@
   emit_operand(src, dst);
 }
 
-
-void Assembler::cmpb(const Operand& op, int8_t imm8) {
+void Assembler::cmpb(const Operand& op, Immediate imm8) {
+  DCHECK(imm8.is_int8() || imm8.is_uint8());
   EnsureSpace ensure_space(this);
   if (op.is_reg(eax)) {
     EMIT(0x3C);
@@ -626,7 +626,7 @@
     EMIT(0x80);
     emit_operand(edi, op);  // edi == 7
   }
-  EMIT(imm8);
+  emit_b(imm8);
 }
 
 
@@ -655,6 +655,19 @@
   emit_w(imm16);
 }
 
+void Assembler::cmpw(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x39);
+  emit_operand(reg, op);
+}
+
+void Assembler::cmpw(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x3B);
+  emit_operand(reg, op);
+}
 
 void Assembler::cmp(Register reg, int32_t imm32) {
   EnsureSpace ensure_space(this);
@@ -939,19 +952,26 @@
   emit_operand(edi, dst);
 }
 
-
 void Assembler::sbb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   EMIT(0x1B);
   emit_operand(dst, src);
 }
 
+void Assembler::shld(Register dst, Register src, uint8_t shift) {
+  DCHECK(is_uint5(shift));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xA4);
+  emit_operand(src, Operand(dst));
+  EMIT(shift);
+}
 
-void Assembler::shld(Register dst, const Operand& src) {
+void Assembler::shld_cl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0xA5);
-  emit_operand(dst, src);
+  emit_operand(src, Operand(dst));
 }
 
 
@@ -975,15 +995,6 @@
   emit_operand(esp, dst);
 }
 
-
-void Assembler::shrd(Register dst, const Operand& src) {
-  EnsureSpace ensure_space(this);
-  EMIT(0x0F);
-  EMIT(0xAD);
-  emit_operand(dst, src);
-}
-
-
 void Assembler::shr(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   DCHECK(is_uint5(imm8));  // illegal shift count
@@ -1004,6 +1015,21 @@
   emit_operand(ebp, dst);
 }
 
+void Assembler::shrd(Register dst, Register src, uint8_t shift) {
+  DCHECK(is_uint5(shift));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xAC);
+  emit_operand(dst, Operand(src));
+  EMIT(shift);
+}
+
+void Assembler::shrd_cl(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xAD);
+  emit_operand(src, dst);
+}
 
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
@@ -1026,8 +1052,8 @@
 
 
 void Assembler::test(Register reg, const Immediate& imm) {
-  if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
-    test_b(reg, imm.x_);
+  if (imm.is_uint8()) {
+    test_b(reg, imm);
     return;
   }
 
@@ -1064,8 +1090,8 @@
     test(op.reg(), imm);
     return;
   }
-  if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
-    return test_b(op, imm.x_);
+  if (imm.is_uint8()) {
+    return test_b(op, imm);
   }
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
@@ -1073,25 +1099,25 @@
   emit(imm);
 }
 
-
-void Assembler::test_b(Register reg, uint8_t imm8) {
+void Assembler::test_b(Register reg, Immediate imm8) {
+  DCHECK(imm8.is_uint8());
   EnsureSpace ensure_space(this);
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
   if (reg.is(eax)) {
     EMIT(0xA8);
-    EMIT(imm8);
+    emit_b(imm8);
   } else if (reg.is_byte_register()) {
-    emit_arith_b(0xF6, 0xC0, reg, imm8);
+    emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.x_));
   } else {
+    EMIT(0x66);
     EMIT(0xF7);
     EMIT(0xC0 | reg.code());
-    emit(imm8);
+    emit_w(imm8);
   }
 }
 
-
-void Assembler::test_b(const Operand& op, uint8_t imm8) {
+void Assembler::test_b(const Operand& op, Immediate imm8) {
   if (op.is_reg_only()) {
     test_b(op.reg(), imm8);
     return;
@@ -1099,9 +1125,42 @@
   EnsureSpace ensure_space(this);
   EMIT(0xF6);
   emit_operand(eax, op);
-  EMIT(imm8);
+  emit_b(imm8);
 }
 
+void Assembler::test_w(Register reg, Immediate imm16) {
+  DCHECK(imm16.is_int16() || imm16.is_uint16());
+  EnsureSpace ensure_space(this);
+  if (reg.is(eax)) {
+    EMIT(0xA9);
+    emit_w(imm16);
+  } else {
+    EMIT(0x66);
+    EMIT(0xF7);
+    EMIT(0xc0 | reg.code());
+    emit_w(imm16);
+  }
+}
+
+void Assembler::test_w(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x85);
+  emit_operand(reg, op);
+}
+
+void Assembler::test_w(const Operand& op, Immediate imm16) {
+  DCHECK(imm16.is_int16() || imm16.is_uint16());
+  if (op.is_reg_only()) {
+    test_w(op.reg(), imm16);
+    return;
+  }
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0xF7);
+  emit_operand(eax, op);
+  emit_w(imm16);
+}
 
 void Assembler::xor_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index 15fc29c..96eced9 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -272,6 +272,7 @@
   inline explicit Immediate(Handle<Object> handle);
   inline explicit Immediate(Smi* value);
   inline explicit Immediate(Address addr);
+  inline explicit Immediate(Address x, RelocInfo::Mode rmode);
 
   static Immediate CodeRelativeOffset(Label* label) {
     return Immediate(label);
@@ -281,9 +282,15 @@
   bool is_int8() const {
     return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
   }
+  bool is_uint8() const {
+    return v8::internal::is_uint8(x_) && RelocInfo::IsNone(rmode_);
+  }
   bool is_int16() const {
     return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
   }
+  bool is_uint16() const {
+    return v8::internal::is_uint16(x_) && RelocInfo::IsNone(rmode_);
+  }
 
  private:
   inline explicit Immediate(Label* value);
@@ -659,13 +666,18 @@
   void and_(const Operand& dst, Register src);
   void and_(const Operand& dst, const Immediate& x);
 
-  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
-  void cmpb(const Operand& op, int8_t imm8);
+  void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
+  void cmpb(const Operand& op, Immediate imm8);
   void cmpb(Register reg, const Operand& op);
   void cmpb(const Operand& op, Register reg);
+  void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
-  void cmpw(const Operand& op, Immediate imm16);
+  void cmpw(const Operand& dst, Immediate src);
+  void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
+  void cmpw(Register dst, const Operand& src);
+  void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
+  void cmpw(const Operand& dst, Register src);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
   void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
@@ -731,21 +743,20 @@
 
   void sbb(Register dst, const Operand& src);
 
-  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
-  void shld(Register dst, const Operand& src);
-
   void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
   void shl(const Operand& dst, uint8_t imm8);
   void shl_cl(Register dst) { shl_cl(Operand(dst)); }
   void shl_cl(const Operand& dst);
-
-  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
-  void shrd(Register dst, const Operand& src);
+  void shld(Register dst, Register src, uint8_t shift);
+  void shld_cl(Register dst, Register src);
 
   void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
   void shr(const Operand& dst, uint8_t imm8);
   void shr_cl(Register dst) { shr_cl(Operand(dst)); }
   void shr_cl(const Operand& dst);
+  void shrd(Register dst, Register src, uint8_t shift);
+  void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
+  void shrd_cl(const Operand& dst, Register src);
 
   void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
@@ -756,10 +767,18 @@
   void test(Register reg, const Immediate& imm);
   void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
-  void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
-  void test_b(Register reg, uint8_t imm8);
-  void test_b(const Operand& op, uint8_t imm8);
+  void test(const Operand& op, Register reg) { test(reg, op); }
+  void test_b(Register reg, const Operand& op);
+  void test_b(Register reg, Immediate imm8);
+  void test_b(const Operand& op, Immediate imm8);
+  void test_b(const Operand& op, Register reg) { test_b(reg, op); }
+  void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
+  void test_w(Register reg, const Operand& op);
+  void test_w(Register reg, Immediate imm16);
+  void test_w(const Operand& op, Immediate imm16);
+  void test_w(const Operand& op, Register reg) { test_w(reg, op); }
+  void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
 
   void xor_(Register dst, int32_t imm32);
   void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
@@ -961,7 +980,9 @@
 
   static bool IsNop(Address addr);
 
-  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+  AssemblerPositionsRecorder* positions_recorder() {
+    return &positions_recorder_;
+  }
 
   int relocation_writer_size() {
     return (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -1003,6 +1024,7 @@
                    RelocInfo::Mode rmode,
                    TypeFeedbackId id = TypeFeedbackId::None());
   inline void emit(const Immediate& x);
+  inline void emit_b(Immediate x);
   inline void emit_w(const Immediate& x);
   inline void emit_q(uint64_t x);
 
@@ -1048,8 +1070,8 @@
   // code generation
   RelocInfoWriter reloc_info_writer;
 
-  PositionsRecorder positions_recorder_;
-  friend class PositionsRecorder;
+  AssemblerPositionsRecorder positions_recorder_;
+  friend class AssemblerPositionsRecorder;
 };
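
The assembler changes above replace raw uint8_t immediates with the Immediate type so an instruction can pick its encoding from the operand itself and reject relocatable values. A minimal sketch of that selection, using simplified placeholder types rather than V8's real classes:

#include <cstdint>

// Simplified stand-in for the assembler's Immediate: a value plus a flag for
// relocation info. Relocatable immediates must keep the full 32-bit form.
struct Imm {
  std::int32_t value;
  bool has_reloc;
  bool is_uint8() const { return !has_reloc && value >= 0 && value <= 0xFF; }
};

enum class TestEncoding { kByte, kDword };

// Mirrors the shape of Assembler::test(reg, imm) above, which now narrows to
// the one-byte test_b encoding whenever the immediate permits it.
TestEncoding SelectTestEncoding(const Imm& imm) {
  return imm.is_uint8() ? TestEncoding::kByte : TestEncoding::kDword;
}
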
 
 
diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc
index ce07908..9e13172 100644
--- a/src/x87/builtins-x87.cc
+++ b/src/x87/builtins-x87.cc
@@ -123,6 +123,7 @@
                                            bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
+  //  -- esi: context
   //  -- edi: constructor function
   //  -- ebx: allocation site or undefined
   //  -- edx: new target
@@ -134,6 +135,7 @@
 
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(ebx);
+    __ push(esi);
     __ push(ebx);
     __ SmiTag(eax);
     __ push(eax);
@@ -201,7 +203,7 @@
     }
 
     // Restore context from the frame.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
@@ -325,9 +327,6 @@
                                              bool is_construct) {
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Clear the context before we push it when entering the internal frame.
-  __ Move(esi, Immediate(0));
-
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
@@ -603,27 +602,24 @@
   //           they are to be pushed onto the stack.
   // -----------------------------------
 
-  // Save number of arguments on the stack below where arguments are going
-  // to be pushed.
-  __ mov(ecx, eax);
-  __ neg(ecx);
-  __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
-  __ mov(eax, ecx);
-
   // Pop return address to allow tail-call after pushing arguments.
   __ Pop(ecx);
 
+  // Push edi in the slot meant for receiver. We need an extra register
+  // so store edi temporarily on stack.
+  __ Push(edi);
+
   // Find the address of the last argument.
-  __ shl(eax, kPointerSizeLog2);
-  __ add(eax, ebx);
+  __ mov(edi, eax);
+  __ neg(edi);
+  __ shl(edi, kPointerSizeLog2);
+  __ add(edi, ebx);
 
-  // Push padding for receiver.
-  __ Push(Immediate(0));
+  Generate_InterpreterPushArgs(masm, edi);
 
-  Generate_InterpreterPushArgs(masm, eax);
-
-  // Restore number of arguments from slot on stack.
-  __ mov(eax, Operand(esp, -kPointerSize));
+  // Restore the constructor from slot on stack. It was pushed at the slot
+  // meant for receiver.
+  __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
 
   // Re-push return address.
   __ Push(ecx);
@@ -961,6 +957,28 @@
   }
 }
 
+// static
+void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : first argument (left-hand side)
+  //  -- esp[8] : receiver (right-hand side)
+  // -----------------------------------
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ mov(InstanceOfDescriptor::LeftRegister(),
+           Operand(ebp, 2 * kPointerSize));  // Load left-hand side.
+    __ mov(InstanceOfDescriptor::RightRegister(),
+           Operand(ebp, 3 * kPointerSize));  // Load right-hand side.
+    InstanceOfStub stub(masm->isolate(), true);
+    __ CallStub(&stub);
+  }
+
+  // Pop the argument and the receiver.
+  __ ret(2 * kPointerSize);
+}
 
 // static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@@ -1008,7 +1026,8 @@
   Label receiver_not_callable;
   __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
   __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
   __ j(zero, &receiver_not_callable, Label::kNear);
 
   // 3. Tail call with no arguments if argArray is null or undefined.
@@ -1131,7 +1150,8 @@
   Label target_not_callable;
   __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
   __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
   __ j(zero, &target_not_callable, Label::kNear);
 
   // 3a. Apply the target to the given argumentsList (passing undefined for
@@ -1147,7 +1167,6 @@
   }
 }
 
-
 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax     : argc
@@ -1196,14 +1215,16 @@
   Label target_not_constructor;
   __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
   __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
   __ j(zero, &target_not_constructor, Label::kNear);
 
   // 3. Make sure the target is actually a constructor.
   Label new_target_not_constructor;
   __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
   __ j(zero, &new_target_not_constructor, Label::kNear);
 
   // 4a. Construct the target with the given new.target and argumentsList.
@@ -1891,18 +1912,20 @@
   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
   Comment cmnt(masm, "[ PrepareForTailCall");
 
-  // Prepare for tail call only if the debugger is not active.
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
   Label done;
-  ExternalReference debug_is_active =
-      ExternalReference::debug_is_active_address(masm->isolate());
-  __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ movzx_b(scratch1,
+             Operand::StaticVariable(is_tail_call_elimination_enabled));
   __ cmp(scratch1, Immediate(0));
-  __ j(not_equal, &done, Label::kNear);
+  __ j(equal, &done, Label::kNear);
 
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+    __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
            Immediate(Smi::FromInt(StackFrame::STUB)));
     __ j(not_equal, &no_interpreter_frame, Label::kNear);
     __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -1910,16 +1933,18 @@
   }
 
   // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+  __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
 
-  // Drop arguments adaptor frame and load arguments count.
+  // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(ebp, scratch2);
-  __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(scratch1);
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
   __ jmp(&formal_parameter_count_loaded, Label::kNear);
 
   __ bind(&no_arguments_adaptor);
@@ -1928,57 +1953,15 @@
   __ mov(scratch1,
          FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
   __ mov(
-      scratch1,
+      caller_args_count_reg,
       FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ SmiUntag(scratch1);
+  __ SmiUntag(caller_args_count_reg);
 
   __ bind(&formal_parameter_count_loaded);
 
-  // Calculate the destination address where we will put the return address
-  // after we drop current frame.
-  Register new_sp_reg = scratch2;
-  __ sub(scratch1, args_reg);
-  __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
-                             StandardFrameConstants::kCallerPCOffset));
-
-  if (FLAG_debug_code) {
-    __ cmp(esp, new_sp_reg);
-    __ Check(below, kStackAccessBelowStackPointer);
-  }
-
-  // Copy receiver and return address as well.
-  Register count_reg = scratch1;
-  __ lea(count_reg, Operand(args_reg, 2));
-
-  // Copy return address from caller's frame to current frame's return address
-  // to avoid its trashing and let the following loop copy it to the right
-  // place.
-  Register tmp_reg = scratch3;
-  __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
-  __ mov(Operand(esp, 0), tmp_reg);
-
-  // Restore caller's frame pointer now as it could be overwritten by
-  // the copying loop.
-  __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-  Operand src(esp, count_reg, times_pointer_size, 0);
-  Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
-
-  // Now copy callee arguments to the caller frame going backwards to avoid
-  // callee arguments corruption (source and destination areas could overlap).
-  Label loop, entry;
-  __ jmp(&entry, Label::kNear);
-  __ bind(&loop);
-  __ dec(count_reg);
-  __ mov(tmp_reg, src);
-  __ mov(dst, tmp_reg);
-  __ bind(&entry);
-  __ cmp(count_reg, Immediate(0));
-  __ j(not_equal, &loop, Label::kNear);
-
-  // Leave current frame.
-  __ mov(esp, new_sp_reg);
-
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, 0);
   __ bind(&done);
 }
 }  // namespace
@@ -1998,7 +1981,7 @@
   Label class_constructor;
   __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
-            SharedFunctionInfo::kClassConstructorBitsWithinByte);
+            Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
   __ j(not_zero, &class_constructor);
 
   // Enter the context of the function; ToObject has to run in the function
@@ -2010,8 +1993,8 @@
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
-            (1 << SharedFunctionInfo::kNativeBitWithinByte) |
-                (1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+            Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                      (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
   __ j(not_zero, &done_convert);
   {
     // ----------- S t a t e -------------
@@ -2233,7 +2216,8 @@
        RelocInfo::CODE_TARGET);
 
   // Check if target has a [[Call]] internal method.
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
   __ j(zero, &non_callable);
 
   __ CmpInstanceType(ecx, JS_PROXY_TYPE);
@@ -2369,7 +2353,8 @@
        RelocInfo::CODE_TARGET);
 
   // Check if target has a [[Construct]] internal method.
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
   __ j(zero, &non_constructor, Label::kNear);
 
   // Only dispatch to bound functions after checking whether they are
@@ -2442,26 +2427,6 @@
   {  // Too few parameters: Actual < expected.
     __ bind(&too_few);
 
-    // If the function is strong we need to throw an error.
-    Label no_strong_error;
-    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrongModeByteOffset),
-              1 << SharedFunctionInfo::kStrongModeBitWithinByte);
-    __ j(equal, &no_strong_error, Label::kNear);
-
-    // What we really care about is the required number of arguments.
-    __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kLengthOffset));
-    __ SmiUntag(ecx);
-    __ cmp(eax, ecx);
-    __ j(greater_equal, &no_strong_error, Label::kNear);
-
-    {
-      FrameScope frame(masm, StackFrame::MANUAL);
-      EnterArgumentsAdaptorFrame(masm);
-      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
-    }
-
-    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);
     ArgumentsAdaptorStackCheck(masm, &stack_overflow);
 
@@ -2500,7 +2465,7 @@
   // Call the entry point.
   __ bind(&invoke);
   // Restore function pointer.
-  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
   // eax : expected number of arguments
   // edx : new target (passed through to callee)
   // edi : function (passed through to callee)
@@ -2675,24 +2640,6 @@
 }
 
 
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
-  // We check the stack limit as indicator that recompilation might be done.
-  Label ok;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm->isolate());
-  __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &ok, Label::kNear);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kStackGuard);
-  }
-  __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
-         RelocInfo::CODE_TARGET);
-
-  __ bind(&ok);
-  __ ret(0);
-}
-
 #undef __
 }  // namespace internal
 }  // namespace v8
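
In the PrepareForTailCall helper above, the caller argument count comes either from an arguments adaptor frame, when one sits below the current frame, or from the function's formal parameter count. A rough C++ model of that decision follows; the Frame struct and field names are invented for illustration, whereas the real code reads Smi frame-type markers and SharedFunctionInfo fields from the machine stack.

// Illustrative model only.
struct Frame {
  const Frame* caller;
  bool caller_is_arguments_adaptor;  // stands in for the frame-type marker
  int adaptor_length;                // adaptor frame's actual argument count
  int formal_parameter_count;        // from the function's SharedFunctionInfo
};

int CallerArgumentCount(const Frame& frame) {
  if (frame.caller_is_arguments_adaptor) {
    return frame.caller->adaptor_length;
  }
  return frame.formal_parameter_count;
}
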
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index ff6c8d2..71adfd3 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -4,9 +4,10 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/code-stubs.h"
+#include "src/api-arguments.h"
 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
-#include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
@@ -84,6 +85,10 @@
   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
+void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
+  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
+}
 
 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
@@ -241,7 +246,7 @@
           Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
   __ add(result_reg,
          Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
-  __ shrd(result_reg, scratch1);
+  __ shrd_cl(scratch1, result_reg);
   __ shr_cl(result_reg);
   __ test(ecx, Immediate(32));
   {
@@ -366,34 +371,6 @@
 }
 
 
-void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Label slow;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  Register scratch = eax;
-  DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
-  // Check that the key is an array index, that is Uint32.
-  __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
-  __ j(not_zero, &slow);
-
-  // Everything is fine, call runtime.
-  __ pop(scratch);
-  __ push(receiver);  // receiver
-  __ push(key);       // key
-  __ push(scratch);   // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
-
-  __ bind(&slow);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   // Return address is on the stack.
   Label miss;
@@ -799,7 +776,7 @@
   if (FLAG_debug_code) {
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
-    __ test_b(ebx, kIsIndirectStringMask);
+    __ test_b(ebx, Immediate(kIsIndirectStringMask));
     __ Assert(zero, kExternalStringExpectedButNotFound);
   }
   __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
@@ -808,7 +785,7 @@
   __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   STATIC_ASSERT(kTwoByteStringTag == 0);
   // (8) Is the external string one byte?  If yes, go to (5).
-  __ test_b(ebx, kStringEncodingMask);
+  __ test_b(ebx, Immediate(kStringEncodingMask));
   __ j(not_zero, &seq_one_byte_string);  // Go to (5).
 
   // eax: sequential subject string (or look-alike, external string)
@@ -933,13 +910,13 @@
       __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
       __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
       // Call runtime on identical JSObjects.  Otherwise return equal.
-      __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
+      __ cmpb(ecx, Immediate(FIRST_JS_RECEIVER_TYPE));
       __ j(above_equal, &runtime_call, Label::kFar);
       // Call runtime on identical symbols since we need to throw a TypeError.
-      __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
+      __ cmpb(ecx, Immediate(SYMBOL_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
       // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
+      __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
     }
     __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -1097,7 +1074,7 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label return_unequal, undetectable;
+    Label return_equal, return_unequal, undetectable;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -1105,16 +1082,16 @@
     STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
-    __ j(not_zero, &runtime_call, Label::kNear);
+    __ j(not_zero, &runtime_call);
 
     __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
     __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
 
     __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     __ j(not_zero, &undetectable, Label::kNear);
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     __ j(not_zero, &return_unequal, Label::kNear);
 
     __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
@@ -1128,8 +1105,18 @@
 
     __ bind(&undetectable);
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
+              Immediate(1 << Map::kIsUndetectable));
     __ j(zero, &return_unequal, Label::kNear);
+
+    // If both sides are JSReceivers, then the result is false according to
+    // the HTML specification, which says that only comparisons with null or
+    // undefined are affected by special casing for document.all.
+    __ CmpInstanceType(ebx, ODDBALL_TYPE);
+    __ j(zero, &return_equal, Label::kNear);
+    __ CmpInstanceType(ecx, ODDBALL_TYPE);
+    __ j(not_zero, &return_unequal, Label::kNear);
+
+    __ bind(&return_equal);
     __ Move(eax, Immediate(EQUAL));
     __ ret(0);  // eax, edx were pushed
   }
@@ -1705,8 +1692,9 @@
 
   // Push marker in two places.
   int marker = type();
-  __ push(Immediate(Smi::FromInt(marker)));  // context slot
-  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  __ push(Immediate(Smi::FromInt(marker)));  // marker
+  ExternalReference context_address(Isolate::kContextAddress, isolate());
+  __ push(Operand::StaticVariable(context_address));  // context
   // Save callee-saved registers (C calling conventions).
   __ push(edi);
   __ push(esi);
@@ -1835,9 +1823,14 @@
   __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
   __ j(not_equal, &slow_case);
 
+  // Go to the runtime if the function is not a constructor.
+  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &slow_case);
+
   // Ensure that {function} has an instance prototype.
   __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
-            static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
+            Immediate(1 << Map::kHasNonInstancePrototype));
   __ j(not_zero, &slow_case);
 
   // Get the "prototype" (or initial map) of the {function}.
@@ -1871,7 +1864,7 @@
 
   // Check if the object needs to be access checked.
   __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded);
+            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &fast_runtime_fallback, Label::kNear);
   // Check if the current object is a Proxy.
   __ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -1906,7 +1899,8 @@
   __ Push(object);
   __ Push(function);
   __ PushReturnAddressFrom(scratch);
-  __ TailCallRuntime(Runtime::kInstanceOf);
+  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
+                                         : Runtime::kInstanceOf);
 }
 
 
@@ -2216,13 +2210,13 @@
   Label two_byte_sequential, runtime_drop_two, sequential_string;
   STATIC_ASSERT(kExternalStringTag != 0);
   STATIC_ASSERT(kSeqStringTag == 0);
-  __ test_b(ebx, kExternalStringTag);
+  __ test_b(ebx, Immediate(kExternalStringTag));
   __ j(zero, &sequential_string);
 
   // Handle external string.
   // Rule out short external strings.
   STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(ebx, kShortExternalStringMask);
+  __ test_b(ebx, Immediate(kShortExternalStringMask));
   __ j(not_zero, &runtime);
   __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
   // Move the pointer so that offset-wise, it looks like a sequential string.
@@ -2235,7 +2229,7 @@
   __ push(edi);
   __ SmiUntag(ecx);
   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
-  __ test_b(ebx, kStringEncodingMask);
+  __ test_b(ebx, Immediate(kStringEncodingMask));
   __ j(zero, &two_byte_sequential);
 
   // Sequential one byte string.  Allocate the result.
@@ -2324,23 +2318,21 @@
   __ Ret();
   __ bind(&not_heap_number);
 
-  Label not_string, slow_string;
+  NonNumberToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in eax.
+  __ AssertNotNumber(eax);
+
+  Label not_string;
   __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
   // eax: object
   // edi: object map
   __ j(above_equal, &not_string, Label::kNear);
-  // Check if string has a cached array index.
-  __ test(FieldOperand(eax, String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(not_zero, &slow_string, Label::kNear);
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ IndexFromHash(eax, eax);
-  __ Ret();
-  __ bind(&slow_string);
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kStringToNumber);
+  StringToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
   __ bind(&not_string);
 
   Label not_oddball;
@@ -2356,26 +2348,26 @@
   __ TailCallRuntime(Runtime::kToNumber);
 }
 
+void StringToNumberStub::Generate(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in eax.
+  __ AssertString(eax);
 
-void ToLengthStub::Generate(MacroAssembler* masm) {
-  // The ToLength stub takes on argument in eax.
-  Label not_smi, positive_smi;
-  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ test(eax, eax);
-  __ j(greater_equal, &positive_smi, Label::kNear);
-  __ xor_(eax, eax);
-  __ bind(&positive_smi);
+  // Check if string has a cached array index.
+  Label runtime;
+  __ test(FieldOperand(eax, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &runtime, Label::kNear);
+  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+  __ IndexFromHash(eax, eax);
   __ Ret();
-  __ bind(&not_smi);
 
-  __ pop(ecx);   // Pop return address.
-  __ push(eax);  // Push argument.
-  __ push(ecx);  // Push return address.
-  __ TailCallRuntime(Runtime::kToLength);
+  __ bind(&runtime);
+  __ PopReturnAddressTo(ecx);     // Pop return address.
+  __ Push(eax);                   // Push argument.
+  __ PushReturnAddressFrom(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kStringToNumber);
 }
 
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in eax.
   Label is_number;
@@ -2572,44 +2564,6 @@
 }
 
 
-void StringCompareStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edx    : left string
-  //  -- eax    : right string
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ AssertString(edx);
-  __ AssertString(eax);
-
-  Label not_same;
-  __ cmp(edx, eax);
-  __ j(not_equal, &not_same, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
-  __ Ret();
-
-  __ bind(&not_same);
-
-  // Check that both objects are sequential one-byte strings.
-  Label runtime;
-  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
-
-  // Compare flat one-byte strings.
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
-  StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
-                                                  edi);
-
-  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
-  // tagged as a small integer.
-  __ bind(&runtime);
-  __ PopReturnAddressTo(ecx);
-  __ Push(edx);
-  __ Push(eax);
-  __ PushReturnAddressFrom(ecx);
-  __ TailCallRuntime(Runtime::kStringCompare);
-}
-
-
 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- edx    : left
@@ -2910,13 +2864,20 @@
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
-  __ pop(tmp1);  // Return address.
-  __ push(left);
-  __ push(right);
-  __ push(tmp1);
   if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(left);
+      __ Push(right);
+      __ CallRuntime(Runtime::kStringEqual);
+    }
+    __ sub(eax, Immediate(masm->isolate()->factory()->true_value()));
+    __ Ret();
   } else {
+    __ pop(tmp1);  // Return address.
+    __ push(left);
+    __ push(right);
+    __ push(tmp1);
     __ TailCallRuntime(Runtime::kStringCompare);
   }
 
@@ -3401,7 +3362,7 @@
   CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
-      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
   __ mov(ebx, MemOperand(ebp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ pop(ecx);
@@ -4111,7 +4072,7 @@
     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // is the low bit set? If so, we are holey and that is good.
-    __ test_b(edx, 1);
+    __ test_b(edx, Immediate(1));
     __ j(not_zero, &normal_sequence);
   }
 
@@ -4547,7 +4508,7 @@
     __ bind(&loop);
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
     __ j(not_equal, &loop);
   }
 
@@ -4555,7 +4516,7 @@
   // arguments adaptor frame below the function frame).
   Label no_rest_parameters;
   __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+  __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_rest_parameters, Label::kNear);
 
@@ -4697,7 +4658,7 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
@@ -4933,14 +4894,14 @@
     __ bind(&loop);
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
     __ bind(&loop_entry);
-    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
     __ j(not_equal, &loop);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
   Label arguments_adaptor, arguments_done;
   __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+  __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &arguments_adaptor, Label::kNear);
   {
@@ -5239,7 +5200,7 @@
   Label profiler_disabled;
   Label end_profiler_check;
   __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
-  __ cmpb(Operand(eax, 0), 0);
+  __ cmpb(Operand(eax, 0), Immediate(0));
   __ j(zero, &profiler_disabled);
 
   // Additional parameter is the address of the actual getter function.
@@ -5362,17 +5323,13 @@
   __ jmp(&leave_exit_frame);
 }
 
-static void CallApiFunctionStubHelper(MacroAssembler* masm,
-                                      const ParameterCount& argc,
-                                      bool return_first_arg,
-                                      bool call_data_undefined, bool is_lazy) {
+void CallApiCallbackStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- edi                 : callee
   //  -- ebx                 : call_data
   //  -- ecx                 : holder
   //  -- edx                 : api_function_address
   //  -- esi                 : context
-  //  -- eax                 : number of arguments if argc is a register
   //  --
   //  -- esp[0]              : return address
   //  -- esp[4]              : last argument
@@ -5399,17 +5356,9 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || eax.is(argc.reg()));
-
-  if (argc.is_immediate()) {
-    __ pop(return_address);
-    // context save.
-    __ push(context);
-  } else {
-    // pop return address and save context
-    __ xchg(context, Operand(esp, 0));
-    return_address = context;
-  }
+  __ pop(return_address);
+  // context save.
+  __ push(context);
 
   // callee
   __ push(callee);
@@ -5418,7 +5367,7 @@
   __ push(call_data);
 
   Register scratch = call_data;
-  if (!call_data_undefined) {
+  if (!call_data_undefined()) {
     // return value
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
     // return value default
@@ -5439,7 +5388,7 @@
   // push return address
   __ push(return_address);
 
-  if (!is_lazy) {
+  if (!is_lazy()) {
     // load context from callee
     __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
   }
@@ -5458,27 +5407,13 @@
 
   // FunctionCallbackInfo::implicit_args_.
   __ mov(ApiParameterOperand(2), scratch);
-  if (argc.is_immediate()) {
-    __ add(scratch,
-           Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
-    // FunctionCallbackInfo::values_.
-    __ mov(ApiParameterOperand(3), scratch);
-    // FunctionCallbackInfo::length_.
-    __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
-    // FunctionCallbackInfo::is_construct_call_.
-    __ Move(ApiParameterOperand(5), Immediate(0));
-  } else {
-    __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
-                            (FCA::kArgsLength - 1) * kPointerSize));
-    // FunctionCallbackInfo::values_.
-    __ mov(ApiParameterOperand(3), scratch);
-    // FunctionCallbackInfo::length_.
-    __ mov(ApiParameterOperand(4), argc.reg());
-    // FunctionCallbackInfo::is_construct_call_.
-    __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
-                               (FCA::kArgsLength + 1) * kPointerSize));
-    __ mov(ApiParameterOperand(5), argc.reg());
-  }
+  __ add(scratch, Immediate((argc() + FCA::kArgsLength - 1) * kPointerSize));
+  // FunctionCallbackInfo::values_.
+  __ mov(ApiParameterOperand(3), scratch);
+  // FunctionCallbackInfo::length_.
+  __ Move(ApiParameterOperand(4), Immediate(argc()));
+  // FunctionCallbackInfo::is_construct_call_.
+  __ Move(ApiParameterOperand(5), Immediate(0));
 
   // v8::InvocationCallback's argument.
   __ lea(scratch, ApiParameterOperand(2));
@@ -5491,7 +5426,7 @@
                                   (2 + FCA::kContextSaveIndex) * kPointerSize);
   // Stores return the first js argument
   int return_value_offset = 0;
-  if (return_first_arg) {
+  if (is_store()) {
     return_value_offset = 2 + FCA::kArgsLength;
   } else {
     return_value_offset = 2 + FCA::kReturnValueOffset;
@@ -5500,10 +5435,8 @@
   int stack_space = 0;
   Operand is_construct_call_operand = ApiParameterOperand(5);
   Operand* stack_space_operand = &is_construct_call_operand;
-  if (argc.is_immediate()) {
-    stack_space = argc.immediate() + FCA::kArgsLength + 1;
-    stack_space_operand = nullptr;
-  }
+  stack_space = argc() + FCA::kArgsLength + 1;
+  stack_space_operand = nullptr;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                            ApiParameterOperand(1), stack_space,
                            stack_space_operand, return_value_operand,
@@ -5511,23 +5444,6 @@
 }
 
 
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
-  bool call_data_undefined = this->call_data_undefined();
-  CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
-                            call_data_undefined, false);
-}
-
-
-void CallApiAccessorStub::Generate(MacroAssembler* masm) {
-  bool is_store = this->is_store();
-  int argc = this->argc();
-  bool call_data_undefined = this->call_data_undefined();
-  bool is_lazy = this->is_lazy();
-  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined, is_lazy);
-}
-
-
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- esp[0]                        : return address
diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc
index c66166f..776edeb 100644
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -381,6 +381,7 @@
   __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
   __ j(equal, &only_change_map);
 
+  __ push(esi);
   __ push(eax);
   __ push(edx);
   __ push(ebx);
@@ -425,10 +426,10 @@
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ pop(ebx);
   __ pop(edx);
   __ pop(eax);
+  __ pop(esi);
   __ jmp(fail);
 
   // Box doubles into heap numbers.
@@ -477,7 +478,7 @@
 
   // Restore registers.
   __ pop(eax);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(esi);
 
   __ bind(&success);
 }
@@ -545,11 +546,11 @@
   }
   // Rule out short external strings.
   STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(result, kShortExternalStringMask);
+  __ test_b(result, Immediate(kShortExternalStringMask));
   __ j(not_zero, call_runtime);
   // Check encoding.
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ test_b(result, kStringEncodingMask);
+  __ test_b(result, Immediate(kStringEncodingMask));
   __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
   __ j(not_equal, &one_byte_external, Label::kNear);
   // Two-byte string.
diff --git a/src/x87/deoptimizer-x87.cc b/src/x87/deoptimizer-x87.cc
index 3b90276..9d4645e 100644
--- a/src/x87/deoptimizer-x87.cc
+++ b/src/x87/deoptimizer-x87.cc
@@ -186,20 +186,6 @@
   }
 }
 
-bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
-  int parameter_count = shared->internal_formal_parameter_count() + 1;
-  unsigned input_frame_size = input_->GetFrameSize();
-  unsigned alignment_state_offset =
-      input_frame_size - parameter_count * kPointerSize -
-      StandardFrameConstants::kFixedFrameSize -
-      kPointerSize;
-  DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
-      JavaScriptFrameConstants::kLocal0Offset);
-  int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
-  return (alignment_state == kAlignmentPaddingPushed);
-}
-
-
 #define __ masm()->
 
 void Deoptimizer::TableEntryGenerator::Generate() {
@@ -260,7 +246,12 @@
   __ push(edi);
   // Allocate a new deoptimizer object.
   __ PrepareCallCFunction(6, eax);
+  __ mov(eax, Immediate(0));
+  Label context_check;
+  __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ JumpIfSmi(edi, &context_check);
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ bind(&context_check);
   __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
   __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
   __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
@@ -336,20 +327,9 @@
   }
   __ pop(eax);
   __ pop(edi);
+  __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
 
-  // If frame was dynamically aligned, pop padding.
-  Label no_padding;
-  __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
-         Immediate(0));
-  __ j(equal, &no_padding);
-  __ pop(ecx);
-  if (FLAG_debug_code) {
-    __ cmp(ecx, Immediate(kAlignmentZapValue));
-    __ Assert(equal, kAlignmentMarkerExpected);
-  }
-  __ bind(&no_padding);
-
-  // Replace the current frame with the output frames.
+  // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
   // Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc
index a3f1939..91ce227 100644
--- a/src/x87/disasm-x87.cc
+++ b/src/x87/disasm-x87.cc
@@ -29,29 +29,18 @@
 };
 
 static const ByteMnemonic two_operands_instr[] = {
-    {0x01, "add", OPER_REG_OP_ORDER},
-    {0x03, "add", REG_OPER_OP_ORDER},
-    {0x09, "or", OPER_REG_OP_ORDER},
-    {0x0B, "or", REG_OPER_OP_ORDER},
-    {0x1B, "sbb", REG_OPER_OP_ORDER},
-    {0x21, "and", OPER_REG_OP_ORDER},
-    {0x23, "and", REG_OPER_OP_ORDER},
-    {0x29, "sub", OPER_REG_OP_ORDER},
-    {0x2A, "subb", REG_OPER_OP_ORDER},
-    {0x2B, "sub", REG_OPER_OP_ORDER},
-    {0x31, "xor", OPER_REG_OP_ORDER},
-    {0x33, "xor", REG_OPER_OP_ORDER},
-    {0x38, "cmpb", OPER_REG_OP_ORDER},
-    {0x39, "cmp", OPER_REG_OP_ORDER},
-    {0x3A, "cmpb", REG_OPER_OP_ORDER},
-    {0x3B, "cmp", REG_OPER_OP_ORDER},
-    {0x84, "test_b", REG_OPER_OP_ORDER},
-    {0x85, "test", REG_OPER_OP_ORDER},
-    {0x87, "xchg", REG_OPER_OP_ORDER},
-    {0x8A, "mov_b", REG_OPER_OP_ORDER},
-    {0x8B, "mov", REG_OPER_OP_ORDER},
-    {0x8D, "lea", REG_OPER_OP_ORDER},
-    {-1, "", UNSET_OP_ORDER}};
+    {0x01, "add", OPER_REG_OP_ORDER},   {0x03, "add", REG_OPER_OP_ORDER},
+    {0x09, "or", OPER_REG_OP_ORDER},    {0x0B, "or", REG_OPER_OP_ORDER},
+    {0x13, "adc", REG_OPER_OP_ORDER},   {0x1B, "sbb", REG_OPER_OP_ORDER},
+    {0x21, "and", OPER_REG_OP_ORDER},   {0x23, "and", REG_OPER_OP_ORDER},
+    {0x29, "sub", OPER_REG_OP_ORDER},   {0x2A, "subb", REG_OPER_OP_ORDER},
+    {0x2B, "sub", REG_OPER_OP_ORDER},   {0x31, "xor", OPER_REG_OP_ORDER},
+    {0x33, "xor", REG_OPER_OP_ORDER},   {0x38, "cmpb", OPER_REG_OP_ORDER},
+    {0x39, "cmp", OPER_REG_OP_ORDER},   {0x3A, "cmpb", REG_OPER_OP_ORDER},
+    {0x3B, "cmp", REG_OPER_OP_ORDER},   {0x84, "test_b", REG_OPER_OP_ORDER},
+    {0x85, "test", REG_OPER_OP_ORDER},  {0x87, "xchg", REG_OPER_OP_ORDER},
+    {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
+    {0x8D, "lea", REG_OPER_OP_ORDER},   {-1, "", UNSET_OP_ORDER}};
 
 static const ByteMnemonic zero_operands_instr[] = {
   {0xC3, "ret", UNSET_OP_ORDER},
@@ -906,18 +895,34 @@
   switch (f0byte) {
     case 0x0B:
       return "ud2";
-    case 0x18: return "prefetch";
-    case 0xA2: return "cpuid";
-    case 0xBE: return "movsx_b";
-    case 0xBF: return "movsx_w";
-    case 0xB6: return "movzx_b";
-    case 0xB7: return "movzx_w";
-    case 0xAF: return "imul";
-    case 0xA5: return "shld";
-    case 0xAD: return "shrd";
-    case 0xAC: return "shrd";  // 3-operand version.
-    case 0xAB: return "bts";
-    case 0xBD: return "bsr";
+    case 0x18:
+      return "prefetch";
+    case 0xA2:
+      return "cpuid";
+    case 0xBE:
+      return "movsx_b";
+    case 0xBF:
+      return "movsx_w";
+    case 0xB6:
+      return "movzx_b";
+    case 0xB7:
+      return "movzx_w";
+    case 0xAF:
+      return "imul";
+    case 0xA4:
+      return "shld";
+    case 0xA5:
+      return "shld";
+    case 0xAD:
+      return "shrd";
+    case 0xAC:
+      return "shrd";  // 3-operand version.
+    case 0xAB:
+      return "bts";
+    case 0xBC:
+      return "bsf";
+    case 0xBD:
+      return "bsr";
     default: return NULL;
   }
 }
@@ -1134,8 +1139,18 @@
             data += SetCC(data);
           } else if ((f0byte & 0xF0) == 0x40) {
             data += CMov(data);
+          } else if (f0byte == 0xA4 || f0byte == 0xAC) {
+            // shld, shrd
+            data += 2;
+            AppendToBuffer("%s ", f0mnem);
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            int8_t imm8 = static_cast<int8_t>(data[1]);
+            data += 2;
+            AppendToBuffer("%s,%s,%d", NameOfCPURegister(rm),
+                           NameOfCPURegister(regop), static_cast<int>(imm8));
           } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
-            // shrd, shld, bts
+            // shrd_cl, shld_cl, bts
             data += 2;
             AppendToBuffer("%s ", f0mnem);
             int mod, regop, rm;
@@ -1266,6 +1281,13 @@
           int imm = *reinterpret_cast<int16_t*>(data);
           AppendToBuffer(",0x%x", imm);
           data += 2;
+        } else if (*data == 0xF7) {
+          data++;
+          AppendToBuffer("%s ", "test_w");
+          data += PrintRightOperand(data);
+          int imm = *reinterpret_cast<int16_t*>(data);
+          AppendToBuffer(",0x%x", imm);
+          data += 2;
         } else if (*data == 0x0F) {
           data++;
           if (*data == 0x38) {
diff --git a/src/x87/frames-x87.h b/src/x87/frames-x87.h
index 1b90078..1a378ed 100644
--- a/src/x87/frames-x87.h
+++ b/src/x87/frames-x87.h
@@ -28,10 +28,6 @@
 // Number of registers for which space is reserved in safepoints.
 const int kNumSafepointRegisters = 8;
 
-const int kNoAlignmentPadding = 0;
-const int kAlignmentPaddingPushed = 2;
-const int kAlignmentZapValue = 0x12345678;  // Not heap object tagged.
-
 // ----------------------------------------------------
 
 
@@ -46,13 +42,11 @@
   static const int kArgvOffset          = +6 * kPointerSize;
 };
 
-
-class ExitFrameConstants : public AllStatic {
+class ExitFrameConstants : public TypedFrameConstants {
  public:
-  static const int kFrameSize      = 2 * kPointerSize;
-
-  static const int kCodeOffset     = -2 * kPointerSize;
-  static const int kSPOffset       = -1 * kPointerSize;
+  static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+  static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+  DEFINE_TYPED_FRAME_SIZES(2);
 
   static const int kCallerFPOffset =  0 * kPointerSize;
   static const int kCallerPCOffset = +1 * kPointerSize;
@@ -70,13 +64,11 @@
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
   static const int kLastParameterOffset = +2 * kPointerSize;
-  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+  static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
 
   // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
   static const int kReceiverOffset = -1 * kPointerSize;
-
-  static const int kDynamicAlignmentStateOffset = kLocal0Offset;
 };
 
 
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index bfed342..e41d42c 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -111,36 +111,9 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ToNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // ToNumberStub invokes a function, and therefore needs a context.
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
 
 // static
-const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToStringDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToNameDescriptor::ReceiverRegister() { return eax; }
-
-
-// static
-const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
-
-
-void NumberToStringDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
+const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -269,6 +242,13 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
+#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
+  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
+      CallInterfaceDescriptorData* data) {                          \
+    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
+  }
+SIMD128_TYPES(SIMD128_ALLOC_DESC)
+#undef SIMD128_ALLOC_DESC
 
 void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -313,6 +293,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastArrayPushDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (arg count)
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -321,20 +307,6 @@
 }
 
 
-void CompareNilDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void ToBooleanDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void BinaryOpDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {edx, eax};
@@ -394,21 +366,7 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void ApiFunctionDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {
-      edi,  // callee
-      ebx,  // call_data
-      ecx,  // holder
-      edx,  // api_function_address
-      eax,  // actual number of arguments
-  };
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ApiAccessorDescriptor::InitializePlatformSpecific(
+void ApiCallbackDescriptorBase::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
       edi,  // callee
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index 0c459eb..b46167d 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -196,15 +196,15 @@
   mov(Operand::StaticVariable(store_buffer), scratch);
   // Call stub on end of buffer.
   // Check for end of buffer.
-  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
   if (and_then == kReturnAtEnd) {
     Label buffer_overflowed;
-    j(not_equal, &buffer_overflowed, Label::kNear);
+    j(equal, &buffer_overflowed, Label::kNear);
     ret(0);
     bind(&buffer_overflowed);
   } else {
     DCHECK(and_then == kFallThroughAtEnd);
-    j(equal, &done, Label::kNear);
+    j(not_equal, &done, Label::kNear);
   }
   StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
   CallStub(&store_buffer_overflow);
@@ -376,7 +376,7 @@
   lea(dst, FieldOperand(object, offset));
   if (emit_debug_code()) {
     Label ok;
-    test_b(dst, (1 << kPointerSizeLog2) - 1);
+    test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
     j(zero, &ok, Label::kNear);
     int3();
     bind(&ok);
@@ -406,7 +406,7 @@
   if (emit_debug_code()) {
     Label ok;
     lea(address, FieldOperand(object, HeapObject::kMapOffset));
-    test_b(address, (1 << kPointerSizeLog2) - 1);
+    test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
     j(zero, &ok, Label::kNear);
     int3();
     bind(&ok);
@@ -597,6 +597,71 @@
   call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
 }
 
+void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+  if (shift >= 32) {
+    mov(high, low);
+    shl(high, shift - 32);
+    xor_(low, low);
+  } else {
+    shld(high, low, shift);
+    shl(low, shift);
+  }
+}
+
+void MacroAssembler::ShlPair_cl(Register high, Register low) {
+  shld_cl(high, low);
+  shl_cl(low);
+  Label done;
+  test(ecx, Immediate(0x20));
+  j(equal, &done, Label::kNear);
+  mov(high, low);
+  xor_(low, low);
+  bind(&done);
+}
+
+void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+  if (shift >= 32) {
+    mov(low, high);
+    shr(low, shift - 32);
+    xor_(high, high);
+  } else {
+    shrd(high, low, shift);
+    shr(high, shift);
+  }
+}
+
+void MacroAssembler::ShrPair_cl(Register high, Register low) {
+  shrd_cl(low, high);
+  shr_cl(high);
+  Label done;
+  test(ecx, Immediate(0x20));
+  j(equal, &done, Label::kNear);
+  mov(low, high);
+  xor_(high, high);
+  bind(&done);
+}
+
+void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
+  if (shift >= 32) {
+    mov(low, high);
+    sar(low, shift - 32);
+    sar(high, 31);
+  } else {
+    shrd(high, low, shift);
+    sar(high, shift);
+  }
+}
+
+void MacroAssembler::SarPair_cl(Register high, Register low) {
+  shrd_cl(low, high);
+  sar_cl(high);
+  Label done;
+  test(ecx, Immediate(0x20));
+  j(equal, &done, Label::kNear);
+  mov(low, high);
+  sar(high, 31);
+  bind(&done);
+}
 
 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
   static const int kMaxImmediateBits = 17;
@@ -634,8 +699,7 @@
 
 
 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
-  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
-       static_cast<int8_t>(type));
+  cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
 }
 
 
@@ -647,7 +711,7 @@
   STATIC_ASSERT(FAST_ELEMENTS == 2);
   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleyElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   j(above, fail, distance);
 }
 
@@ -660,10 +724,10 @@
   STATIC_ASSERT(FAST_ELEMENTS == 2);
   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleySmiElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   j(below_equal, fail, distance);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleyElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
   j(above, fail, distance);
 }
 
@@ -674,7 +738,7 @@
   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleySmiElementValue);
+       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
   j(above, fail, distance);
 }
 
@@ -761,7 +825,7 @@
                                            Register instance_type) {
   mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
   movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
-  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
+  cmpb(instance_type, Immediate(LAST_NAME_TYPE));
   return below_equal;
 }
 
@@ -842,6 +906,15 @@
   }
 }
 
+void MacroAssembler::AssertNotNumber(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsANumber);
+    cmp(FieldOperand(object, HeapObject::kMapOffset),
+        isolate()->factory()->heap_number_map());
+    Check(not_equal, kOperandIsANumber);
+  }
+}
 
 void MacroAssembler::AssertSmi(Register object) {
   if (emit_debug_code()) {
@@ -933,12 +1006,10 @@
   }
 }
 
-
-void MacroAssembler::StubPrologue() {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
   push(ebp);  // Caller's frame pointer.
   mov(ebp, esp);
-  push(esi);  // Callee's context.
-  push(Immediate(Smi::FromInt(StackFrame::STUB)));
+  push(Immediate(Smi::FromInt(type)));
 }
 
 
@@ -976,9 +1047,10 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, esp);
-  push(esi);
   push(Immediate(Smi::FromInt(type)));
-  push(Immediate(CodeObject()));
+  if (type == StackFrame::INTERNAL) {
+    push(Immediate(CodeObject()));
+  }
   if (emit_debug_code()) {
     cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
     Check(not_equal, kCodeObjectNotProperlyPatched);
@@ -988,7 +1060,7 @@
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   if (emit_debug_code()) {
-    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+    cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
         Immediate(Smi::FromInt(type)));
     Check(equal, kStackFrameTypesMustMatch);
   }
@@ -998,15 +1070,17 @@
 
 void MacroAssembler::EnterExitFramePrologue() {
   // Set up the frame structure on the stack.
-  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
-  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
-  DCHECK(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+  DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   push(ebp);
   mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
-  DCHECK(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  push(Immediate(Smi::FromInt(StackFrame::EXIT)));
+  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
   push(Immediate(0));  // Saved entry sp, patched before call.
+  DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
   push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
@@ -1025,7 +1099,7 @@
     // Store FPU state to m108byte.
     int space = 108 + argc * kPointerSize;
     sub(esp, Immediate(space));
-    const int offset = -2 * kPointerSize;  // entry fp + code object.
+    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
     fnsave(MemOperand(ebp, offset - 108));
   } else {
     sub(esp, Immediate(argc * kPointerSize));
@@ -1065,7 +1139,7 @@
 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
   // Optionally restore FPU state.
   if (save_doubles) {
-    const int offset = -2 * kPointerSize;
+    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
     frstor(MemOperand(ebp, offset - 108));
   }
 
@@ -1145,8 +1219,18 @@
   DCHECK(!holder_reg.is(scratch2));
   DCHECK(!scratch1.is(scratch2));
 
-  // Load current lexical context from the stack frame.
-  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
+  // Load current lexical context from the active StandardFrame, which
+  // may require crawling past STUB frames.
+  Label load_context;
+  Label has_context;
+  mov(scratch2, ebp);
+  bind(&load_context);
+  mov(scratch1,
+      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  JumpIfNotSmi(scratch1, &has_context);
+  mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
+  jmp(&load_context);
+  bind(&has_context);
 
   // When generating debug code, make sure the lexical context is set.
   if (emit_debug_code()) {
@@ -1859,7 +1943,7 @@
   int byte_index = bit_index / kBitsPerByte;
   int byte_bit_index = bit_index & (kBitsPerByte - 1);
   test_b(FieldOperand(object, field_offset + byte_index),
-         static_cast<byte>(1 << byte_bit_index));
+         Immediate(1 << byte_bit_index));
 }
 
 
@@ -2024,6 +2108,87 @@
   jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
 }
 
+void MacroAssembler::PrepareForTailCall(
+    const ParameterCount& callee_args_count, Register caller_args_count_reg,
+    Register scratch0, Register scratch1, ReturnAddressState ra_state,
+    int number_of_temp_values_after_return_address) {
+#if DEBUG
+  if (callee_args_count.is_reg()) {
+    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
+                       scratch1));
+  } else {
+    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
+  }
+  DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
+         number_of_temp_values_after_return_address == 0);
+#endif
+
+  // Calculate the destination address where we will put the return address
+  // after we drop current frame.
+  Register new_sp_reg = scratch0;
+  if (callee_args_count.is_reg()) {
+    sub(caller_args_count_reg, callee_args_count.reg());
+    lea(new_sp_reg,
+        Operand(ebp, caller_args_count_reg, times_pointer_size,
+                StandardFrameConstants::kCallerPCOffset -
+                    number_of_temp_values_after_return_address * kPointerSize));
+  } else {
+    lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
+                            StandardFrameConstants::kCallerPCOffset -
+                                (callee_args_count.immediate() +
+                                 number_of_temp_values_after_return_address) *
+                                    kPointerSize));
+  }
+
+  if (FLAG_debug_code) {
+    cmp(esp, new_sp_reg);
+    Check(below, kStackAccessBelowStackPointer);
+  }
+
+  // Copy return address from caller's frame to current frame's return address
+  // to avoid its trashing and let the following loop copy it to the right
+  // place.
+  Register tmp_reg = scratch1;
+  if (ra_state == ReturnAddressState::kOnStack) {
+    mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+    mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
+        tmp_reg);
+  } else {
+    DCHECK(ReturnAddressState::kNotOnStack == ra_state);
+    DCHECK_EQ(0, number_of_temp_values_after_return_address);
+    Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+  }
+
+  // Restore caller's frame pointer now as it could be overwritten by
+  // the copying loop.
+  mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // +2 here is to copy both receiver and return address.
+  Register count_reg = caller_args_count_reg;
+  if (callee_args_count.is_reg()) {
+    lea(count_reg, Operand(callee_args_count.reg(),
+                           2 + number_of_temp_values_after_return_address));
+  } else {
+    mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
+                             number_of_temp_values_after_return_address));
+    // TODO(ishell): Unroll copying loop for small immediate values.
+  }
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+  Label loop, entry;
+  jmp(&entry, Label::kNear);
+  bind(&loop);
+  dec(count_reg);
+  mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
+  mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
+  bind(&entry);
+  cmp(count_reg, Immediate(0));
+  j(not_equal, &loop, Label::kNear);
+
+  // Leave current frame.
+  mov(esp, new_sp_reg);
+}
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -2098,7 +2263,7 @@
   Label skip_flooding;
   ExternalReference step_in_enabled =
       ExternalReference::debug_step_in_enabled_address(isolate());
-  cmpb(Operand::StaticVariable(step_in_enabled), 0);
+  cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
   j(equal, &skip_flooding);
   {
     FrameScope frame(this,
@@ -2695,7 +2860,7 @@
   Label succeed;
   test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
   j(zero, &succeed);
-  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+  cmpb(operand, Immediate(SYMBOL_TYPE));
   j(not_equal, not_unique_name, distance);
 
   bind(&succeed);
@@ -2843,8 +3008,7 @@
     and_(scratch, object);
   }
   if (mask < (1 << kBitsPerByte)) {
-    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
-           static_cast<uint8_t>(mask));
+    test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
   } else {
     test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
   }
@@ -2867,7 +3031,7 @@
   DCHECK(!isolate()->heap()->mark_compact_collector()->
          IsOnEvacuationCandidate(*map));
   if (mask < (1 << kBitsPerByte)) {
-    test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
+    test_b(Operand::StaticVariable(reference), Immediate(mask));
   } else {
     test(Operand::StaticVariable(reference), Immediate(mask));
   }
@@ -2907,7 +3071,8 @@
   jmp(&other_color, Label::kNear);
 
   bind(&word_boundary);
-  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
+         Immediate(1));
 
   j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
   bind(&other_color);
@@ -3009,19 +3174,40 @@
     Register receiver_reg,
     Register scratch_reg,
     Label* no_memento_found) {
-  ExternalReference new_space_start =
-      ExternalReference::new_space_start(isolate());
+  Label map_check;
+  Label top_check;
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
+  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
+  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
 
-  lea(scratch_reg, Operand(receiver_reg,
-      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
-  cmp(scratch_reg, Immediate(new_space_start));
-  j(less, no_memento_found);
+  // Bail out if the object is not in new space.
+  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
+  // If the object is in new space, we need to check whether it is on the same
+  // page as the current top.
+  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+  j(zero, &top_check);
+  // The object is on a different page than allocation top. Bail out if the
+  // object sits on the page boundary as no memento can follow and we cannot
+  // touch the memory following it.
+  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+  xor_(scratch_reg, receiver_reg);
+  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
+  j(not_zero, no_memento_found);
+  // Continue with the actual map check.
+  jmp(&map_check);
+  // If top is on the same page as the current object, we need to check whether
+  // we are below top.
+  bind(&top_check);
+  lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
   cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
   j(greater, no_memento_found);
-  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
-      Immediate(isolate()->factory()->allocation_memento_map()));
+  // Memento map check.
+  bind(&map_check);
+  mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
+  cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
 }
 
 
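The PrepareForTailCall sequence added above drops the current frame by sliding the callee's arguments and the return address up the stack over the frame being discarded; because source and destination overlap and the destination sits at higher addresses, the copy loop walks from the highest slot down. A minimal plain-C++ model of that copy order (hypothetical stack layout, not V8 code):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// stack[0] models the lowest address (esp). Moves `count` words from index `src`
// to index `dst` with dst >= src, reading each source word before it is overwritten.
void SlideWordsUp(std::vector<uint32_t>* stack, size_t dst, size_t src, size_t count) {
  assert(dst >= src);
  for (size_t i = count; i > 0; --i) {  // highest slot first, memmove-style
    (*stack)[dst + i - 1] = (*stack)[src + i - 1];
  }
}

int main() {
  // [return address, receiver, arg1, arg2] followed by two words of the dropped frame.
  std::vector<uint32_t> stack = {45, 44, 43, 42, 0xAA, 0xBB};
  SlideWordsUp(&stack, 2, 0, 4);  // drop two words, keep the four-word block intact
  assert(stack[2] == 45 && stack[3] == 44 && stack[4] == 43 && stack[5] == 42);
  return 0;
}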
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index fc49361..5571413 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -44,6 +44,8 @@
 
 enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
 
+enum class ReturnAddressState { kOnStack, kNotOnStack };
+
 #ifdef DEBUG
 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                 Register reg4 = no_reg, Register reg5 = no_reg,
@@ -234,7 +236,7 @@
   void DebugBreak();
 
   // Generates function and stub prologue code.
-  void StubPrologue();
+  void StubPrologue(StackFrame::Type type);
   void Prologue(bool code_pre_aging);
 
   // Enter specific kind of exit frame. Expects the number of
@@ -318,6 +320,20 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Removes current frame and its arguments from the stack preserving
+  // the arguments and a return address pushed to the stack for the next call.
+  // |ra_state| defines whether return address is already pushed to stack or
+  // not. Both |callee_args_count| and |caller_args_count_reg| do not include
+  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+  // is trashed. |number_of_temp_values_after_return_address| specifies
+  // the number of words pushed to the stack after the return address. This is
+  // to allow "allocation" of scratch registers that this function requires
+  // by saving their values on the stack.
+  void PrepareForTailCall(const ParameterCount& callee_args_count,
+                          Register caller_args_count_reg, Register scratch0,
+                          Register scratch1, ReturnAddressState ra_state,
+                          int number_of_temp_values_after_return_address);
+
   // Invoke the JavaScript function code by either calling or jumping.
 
   void InvokeFunctionCode(Register function, Register new_target,
@@ -344,6 +360,12 @@
                       const ParameterCount& actual, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
+  void ShlPair(Register high, Register low, uint8_t imm8);
+  void ShlPair_cl(Register high, Register low);
+  void ShrPair(Register high, Register low, uint8_t imm8);
+  void ShrPair_cl(Register high, Register low);
+  void SarPair(Register high, Register low, uint8_t imm8);
+  void SarPair_cl(Register high, Register low);
 
   // Expression support
   // Support for constant splitting.
@@ -509,6 +531,7 @@
 
   // Abort execution if argument is not a number, enabled via --debug-code.
   void AssertNumber(Register object);
+  void AssertNotNumber(Register object);
 
   // Abort execution if argument is not a smi, enabled via --debug-code.
   void AssertSmi(Register object);
@@ -757,12 +780,6 @@
   void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
   void Popcnt(Register dst, const Operand& src);
 
-  // Emit call to the code we are currently generating.
-  void CallSelf() {
-    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
-    call(self, RelocInfo::CODE_TARGET);
-  }
-
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
@@ -770,6 +787,7 @@
   void Move(Register dst, const Immediate& x);
   void Move(const Operand& dst, const Immediate& x);
 
+  void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
   void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
 
   // Push a handle value.
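The ShlPair/ShrPair/SarPair helpers declared above implement 64-bit shifts on a value split across two 32-bit registers. A small, self-contained sketch (ordinary C++, not V8 code) of the intended semantics for the left and arithmetic-right cases, with the same >= 32 / < 32 split the assembly uses:

#include <cassert>
#include <cstdint>

struct Pair { uint32_t high; uint32_t low; };  // 64-bit value split across two 32-bit halves

// 64-bit logical left shift over the pair; for shift >= 32 the low half migrates into
// the high half, otherwise the top bits of `low` are fed into `high` (cf. shld/shl).
Pair ShlPair(Pair v, unsigned shift) {
  if (shift >= 32) return {v.low << (shift - 32), 0u};
  if (shift == 0) return v;  // avoid the undefined 32-bit shift below
  return {(v.high << shift) | (v.low >> (32 - shift)), v.low << shift};
}

// 64-bit arithmetic right shift over the pair; the sign bit of `high` is replicated.
// Assumes arithmetic shifting of negative signed values, as on x86.
Pair SarPair(Pair v, unsigned shift) {
  int32_t high = static_cast<int32_t>(v.high);
  if (shift >= 32) {
    return {static_cast<uint32_t>(high >> 31),
            static_cast<uint32_t>(high >> (shift - 32))};
  }
  if (shift == 0) return v;
  return {static_cast<uint32_t>(high >> shift),
          (v.low >> shift) | (v.high << (32 - shift))};
}

int main() {
  const uint64_t x = 0xFEDCBA9876543210ull;
  Pair p{static_cast<uint32_t>(x >> 32), static_cast<uint32_t>(x)};
  Pair l = ShlPair(p, 12);
  assert(((static_cast<uint64_t>(l.high) << 32) | l.low) == (x << 12));
  Pair r = SarPair(p, 40);
  assert(((static_cast<uint64_t>(r.high) << 32) | r.low) ==
         static_cast<uint64_t>(static_cast<int64_t>(x) >> 40));
  return 0;
}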
diff --git a/src/zone.cc b/src/zone.cc
index 1f722f2..a10b636 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -72,15 +72,14 @@
   size_t size_;
 };
 
-
-Zone::Zone()
+Zone::Zone(base::AccountingAllocator* allocator)
     : allocation_size_(0),
       segment_bytes_allocated_(0),
       position_(0),
       limit_(0),
+      allocator_(allocator),
       segment_head_(nullptr) {}
 
-
 Zone::~Zone() {
   DeleteAll();
   DeleteKeptSegment();
@@ -204,7 +203,7 @@
 // Creates a new segment, sets it size, and pushes it to the front
 // of the segment chain. Returns the new segment.
 Segment* Zone::NewSegment(size_t size) {
-  Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
+  Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
   segment_bytes_allocated_ += size;
   if (result != nullptr) {
     result->Initialize(segment_head_, size);
@@ -217,7 +216,7 @@
 // Deletes the given segment. Does not touch the segment chain.
 void Zone::DeleteSegment(Segment* segment, size_t size) {
   segment_bytes_allocated_ -= size;
-  Malloced::Delete(segment);
+  allocator_->Free(segment, size);
 }
 
 
diff --git a/src/zone.h b/src/zone.h
index 753e203..fa21155 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -7,7 +7,7 @@
 
 #include <limits>
 
-#include "src/allocation.h"
+#include "src/base/accounting-allocator.h"
 #include "src/base/logging.h"
 #include "src/globals.h"
 #include "src/hashmap.h"
@@ -35,7 +35,7 @@
 // from multi-threaded code.
 class Zone final {
  public:
-  Zone();
+  explicit Zone(base::AccountingAllocator* allocator);
   ~Zone();
 
   // Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -64,6 +64,8 @@
 
   size_t allocation_size() const { return allocation_size_; }
 
+  base::AccountingAllocator* allocator() const { return allocator_; }
+
  private:
   // All pointers returned from New() have this alignment.  In addition, if the
   // object being allocated has a size that is divisible by 8 then its alignment
@@ -114,6 +116,8 @@
   Address position_;
   Address limit_;
 
+  base::AccountingAllocator* allocator_;
+
   Segment* segment_head_;
 };
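With the Zone changes above, a zone can no longer be default-constructed; segment memory comes from an injected base::AccountingAllocator rather than Malloced. A minimal usage sketch against the headers in this patch (assumes AccountingAllocator is default-constructible and that Zone::New(size_t) is unchanged; illustrative only):

#include "src/base/accounting-allocator.h"
#include "src/zone.h"

namespace v8 {
namespace internal {

void ZoneUsageSketch() {
  base::AccountingAllocator allocator;  // owned by the caller; must outlive the Zone
  Zone zone(&allocator);                // Zone::NewSegment() now calls allocator.Allocate()
  int* scratch = static_cast<int*>(zone.New(4 * sizeof(int)));
  scratch[0] = 0;  // zone memory is released when `zone` dies, via allocator.Free()
  DCHECK_EQ(&allocator, zone.allocator());
}

}  // namespace internal
}  // namespace v8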