Merge V8 5.4.500.40

Test: Manual - built & ran d8
Change-Id: I4edfa2853d3e565b729723645395688ece3193f4
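---

Reviewer notes, not part of the commit message: the bulk of this merge
switches the compiler's property access tracking from receiver types
(Type::Class) to explicit map lists, and teaches PropertyAccessInfo to
merge compatible infos so polymorphic accesses to the same field or
constant collapse into a single info. It also adds typed-array and
string accessors to AccessBuilder, use-edge traversal to AllNodes, and
Float32-register handling plus reworked tail-call stack adjustment to
the ARM code generator. A minimal sketch of the new Merge() behaviour,
assuming two maps map1/map2 that hold the same field at the same index
(the variable names are illustrative, not from the patch):

    // Two monomorphic infos for the same field on different maps...
    PropertyAccessInfo a = PropertyAccessInfo::DataField(
        MapList{map1}, field_index, field_type);
    PropertyAccessInfo b = PropertyAccessInfo::DataField(
        MapList{map2}, field_index, field_type);
    // ...merge into one polymorphic info carrying both receiver maps.
    if (a.Merge(&b)) {
      // a.receiver_maps() now contains {map1, map2};
      // ComputePropertyAccessInfos() relies on this to emit one access
      // info instead of two.
    }

    // Element accesses can likewise be specialized by elements kind:
    ElementAccess elem =
        AccessBuilder::ForFixedArrayElement(FAST_SMI_ELEMENTS);
    // elem.type is TypeCache::Get().kSmi and elem.write_barrier_kind is
    // kNoWriteBarrier, since Smi stores need no barrier.
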
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 0eac109..c43a53f 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -17,8 +17,8 @@
 // static
 FieldAccess AccessBuilder::ForMap() {
   FieldAccess access = {
-      kTaggedBase, HeapObject::kMapOffset,   MaybeHandle<Name>(),
-      Type::Any(), MachineType::AnyTagged(), kMapWriteBarrier};
+      kTaggedBase,           HeapObject::kMapOffset,   MaybeHandle<Name>(),
+      Type::OtherInternal(), MachineType::AnyTagged(), kMapWriteBarrier};
   return access;
 }
 
@@ -92,7 +92,7 @@
   FieldAccess access = {kTaggedBase,
                         JSFunction::kSharedFunctionInfoOffset,
                         Handle<Name>(),
-                        Type::Any(),
+                        Type::OtherInternal(),
                         MachineType::AnyTagged(),
                         kPointerWriteBarrier};
   return access;
@@ -156,7 +156,7 @@
   FieldAccess access = {kTaggedBase,
                         JSGeneratorObject::kInputOrDebugPosOffset,
                         Handle<Name>(),
-                        Type::Any(),
+                        Type::NonInternal(),
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
@@ -216,16 +216,14 @@
   return access;
 }
 
-
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
-  FieldAccess access = {kTaggedBase,         JSArrayBuffer::kBitFieldOffset,
-                        MaybeHandle<Name>(), TypeCache::Get().kInt8,
-                        MachineType::Int8(), kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,           JSArrayBuffer::kBitFieldOffset,
+                        MaybeHandle<Name>(),   TypeCache::Get().kUint8,
+                        MachineType::Uint32(), kNoWriteBarrier};
   return access;
 }
 
-
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
   FieldAccess access = {kTaggedBase,
@@ -237,6 +235,38 @@
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferViewByteLength() {
+  FieldAccess access = {kTaggedBase,
+                        JSArrayBufferView::kByteLengthOffset,
+                        MaybeHandle<Name>(),
+                        TypeCache::Get().kPositiveInteger,
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferViewByteOffset() {
+  FieldAccess access = {kTaggedBase,
+                        JSArrayBufferView::kByteOffsetOffset,
+                        MaybeHandle<Name>(),
+                        TypeCache::Get().kPositiveInteger,
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSTypedArrayLength() {
+  FieldAccess access = {kTaggedBase,
+                        JSTypedArray::kLengthOffset,
+                        MaybeHandle<Name>(),
+                        TypeCache::Get().kJSTypedArrayLengthType,
+                        MachineType::AnyTagged(),
+                        kNoWriteBarrier};
+  return access;
+}
 
 // static
 FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
@@ -253,8 +283,8 @@
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultDone() {
   FieldAccess access = {
-      kTaggedBase, JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
-      Type::Any(), MachineType::AnyTagged(),      kFullWriteBarrier};
+      kTaggedBase,         JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
+      Type::NonInternal(), MachineType::AnyTagged(),      kFullWriteBarrier};
   return access;
 }
 
@@ -262,8 +292,8 @@
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultValue() {
   FieldAccess access = {
-      kTaggedBase, JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
-      Type::Any(), MachineType::AnyTagged(),       kFullWriteBarrier};
+      kTaggedBase,         JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
+      Type::NonInternal(), MachineType::AnyTagged(),       kFullWriteBarrier};
   return access;
 }
 
@@ -297,6 +327,27 @@
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
+  FieldAccess access = {kTaggedBase,
+                        FixedTypedArrayBase::kBasePointerOffset,
+                        MaybeHandle<Name>(),
+                        Type::Tagged(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
+  FieldAccess access = {kTaggedBase,
+                        FixedTypedArrayBase::kExternalPointerOffset,
+                        MaybeHandle<Name>(),
+                        Type::UntaggedPointer(),
+                        MachineType::Pointer(),
+                        kNoWriteBarrier};
+  return access;
+}
 
 // static
 FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
@@ -386,6 +437,78 @@
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForConsStringFirst() {
+  FieldAccess access = {
+      kTaggedBase,    ConsString::kFirstOffset, Handle<Name>(),
+      Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForConsStringSecond() {
+  FieldAccess access = {
+      kTaggedBase,    ConsString::kSecondOffset, Handle<Name>(),
+      Type::String(), MachineType::AnyTagged(),  kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForSlicedStringOffset() {
+  FieldAccess access = {
+      kTaggedBase,         SlicedString::kOffsetOffset, Handle<Name>(),
+      Type::SignedSmall(), MachineType::AnyTagged(),    kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForSlicedStringParent() {
+  FieldAccess access = {
+      kTaggedBase,    SlicedString::kParentOffset, Handle<Name>(),
+      Type::String(), MachineType::AnyTagged(),    kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalStringResourceData() {
+  FieldAccess access = {kTaggedBase,
+                        ExternalString::kResourceDataOffset,
+                        Handle<Name>(),
+                        Type::UntaggedPointer(),
+                        MachineType::Pointer(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForExternalOneByteStringCharacter() {
+  ElementAccess access = {kUntaggedBase, 0, TypeCache::Get().kUint8,
+                          MachineType::Uint8(), kNoWriteBarrier};
+  return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForExternalTwoByteStringCharacter() {
+  ElementAccess access = {kUntaggedBase, 0, TypeCache::Get().kUint16,
+                          MachineType::Uint16(), kNoWriteBarrier};
+  return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForSeqOneByteStringCharacter() {
+  ElementAccess access = {kTaggedBase, SeqOneByteString::kHeaderSize,
+                          TypeCache::Get().kUint8, MachineType::Uint8(),
+                          kNoWriteBarrier};
+  return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
+  ElementAccess access = {kTaggedBase, SeqTwoByteString::kHeaderSize,
+                          TypeCache::Get().kUint16, MachineType::Uint16(),
+                          kNoWriteBarrier};
+  return access;
+}
 
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
@@ -398,7 +521,6 @@
   return access;
 }
 
-
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
   FieldAccess access = {kTaggedBase,
@@ -414,8 +536,8 @@
 // static
 FieldAccess AccessBuilder::ForValue() {
   FieldAccess access = {
-      kTaggedBase, JSValue::kValueOffset,    Handle<Name>(),
-      Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+      kTaggedBase,         JSValue::kValueOffset,    Handle<Name>(),
+      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
 
@@ -423,8 +545,8 @@
 // static
 FieldAccess AccessBuilder::ForArgumentsLength() {
   FieldAccess access = {
-      kTaggedBase, JSArgumentsObject::kLengthOffset, Handle<Name>(),
-      Type::Any(), MachineType::AnyTagged(),         kFullWriteBarrier};
+      kTaggedBase,         JSArgumentsObject::kLengthOffset, Handle<Name>(),
+      Type::NonInternal(), MachineType::AnyTagged(),         kFullWriteBarrier};
   return access;
 }
 
@@ -434,7 +556,7 @@
   FieldAccess access = {kTaggedBase,
                         JSSloppyArgumentsObject::kCalleeOffset,
                         Handle<Name>(),
-                        Type::Any(),
+                        Type::NonInternal(),
                         MachineType::AnyTagged(),
                         kPointerWriteBarrier};
   return access;
@@ -447,7 +569,7 @@
   FieldAccess access = {kTaggedBase,
                         offset,
                         Handle<Name>(),
-                        Type::Any(),
+                        Type::NonInternal(),
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
@@ -490,6 +612,39 @@
   return access;
 }
 
+// static
+ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
+  ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+                          MachineType::AnyTagged(), kFullWriteBarrier};
+  switch (kind) {
+    case FAST_SMI_ELEMENTS:
+      access.type = TypeCache::Get().kSmi;
+      access.write_barrier_kind = kNoWriteBarrier;
+      break;
+    case FAST_HOLEY_SMI_ELEMENTS:
+      access.type = TypeCache::Get().kHoleySmi;
+      break;
+    case FAST_ELEMENTS:
+      access.type = Type::NonInternal();
+      break;
+    case FAST_HOLEY_ELEMENTS:
+      break;
+    case FAST_DOUBLE_ELEMENTS:
+      access.type = Type::Number();
+      access.write_barrier_kind = kNoWriteBarrier;
+      access.machine_type = MachineType::Float64();
+      break;
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+      access.type = Type::Number();
+      access.write_barrier_kind = kNoWriteBarrier;
+      access.machine_type = MachineType::Float64();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return access;
+}
 
 // static
 ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index 8345225..caaf8f8 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_ACCESS_BUILDER_H_
 
 #include "src/compiler/simplified-operator.h"
+#include "src/elements-kind.h"
 
 namespace v8 {
 namespace internal {
@@ -79,6 +80,15 @@
   // Provides access to JSArrayBufferView::buffer() field.
   static FieldAccess ForJSArrayBufferViewBuffer();
 
+  // Provides access to JSArrayBufferView::byteLength() field.
+  static FieldAccess ForJSArrayBufferViewByteLength();
+
+  // Provides access to JSArrayBufferView::byteOffset() field.
+  static FieldAccess ForJSArrayBufferViewByteOffset();
+
+  // Provides access to JSTypedArray::length() field.
+  static FieldAccess ForJSTypedArrayLength();
+
   // Provides access to JSDate fields.
   static FieldAccess ForJSDateField(JSDate::FieldIndex index);
 
@@ -97,6 +107,12 @@
   // Provides access to FixedArray::length() field.
   static FieldAccess ForFixedArrayLength();
 
+  // Provides access to FixedTypedArrayBase::base_pointer() field.
+  static FieldAccess ForFixedTypedArrayBaseBasePointer();
+
+  // Provides access to FixedTypedArrayBase::external_pointer() field.
+  static FieldAccess ForFixedTypedArrayBaseExternalPointer();
+
   // Provides access to DescriptorArray::enum_cache() field.
   static FieldAccess ForDescriptorArrayEnumCache();
 
@@ -124,6 +140,33 @@
   // Provides access to String::length() field.
   static FieldAccess ForStringLength();
 
+  // Provides access to ConsString::first() field.
+  static FieldAccess ForConsStringFirst();
+
+  // Provides access to ConsString::second() field.
+  static FieldAccess ForConsStringSecond();
+
+  // Provides access to SlicedString::offset() field.
+  static FieldAccess ForSlicedStringOffset();
+
+  // Provides access to SlicedString::parent() field.
+  static FieldAccess ForSlicedStringParent();
+
+  // Provides access to ExternalString::resource_data() field.
+  static FieldAccess ForExternalStringResourceData();
+
+  // Provides access to ExternalOneByteString characters.
+  static ElementAccess ForExternalOneByteStringCharacter();
+
+  // Provides access to ExternalTwoByteString characters.
+  static ElementAccess ForExternalTwoByteStringCharacter();
+
+  // Provides access to SeqOneByteString characters.
+  static ElementAccess ForSeqOneByteStringCharacter();
+
+  // Provides access to SeqTwoByteString characters.
+  static ElementAccess ForSeqTwoByteStringCharacter();
+
   // Provides access to JSGlobalObject::global_proxy() field.
   static FieldAccess ForJSGlobalObjectGlobalProxy();
 
@@ -149,6 +192,7 @@
 
   // Provides access to FixedArray elements.
   static ElementAccess ForFixedArrayElement();
+  static ElementAccess ForFixedArrayElement(ElementsKind kind);
 
   // Provides access to FixedDoubleArray elements.
   static ElementAccess ForFixedDoubleArrayElement();
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 768b985..97de25b 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -25,6 +25,8 @@
   ElementsKind const elements_kind = map->elements_kind();
   if (IsFastElementsKind(elements_kind)) return true;
   // TODO(bmeurer): Add support for other elements kind.
+  if (elements_kind == UINT8_CLAMPED_ELEMENTS) return false;
+  if (IsFixedTypedArrayElementsKind(elements_kind)) return true;
   return false;
 }
 
@@ -56,59 +58,55 @@
   return os;
 }
 
+ElementAccessInfo::ElementAccessInfo() {}
+
+ElementAccessInfo::ElementAccessInfo(MapList const& receiver_maps,
+                                     ElementsKind elements_kind)
+    : elements_kind_(elements_kind), receiver_maps_(receiver_maps) {}
 
 // static
-PropertyAccessInfo PropertyAccessInfo::NotFound(Type* receiver_type,
+PropertyAccessInfo PropertyAccessInfo::NotFound(MapList const& receiver_maps,
                                                 MaybeHandle<JSObject> holder) {
-  return PropertyAccessInfo(holder, receiver_type);
+  return PropertyAccessInfo(holder, receiver_maps);
 }
 
-
 // static
 PropertyAccessInfo PropertyAccessInfo::DataConstant(
-    Type* receiver_type, Handle<Object> constant,
+    MapList const& receiver_maps, Handle<Object> constant,
     MaybeHandle<JSObject> holder) {
-  return PropertyAccessInfo(holder, constant, receiver_type);
+  return PropertyAccessInfo(kDataConstant, holder, constant, receiver_maps);
 }
 
-
 // static
 PropertyAccessInfo PropertyAccessInfo::DataField(
-    Type* receiver_type, FieldIndex field_index, Type* field_type,
+    MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
     MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
   return PropertyAccessInfo(holder, transition_map, field_index, field_type,
-                            receiver_type);
+                            receiver_maps);
 }
 
-
-ElementAccessInfo::ElementAccessInfo() : receiver_type_(Type::None()) {}
-
-
-ElementAccessInfo::ElementAccessInfo(Type* receiver_type,
-                                     ElementsKind elements_kind,
-                                     MaybeHandle<JSObject> holder)
-    : elements_kind_(elements_kind),
-      holder_(holder),
-      receiver_type_(receiver_type) {}
-
+// static
+PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
+    MapList const& receiver_maps, Handle<Object> constant,
+    MaybeHandle<JSObject> holder) {
+  return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
+}
 
 PropertyAccessInfo::PropertyAccessInfo()
-    : kind_(kInvalid), receiver_type_(Type::None()), field_type_(Type::Any()) {}
-
+    : kind_(kInvalid), field_type_(Type::None()) {}
 
 PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
-                                       Type* receiver_type)
+                                       MapList const& receiver_maps)
     : kind_(kNotFound),
-      receiver_type_(receiver_type),
+      receiver_maps_(receiver_maps),
       holder_(holder),
-      field_type_(Type::Any()) {}
+      field_type_(Type::None()) {}
 
-
-PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
                                        Handle<Object> constant,
-                                       Type* receiver_type)
-    : kind_(kDataConstant),
-      receiver_type_(receiver_type),
+                                       MapList const& receiver_maps)
+    : kind_(kind),
+      receiver_maps_(receiver_maps),
       constant_(constant),
       holder_(holder),
       field_type_(Type::Any()) {}
@@ -116,14 +114,56 @@
 PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
                                        MaybeHandle<Map> transition_map,
                                        FieldIndex field_index, Type* field_type,
-                                       Type* receiver_type)
+                                       MapList const& receiver_maps)
     : kind_(kDataField),
-      receiver_type_(receiver_type),
+      receiver_maps_(receiver_maps),
       transition_map_(transition_map),
       holder_(holder),
       field_index_(field_index),
       field_type_(field_type) {}
 
+bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
+  if (this->kind_ != that->kind_) return false;
+  if (this->holder_.address() != that->holder_.address()) return false;
+
+  switch (this->kind_) {
+    case kInvalid:
+      break;
+
+    case kNotFound:
+      return true;
+
+    case kDataField: {
+      // Check if we actually access the same field.
+      if (this->transition_map_.address() == that->transition_map_.address() &&
+          this->field_index_ == that->field_index_ &&
+          this->field_type_->Is(that->field_type_) &&
+          that->field_type_->Is(this->field_type_)) {
+        this->receiver_maps_.insert(this->receiver_maps_.end(),
+                                    that->receiver_maps_.begin(),
+                                    that->receiver_maps_.end());
+        return true;
+      }
+      return false;
+    }
+
+    case kDataConstant:
+    case kAccessorConstant: {
+      // Check if we actually access the same constant.
+      if (this->constant_.address() == that->constant_.address()) {
+        this->receiver_maps_.insert(this->receiver_maps_.end(),
+                                    that->receiver_maps_.begin(),
+                                    that->receiver_maps_.end());
+        return true;
+      }
+      return false;
+    }
+  }
+
+  UNREACHABLE();
+  return false;
+}
+
 AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
                                      Handle<Context> native_context, Zone* zone)
     : dependencies_(dependencies),
@@ -139,30 +179,8 @@
     Handle<Map> map, AccessMode access_mode, ElementAccessInfo* access_info) {
   // Check if it is safe to inline element access for the {map}.
   if (!CanInlineElementAccess(map)) return false;
-
   ElementsKind const elements_kind = map->elements_kind();
-
-  // Certain (monomorphic) stores need a prototype chain check because shape
-  // changes could allow callbacks on elements in the chain that are not
-  // compatible with monomorphic keyed stores.
-  MaybeHandle<JSObject> holder;
-  if (access_mode == AccessMode::kStore && map->prototype()->IsJSObject()) {
-    for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
-      Handle<JSReceiver> prototype =
-          PrototypeIterator::GetCurrent<JSReceiver>(i);
-      if (!prototype->IsJSObject()) return false;
-      // TODO(bmeurer): We do not currently support unstable prototypes.
-      // We might want to revisit the way we handle certain keyed stores
-      // because this whole prototype chain check is essential a hack,
-      // and I'm not sure that it is correct at all with dictionaries in
-      // the prototype chain.
-      if (!prototype->map()->is_stable()) return false;
-      holder = Handle<JSObject>::cast(prototype);
-    }
-  }
-
-  *access_info =
-      ElementAccessInfo(Type::Class(map, zone()), elements_kind, holder);
+  *access_info = ElementAccessInfo(MapList{map}, elements_kind);
   return true;
 }
 
@@ -256,50 +274,75 @@
           return LookupTransition(receiver_map, name, holder, access_info);
         }
       }
-      if (details.type() == DATA_CONSTANT) {
-        *access_info = PropertyAccessInfo::DataConstant(
-            Type::Class(receiver_map, zone()),
-            handle(descriptors->GetValue(number), isolate()), holder);
-        return true;
-      } else if (details.type() == DATA) {
-        int index = descriptors->GetFieldIndex(number);
-        Representation field_representation = details.representation();
-        FieldIndex field_index = FieldIndex::ForPropertyIndex(
-            *map, index, field_representation.IsDouble());
-        Type* field_type = Type::Tagged();
-        if (field_representation.IsSmi()) {
-          field_type = type_cache_.kSmi;
-        } else if (field_representation.IsDouble()) {
-          field_type = type_cache_.kFloat64;
-        } else if (field_representation.IsHeapObject()) {
-          // Extract the field type from the property details (make sure its
-          // representation is TaggedPointer to reflect the heap object case).
-          field_type = Type::Intersect(
-              descriptors->GetFieldType(number)->Convert(zone()),
-              Type::TaggedPointer(), zone());
-          if (field_type->Is(Type::None())) {
-            // Store is not safe if the field type was cleared.
-            if (access_mode == AccessMode::kStore) return false;
-
-            // The field type was cleared by the GC, so we don't know anything
-            // about the contents now.
-            // TODO(bmeurer): It would be awesome to make this saner in the
-            // runtime/GC interaction.
-            field_type = Type::TaggedPointer();
-          } else if (!Type::Any()->Is(field_type)) {
-            // Add proper code dependencies in case of stable field map(s).
-            Handle<Map> field_owner_map(map->FindFieldOwner(number), isolate());
-            dependencies()->AssumeFieldType(field_owner_map);
-          }
-          DCHECK(field_type->Is(Type::TaggedPointer()));
+      switch (details.type()) {
+        case DATA_CONSTANT: {
+          *access_info = PropertyAccessInfo::DataConstant(
+              MapList{receiver_map},
+              handle(descriptors->GetValue(number), isolate()), holder);
+          return true;
         }
-        *access_info = PropertyAccessInfo::DataField(
-            Type::Class(receiver_map, zone()), field_index, field_type, holder);
-        return true;
-      } else {
-        // TODO(bmeurer): Add support for accessors.
-        return false;
+        case DATA: {
+          int index = descriptors->GetFieldIndex(number);
+          Representation field_representation = details.representation();
+          FieldIndex field_index = FieldIndex::ForPropertyIndex(
+              *map, index, field_representation.IsDouble());
+          Type* field_type = Type::Tagged();
+          if (field_representation.IsSmi()) {
+            field_type = type_cache_.kSmi;
+          } else if (field_representation.IsDouble()) {
+            field_type = type_cache_.kFloat64;
+          } else if (field_representation.IsHeapObject()) {
+            // Extract the field type from the property details (make sure its
+            // representation is TaggedPointer to reflect the heap object case).
+            field_type = Type::Intersect(
+                descriptors->GetFieldType(number)->Convert(zone()),
+                Type::TaggedPointer(), zone());
+            if (field_type->Is(Type::None())) {
+              // Store is not safe if the field type was cleared.
+              if (access_mode == AccessMode::kStore) return false;
+
+              // The field type was cleared by the GC, so we don't know anything
+              // about the contents now.
+              // TODO(bmeurer): It would be awesome to make this saner in the
+              // runtime/GC interaction.
+              field_type = Type::TaggedPointer();
+            } else if (!Type::Any()->Is(field_type)) {
+              // Add proper code dependencies in case of stable field map(s).
+              Handle<Map> field_owner_map(map->FindFieldOwner(number),
+                                          isolate());
+              dependencies()->AssumeFieldType(field_owner_map);
+            }
+            if (access_mode == AccessMode::kLoad) {
+              field_type = Type::Any();
+            }
+          }
+          *access_info = PropertyAccessInfo::DataField(
+              MapList{receiver_map}, field_index, field_type, holder);
+          return true;
+        }
+        case ACCESSOR_CONSTANT: {
+          Handle<Object> accessors(descriptors->GetValue(number), isolate());
+          if (!accessors->IsAccessorPair()) return false;
+          Handle<Object> accessor(
+              access_mode == AccessMode::kLoad
+                  ? Handle<AccessorPair>::cast(accessors)->getter()
+                  : Handle<AccessorPair>::cast(accessors)->setter(),
+              isolate());
+          if (!accessor->IsJSFunction()) {
+            // TODO(turbofan): Add support for API accessors.
+            return false;
+          }
+          *access_info = PropertyAccessInfo::AccessorConstant(
+              MapList{receiver_map}, accessor, holder);
+          return true;
+        }
+        case ACCESSOR: {
+          // TODO(turbofan): Add support for general accessors?
+          return false;
+        }
       }
+      UNREACHABLE();
+      return false;
     }
 
     // Don't search on the prototype chain for special indices in case of
@@ -331,8 +374,8 @@
         // The property was not found, return undefined or throw depending
         // on the language mode of the load operation.
         // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
-        *access_info = PropertyAccessInfo::NotFound(
-            Type::Class(receiver_map, zone()), holder);
+        *access_info =
+            PropertyAccessInfo::NotFound(MapList{receiver_map}, holder);
         return true;
       } else {
         return false;
@@ -350,7 +393,6 @@
   return false;
 }
 
-
 bool AccessInfoFactory::ComputePropertyAccessInfos(
     MapHandleList const& maps, Handle<Name> name, AccessMode access_mode,
     ZoneVector<PropertyAccessInfo>* access_infos) {
@@ -360,7 +402,15 @@
       if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
         return false;
       }
-      access_infos->push_back(access_info);
+      // Try to merge the {access_info} with an existing one.
+      bool merged = false;
+      for (PropertyAccessInfo& other_info : *access_infos) {
+        if (other_info.Merge(&access_info)) {
+          merged = true;
+          break;
+        }
+      }
+      if (!merged) access_infos->push_back(access_info);
     }
   }
   return true;
@@ -394,8 +444,8 @@
         field_type = type_cache_.kJSArrayLengthType;
       }
     }
-    *access_info = PropertyAccessInfo::DataField(Type::Class(map, zone()),
-                                                 field_index, field_type);
+    *access_info =
+        PropertyAccessInfo::DataField(MapList{map}, field_index, field_type);
     return true;
   }
   return false;
@@ -445,9 +495,8 @@
       DCHECK(field_type->Is(Type::TaggedPointer()));
     }
     dependencies()->AssumeMapNotDeprecated(transition_map);
-    *access_info =
-        PropertyAccessInfo::DataField(Type::Class(map, zone()), field_index,
-                                      field_type, holder, transition_map);
+    *access_info = PropertyAccessInfo::DataField(
+        MapList{map}, field_index, field_type, holder, transition_map);
     return true;
   }
   return false;
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
index 1556e0e..daa8722 100644
--- a/src/compiler/access-info.h
+++ b/src/compiler/access-info.h
@@ -19,7 +19,6 @@
 class Factory;
 class TypeCache;
 
-
 namespace compiler {
 
 // Whether we are loading a property or storing to a property.
@@ -27,53 +26,61 @@
 
 std::ostream& operator<<(std::ostream&, AccessMode);
 
+typedef std::vector<Handle<Map>> MapList;
 
 // Mapping of transition source to transition target.
 typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
 
-
 // This class encapsulates all information required to access a certain element.
 class ElementAccessInfo final {
  public:
   ElementAccessInfo();
-  ElementAccessInfo(Type* receiver_type, ElementsKind elements_kind,
-                    MaybeHandle<JSObject> holder);
+  ElementAccessInfo(MapList const& receiver_maps, ElementsKind elements_kind);
 
-  MaybeHandle<JSObject> holder() const { return holder_; }
   ElementsKind elements_kind() const { return elements_kind_; }
-  Type* receiver_type() const { return receiver_type_; }
+  MapList const& receiver_maps() const { return receiver_maps_; }
   MapTransitionList& transitions() { return transitions_; }
   MapTransitionList const& transitions() const { return transitions_; }
 
  private:
   ElementsKind elements_kind_;
-  MaybeHandle<JSObject> holder_;
-  Type* receiver_type_;
+  MapList receiver_maps_;
   MapTransitionList transitions_;
 };
 
-
 // This class encapsulates all information required to access a certain
 // object property, either on the object itself or on the prototype chain.
 class PropertyAccessInfo final {
  public:
-  enum Kind { kInvalid, kNotFound, kDataConstant, kDataField };
+  enum Kind {
+    kInvalid,
+    kNotFound,
+    kDataConstant,
+    kDataField,
+    kAccessorConstant
+  };
 
-  static PropertyAccessInfo NotFound(Type* receiver_type,
+  static PropertyAccessInfo NotFound(MapList const& receiver_maps,
                                      MaybeHandle<JSObject> holder);
-  static PropertyAccessInfo DataConstant(Type* receiver_type,
+  static PropertyAccessInfo DataConstant(MapList const& receiver_maps,
                                          Handle<Object> constant,
                                          MaybeHandle<JSObject> holder);
   static PropertyAccessInfo DataField(
-      Type* receiver_type, FieldIndex field_index, Type* field_type,
+      MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+  static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
+                                             Handle<Object> constant,
+                                             MaybeHandle<JSObject> holder);
 
   PropertyAccessInfo();
 
+  bool Merge(PropertyAccessInfo const* that) WARN_UNUSED_RESULT;
+
   bool IsNotFound() const { return kind() == kNotFound; }
   bool IsDataConstant() const { return kind() == kDataConstant; }
   bool IsDataField() const { return kind() == kDataField; }
+  bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
 
   bool HasTransitionMap() const { return !transition_map().is_null(); }
 
@@ -83,18 +90,19 @@
   Handle<Object> constant() const { return constant_; }
   FieldIndex field_index() const { return field_index_; }
   Type* field_type() const { return field_type_; }
-  Type* receiver_type() const { return receiver_type_; }
+  MapList const& receiver_maps() const { return receiver_maps_; }
 
  private:
-  PropertyAccessInfo(MaybeHandle<JSObject> holder, Type* receiver_type);
-  PropertyAccessInfo(MaybeHandle<JSObject> holder, Handle<Object> constant,
-                     Type* receiver_type);
+  PropertyAccessInfo(MaybeHandle<JSObject> holder,
+                     MapList const& receiver_maps);
+  PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
+                     Handle<Object> constant, MapList const& receiver_maps);
   PropertyAccessInfo(MaybeHandle<JSObject> holder,
                      MaybeHandle<Map> transition_map, FieldIndex field_index,
-                     Type* field_type, Type* receiver_type);
+                     Type* field_type, MapList const& receiver_maps);
 
   Kind kind_;
-  Type* receiver_type_;
+  MapList receiver_maps_;
   Handle<Object> constant_;
   MaybeHandle<Map> transition_map_;
   MaybeHandle<JSObject> holder_;
diff --git a/src/compiler/all-nodes.cc b/src/compiler/all-nodes.cc
index ed4a218..8040897 100644
--- a/src/compiler/all-nodes.cc
+++ b/src/compiler/all-nodes.cc
@@ -10,25 +10,33 @@
 namespace internal {
 namespace compiler {
 
-AllNodes::AllNodes(Zone* local_zone, const Graph* graph)
-    : live(local_zone), is_live(graph->NodeCount(), false, local_zone) {
+AllNodes::AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs)
+    : reachable(local_zone),
+      is_reachable_(graph->NodeCount(), false, local_zone),
+      only_inputs_(only_inputs) {
   Node* end = graph->end();
-  is_live[end->id()] = true;
-  live.push_back(end);
-  // Find all live nodes reachable from end.
-  for (size_t i = 0; i < live.size(); i++) {
-    for (Node* const input : live[i]->inputs()) {
-      if (input == nullptr) {
-        // TODO(titzer): print a warning.
+  is_reachable_[end->id()] = true;
+  reachable.push_back(end);
+  // Find all nodes reachable from end.
+  for (size_t i = 0; i < reachable.size(); i++) {
+    for (Node* input : reachable[i]->inputs()) {
+      if (input == nullptr || input->id() >= graph->NodeCount()) {
         continue;
       }
-      if (input->id() >= graph->NodeCount()) {
-        // TODO(titzer): print a warning.
-        continue;
+      if (!is_reachable_[input->id()]) {
+        is_reachable_[input->id()] = true;
+        reachable.push_back(input);
       }
-      if (!is_live[input->id()]) {
-        is_live[input->id()] = true;
-        live.push_back(input);
+    }
+    if (!only_inputs) {
+      for (Node* use : reachable[i]->uses()) {
+        if (use == nullptr || use->id() >= graph->NodeCount()) {
+          continue;
+        }
+        if (!is_reachable_[use->id()]) {
+          is_reachable_[use->id()] = true;
+          reachable.push_back(use);
+        }
       }
     }
   }
diff --git a/src/compiler/all-nodes.h b/src/compiler/all-nodes.h
index 700f007..36f02e9 100644
--- a/src/compiler/all-nodes.h
+++ b/src/compiler/all-nodes.h
@@ -16,19 +16,27 @@
 // from end.
 class AllNodes {
  public:
-  // Constructor. Traverses the graph and builds the {live} sets.
-  AllNodes(Zone* local_zone, const Graph* graph);
+  // Constructor. Traverses the graph and builds the {reachable} sets. When
+  // {only_inputs} is true, only nodes reachable through input edges are
+  // collected; these are exactly the live nodes.
+  AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs = true);
 
   bool IsLive(Node* node) {
-    if (!node) return false;
-    size_t id = node->id();
-    return id < is_live.size() && is_live[id];
+    CHECK(only_inputs_);
+    return IsReachable(node);
   }
 
-  NodeVector live;  // Nodes reachable from end.
+  bool IsReachable(Node* node) {
+    if (!node) return false;
+    size_t id = node->id();
+    return id < is_reachable_.size() && is_reachable_[id];
+  }
+
+  NodeVector reachable;  // Nodes reachable from end.
 
  private:
-  BoolVector is_live;
+  BoolVector is_reachable_;
+  const bool only_inputs_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index e1cf2a6..4ae282a 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -136,14 +136,25 @@
     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
-};
 
+  FloatRegister InputFloat32Register(size_t index) {
+    return ToFloat32Register(instr_->InputAt(index));
+  }
+
+  FloatRegister OutputFloat32Register() {
+    return ToFloat32Register(instr_->Output());
+  }
+
+  FloatRegister ToFloat32Register(InstructionOperand* op) {
+    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code()).low();
+  }
+};
 
 namespace {
 
-class OutOfLineLoadFloat final : public OutOfLineCode {
+class OutOfLineLoadFloat32 final : public OutOfLineCode {
  public:
-  OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
+  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
@@ -188,7 +199,8 @@
  public:
   OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                        Register value, Register scratch0, Register scratch1,
-                       RecordWriteMode mode)
+                       RecordWriteMode mode,
+                       UnwindingInfoWriter* unwinding_info_writer)
       : OutOfLineCode(gen),
         object_(object),
         index_(index),
@@ -197,11 +209,13 @@
         scratch0_(scratch0),
         scratch1_(scratch1),
         mode_(mode),
-        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+        must_save_lr_(!gen->frame_access_state()->has_frame()),
+        unwinding_info_writer_(unwinding_info_writer) {}
 
   OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
                        Register value, Register scratch0, Register scratch1,
-                       RecordWriteMode mode)
+                       RecordWriteMode mode,
+                       UnwindingInfoWriter* unwinding_info_writer)
       : OutOfLineCode(gen),
         object_(object),
         index_(no_reg),
@@ -210,7 +224,8 @@
         scratch0_(scratch0),
         scratch1_(scratch1),
         mode_(mode),
-        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+        must_save_lr_(!gen->frame_access_state()->has_frame()),
+        unwinding_info_writer_(unwinding_info_writer) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -227,6 +242,7 @@
     if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Push(lr);
+      unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
     }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                          remembered_set_action, save_fp_mode);
@@ -239,6 +255,7 @@
     __ CallStub(&stub);
     if (must_save_lr_) {
       __ Pop(lr);
+      unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
     }
   }
 
@@ -251,6 +268,7 @@
   Register const scratch1_;
   RecordWriteMode const mode_;
   bool must_save_lr_;
+  UnwindingInfoWriter* const unwinding_info_writer_;
 };
 
 
@@ -296,6 +314,10 @@
       return vs;
     case kNotOverflow:
       return vc;
+    case kPositiveOrZero:
+      return pl;
+    case kNegative:
+      return mi;
     default:
       break;
   }
@@ -409,23 +431,10 @@
 
 void CodeGenerator::AssembleDeconstructFrame() {
   __ LeaveFrame(StackFrame::MANUAL);
+  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ add(sp, sp, Operand(sp_slot_delta * kPointerSize));
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     if (FLAG_enable_embedded_constant_pool) {
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -461,6 +470,115 @@
   __ bind(&done);
 }
 
+namespace {
+
+void FlushPendingPushRegisters(MacroAssembler* masm,
+                               FrameAccessState* frame_access_state,
+                               ZoneVector<Register>* pending_pushes) {
+  switch (pending_pushes->size()) {
+    case 0:
+      break;
+    case 1:
+      masm->push((*pending_pushes)[0]);
+      break;
+    case 2:
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+      break;
+    case 3:
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+                 (*pending_pushes)[2]);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  frame_access_state->IncreaseSPDelta(pending_pushes->size());
+  pending_pushes->resize(0);
+}
+
+void AddPendingPushRegister(MacroAssembler* masm,
+                            FrameAccessState* frame_access_state,
+                            ZoneVector<Register>* pending_pushes,
+                            Register reg) {
+  pending_pushes->push_back(reg);
+  if (pending_pushes->size() == 3 || reg.is(ip)) {
+    FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+  }
+}
+
+void AdjustStackPointerForTailCall(
+    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+    ZoneVector<Register>* pending_pushes = nullptr,
+    bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    if (pending_pushes != nullptr) {
+      FlushPendingPushRegisters(masm, state, pending_pushes);
+    }
+    masm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    if (pending_pushes != nullptr) {
+      FlushPendingPushRegisters(masm, state, pending_pushes);
+    }
+    masm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+  ZoneVector<MoveOperands*> pushes(zone());
+  GetPushCompatibleMoves(instr, flags, &pushes);
+
+  if (!pushes.empty() &&
+      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+       first_unused_stack_slot)) {
+    ArmOperandConverter g(this, instr);
+    ZoneVector<Register> pending_pushes(zone());
+    for (auto move : pushes) {
+      LocationOperand destination_location(
+          LocationOperand::cast(move->destination()));
+      InstructionOperand source(move->source());
+      AdjustStackPointerForTailCall(
+          masm(), frame_access_state(),
+          destination_location.index() - pending_pushes.size(),
+          &pending_pushes);
+      if (source.IsStackSlot()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ ldr(ip, g.SlotToMemOperand(source_location.index()));
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               ip);
+      } else if (source.IsRegister()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               source_location.GetRegister());
+      } else if (source.IsImmediate()) {
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               ip);
+      } else {
+        // Pushes of non-scalar data types are not supported.
+        UNIMPLEMENTED();
+      }
+      move->Eliminate();
+    }
+    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+  }
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, nullptr, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -487,8 +605,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -503,15 +619,17 @@
         __ Jump(ip);
       }
       DCHECK_EQ(LeaveCC, i.OutputSBit());
+      unwinding_info_writer_.MarkBlockWillExit();
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!instr->InputAt(0)->IsImmediate());
       __ Jump(i.InputRegister(0));
+      unwinding_info_writer_.MarkBlockWillExit();
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -539,8 +657,6 @@
         __ cmp(cp, kScratchReg);
         __ Assert(eq, kWrongFunctionContext);
       }
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -550,6 +666,7 @@
       __ Jump(ip);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction: {
@@ -560,7 +677,7 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
@@ -590,6 +707,9 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -646,14 +766,16 @@
           AddressingModeField::decode(instr->opcode());
       if (addressing_mode == kMode_Offset_RI) {
         int32_t index = i.InputInt32(1);
-        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
-                                                scratch0, scratch1, mode);
+        ool = new (zone())
+            OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+                                 mode, &unwinding_info_writer_);
         __ str(value, MemOperand(object, index));
       } else {
         DCHECK_EQ(kMode_Offset_RR, addressing_mode);
         Register index(i.InputRegister(1));
-        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
-                                                scratch0, scratch1, mode);
+        ool = new (zone())
+            OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+                                 mode, &unwinding_info_writer_);
         __ str(value, MemOperand(object, index));
       }
       __ CheckPageFlag(object, scratch0,
@@ -674,9 +796,24 @@
       __ add(i.OutputRegister(0), base, Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
     case kIeee754Float64Atan2:
       ASSEMBLE_IEEE754_BINOP(atan2);
       break;
@@ -686,15 +823,15 @@
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Exp:
       ASSEMBLE_IEEE754_UNOP(exp);
       break;
     case kIeee754Float64Expm1:
       ASSEMBLE_IEEE754_UNOP(expm1);
       break;
-    case kIeee754Float64Atanh:
-      ASSEMBLE_IEEE754_UNOP(atanh);
-      break;
     case kIeee754Float64Log:
       ASSEMBLE_IEEE754_UNOP(log);
       break;
@@ -707,12 +844,24 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
+    case kIeee754Float64Pow: {
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      __ vmov(d0, d2);
+      break;
+    }
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kArmAdd:
       __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
@@ -740,6 +889,10 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmSmull:
+      __ smull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+               i.InputRegister(1));
+      break;
     case kArmSmmul:
       __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -932,54 +1085,54 @@
       break;
     case kArmVcmpF32:
       if (instr->InputAt(1)->IsFPRegister()) {
-        __ VFPCompareAndSetFlags(i.InputFloatRegister(0),
-                                 i.InputFloatRegister(1));
+        __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
+                                 i.InputFloat32Register(1));
       } else {
         DCHECK(instr->InputAt(1)->IsImmediate());
         // 0.0 is the only immediate supported by vcmp instructions.
         DCHECK(i.InputFloat32(1) == 0.0f);
-        __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
+        __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
       }
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
     case kArmVaddF32:
-      __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
-              i.InputFloatRegister(1));
+      __ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
+              i.InputFloat32Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsubF32:
-      __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
-              i.InputFloatRegister(1));
+      __ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
+              i.InputFloat32Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmulF32:
-      __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
-              i.InputFloatRegister(1));
+      __ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
+              i.InputFloat32Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlaF32:
-      __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
-              i.InputFloatRegister(2));
+      __ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
+              i.InputFloat32Register(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlsF32:
-      __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
-              i.InputFloatRegister(2));
+      __ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
+              i.InputFloat32Register(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVdivF32:
-      __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
-              i.InputFloatRegister(1));
+      __ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
+              i.InputFloat32Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsqrtF32:
-      __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
+      __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVabsF32:
-      __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
+      __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVnegF32:
-      __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
+      __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVcmpF64:
       if (instr->InputAt(1)->IsFPRegister()) {
@@ -1047,19 +1200,19 @@
       __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintmF32:
-      __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
+      __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVrintmF64:
       __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintpF32:
-      __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
+      __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVrintpF64:
       __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintzF32:
-      __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
+      __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVrintzF64:
       __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -1068,32 +1221,32 @@
       __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintnF32:
-      __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
+      __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVrintnF64:
       __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVcvtF32F64: {
-      __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
+      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF64F32: {
-      __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
+      __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloat32Register(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF32S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
+      __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF32U32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
+      __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
@@ -1113,15 +1266,23 @@
     }
     case kArmVcvtS32F32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
+      __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
       __ vmov(i.OutputRegister(), scratch);
+      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+      // because INT32_MIN allows easier out-of-bounds detection.
+      __ cmn(i.OutputRegister(), Operand(1));
+      __ mov(i.OutputRegister(), Operand(INT32_MIN), SBit::LeaveCC, vs);
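+      // vcvt_s32_f32 saturates positive overflow to INT32_MAX; 'cmn Rd, #1'
+      // sets the V flag exactly for INT32_MAX, so the conditional mov remaps
+      // that saturation value to INT32_MIN.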
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtU32F32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
+      __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
       __ vmov(i.OutputRegister(), scratch);
+      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+      // because 0 allows easier out-of-bounds detection.
+      __ cmn(i.OutputRegister(), Operand(1));
+      __ adc(i.OutputRegister(), i.OutputRegister(), Operand::Zero());
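+      // vcvt_u32_f32 saturates overflow to UINT32_MAX; 'cmn Rd, #1' produces
+      // a carry exactly for UINT32_MAX, so the adc wraps that saturation
+      // value around to 0.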
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
@@ -1140,11 +1301,11 @@
       break;
     }
     case kArmVmovU32F32:
-      __ vmov(i.OutputRegister(), i.InputFloatRegister(0));
+      __ vmov(i.OutputRegister(), i.InputFloat32Register(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovF32U32:
-      __ vmov(i.OutputFloatRegister(), i.InputRegister(0));
+      __ vmov(i.OutputFloat32Register(), i.InputRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovLowU32F64:
@@ -1167,6 +1328,11 @@
       __ vmov(i.OutputDoubleRegister(), i.InputRegister(0), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArmVmovU32U32F64:
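+      // Move both 32-bit halves of a double into a pair of core registers.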
+      __ vmov(i.OutputRegister(0), i.OutputRegister(1),
+              i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
     case kArmLdrb:
       __ ldrb(i.OutputRegister(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1197,12 +1363,12 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVldrF32: {
-      __ vldr(i.OutputFloatRegister(), i.InputOffset());
+      __ vldr(i.OutputFloat32Register(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVstrF32:
-      __ vstr(i.InputFloatRegister(0), i.InputOffset(1));
+      __ vstr(i.InputFloat32Register(0), i.InputOffset(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVldrF64:
@@ -1214,43 +1380,145 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmFloat32Max: {
-      CpuFeatureScope scope(masm(), ARMv8);
-      // (b < a) ? a : b
-      SwVfpRegister a = i.InputFloatRegister(0);
-      SwVfpRegister b = i.InputFloatRegister(1);
-      SwVfpRegister result = i.OutputFloatRegister();
-      __ VFPCompareAndSetFlags(a, b);
-      __ vsel(gt, result, a, b);
-      break;
-    }
-    case kArmFloat32Min: {
-      CpuFeatureScope scope(masm(), ARMv8);
-      // (a < b) ? a : b
-      SwVfpRegister a = i.InputFloatRegister(0);
-      SwVfpRegister b = i.InputFloatRegister(1);
-      SwVfpRegister result = i.OutputFloatRegister();
-      __ VFPCompareAndSetFlags(b, a);
-      __ vsel(gt, result, a, b);
+      FloatRegister left_reg = i.InputFloat32Register(0);
+      FloatRegister right_reg = i.InputFloat32Register(1);
+      FloatRegister result_reg = i.OutputFloat32Register();
+      Label result_is_nan, return_left, return_right, check_zero, done;
+      __ VFPCompareAndSetFlags(left_reg, right_reg);
+      __ b(mi, &return_right);
+      __ b(gt, &return_left);
+      __ b(vs, &result_is_nan);
+      // Left equals right => check for -0.
+      __ VFPCompareAndSetFlags(left_reg, 0.0);
+      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+        __ b(ne, &done);  // left == right != 0.
+      } else {
+        __ b(ne, &return_left);  // left == right != 0.
+      }
+      // At this point, both left and right are either 0 or -0.
+      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+      // the decision for vadd is easy because vand is a NEON instruction.
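+      // For signed zeros, (+0) + (-0) = +0, so the sum is -0 only when both
+      // inputs are -0, which is exactly the expected result for Max.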
+      __ vadd(result_reg, left_reg, right_reg);
+      __ b(&done);
+      __ bind(&result_is_nan);
+      __ vadd(result_reg, left_reg, right_reg);
+      __ b(&done);
+      __ bind(&return_right);
+      __ Move(result_reg, right_reg);
+      if (!left_reg.is(result_reg)) __ b(&done);
+      __ bind(&return_left);
+      __ Move(result_reg, left_reg);
+      __ bind(&done);
       break;
     }
     case kArmFloat64Max: {
-      CpuFeatureScope scope(masm(), ARMv8);
-      // (b < a) ? a : b
-      DwVfpRegister a = i.InputDoubleRegister(0);
-      DwVfpRegister b = i.InputDoubleRegister(1);
-      DwVfpRegister result = i.OutputDoubleRegister();
-      __ VFPCompareAndSetFlags(a, b);
-      __ vsel(gt, result, a, b);
+      DwVfpRegister left_reg = i.InputDoubleRegister(0);
+      DwVfpRegister right_reg = i.InputDoubleRegister(1);
+      DwVfpRegister result_reg = i.OutputDoubleRegister();
+      Label result_is_nan, return_left, return_right, check_zero, done;
+      __ VFPCompareAndSetFlags(left_reg, right_reg);
+      __ b(mi, &return_right);
+      __ b(gt, &return_left);
+      __ b(vs, &result_is_nan);
+      // Left equals right => check for -0.
+      __ VFPCompareAndSetFlags(left_reg, 0.0);
+      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+        __ b(ne, &done);  // left == right != 0.
+      } else {
+        __ b(ne, &return_left);  // left == right != 0.
+      }
+      // At this point, both left and right are either 0 or -0.
+      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+      // the decision for vadd is easy because vand is a NEON instruction.
+      __ vadd(result_reg, left_reg, right_reg);
+      __ b(&done);
+      __ bind(&result_is_nan);
+      __ vadd(result_reg, left_reg, right_reg);
+      __ b(&done);
+      __ bind(&return_right);
+      __ Move(result_reg, right_reg);
+      if (!left_reg.is(result_reg)) __ b(&done);
+      __ bind(&return_left);
+      __ Move(result_reg, left_reg);
+      __ bind(&done);
+      break;
+    }
+    case kArmFloat32Min: {
+      FloatRegister left_reg = i.InputFloat32Register(0);
+      FloatRegister right_reg = i.InputFloat32Register(1);
+      FloatRegister result_reg = i.OutputFloat32Register();
+      Label result_is_nan, return_left, return_right, check_zero, done;
+      __ VFPCompareAndSetFlags(left_reg, right_reg);
+      __ b(mi, &return_left);
+      __ b(gt, &return_right);
+      __ b(vs, &result_is_nan);
+      // Left equals right => check for -0.
+      __ VFPCompareAndSetFlags(left_reg, 0.0);
+      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+        __ b(ne, &done);  // left == right != 0.
+      } else {
+        __ b(ne, &return_left);  // left == right != 0.
+      }
+      // At this point, both left and right are either 0 or -0.
+      // We could use a single 'vorr' instruction here if we had NEON support.
+      // The algorithm is: -((-L) + (-R)), which in case of L and R being
+      // different registers is most efficiently expressed as -((-L) - R).
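+      // With signed zeros, the negation trick yields -0 whenever either
+      // input is -0, which is exactly the expected result for Min.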
+      __ vneg(left_reg, left_reg);
+      if (left_reg.is(right_reg)) {
+        __ vadd(result_reg, left_reg, right_reg);
+      } else {
+        __ vsub(result_reg, left_reg, right_reg);
+      }
+      __ vneg(result_reg, result_reg);
+      __ b(&done);
+      __ bind(&result_is_nan);
+      __ vadd(result_reg, left_reg, right_reg);
+      __ b(&done);
+      __ bind(&return_right);
+      __ Move(result_reg, right_reg);
+      if (!left_reg.is(result_reg)) __ b(&done);
+      __ bind(&return_left);
+      __ Move(result_reg, left_reg);
+      __ bind(&done);
       break;
     }
     case kArmFloat64Min: {
-      CpuFeatureScope scope(masm(), ARMv8);
-      // (a < b) ? a : b
-      DwVfpRegister a = i.InputDoubleRegister(0);
-      DwVfpRegister b = i.InputDoubleRegister(1);
-      DwVfpRegister result = i.OutputDoubleRegister();
-      __ VFPCompareAndSetFlags(b, a);
-      __ vsel(gt, result, a, b);
+      DwVfpRegister left_reg = i.InputDoubleRegister(0);
+      DwVfpRegister right_reg = i.InputDoubleRegister(1);
+      DwVfpRegister result_reg = i.OutputDoubleRegister();
+      Label result_is_nan, return_left, return_right, check_zero, done;
+      __ VFPCompareAndSetFlags(left_reg, right_reg);
+      __ b(mi, &return_left);
+      __ b(gt, &return_right);
+      __ b(vs, &result_is_nan);
+      // Left equals right => check for -0.
+      __ VFPCompareAndSetFlags(left_reg, 0.0);
+      if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+        __ b(ne, &done);  // left == right != 0.
+      } else {
+        __ b(ne, &return_left);  // left == right != 0.
+      }
+      // At this point, both left and right are either 0 or -0.
+      // We could use a single 'vorr' instruction here if we had NEON support.
+      // The algorithm is: -((-L) + (-R)), which in case of L and R being
+      // different registers is most efficiently expressed as -((-L) - R).
+      __ vneg(left_reg, left_reg);
+      if (left_reg.is(right_reg)) {
+        __ vadd(result_reg, left_reg, right_reg);
+      } else {
+        __ vsub(result_reg, left_reg, right_reg);
+      }
+      __ vneg(result_reg, result_reg);
+      __ b(&done);
+      __ bind(&result_is_nan);
+      __ vadd(result_reg, left_reg, right_reg);
+      __ b(&done);
+      __ bind(&return_right);
+      __ Move(result_reg, right_reg);
+      if (!left_reg.is(result_reg)) __ b(&done);
+      __ bind(&return_left);
+      __ Move(result_reg, left_reg);
+      __ bind(&done);
       break;
     }
     case kArmFloat64SilenceNaN: {
@@ -1267,7 +1535,7 @@
           frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
         } else {
           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
-          __ vpush(i.InputFloatRegister(0));
+          __ vpush(i.InputFloat32Register(0));
           frame_access_state()->IncreaseSPDelta(1);
         }
       } else {
@@ -1298,7 +1566,7 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FP(Float);
+      ASSEMBLE_CHECKED_LOAD_FP(Float32);
       break;
     case kCheckedLoadFloat64:
       ASSEMBLE_CHECKED_LOAD_FP(Double);
@@ -1313,7 +1581,7 @@
       ASSEMBLE_CHECKED_STORE_INTEGER(str);
       break;
     case kCheckedStoreFloat32:
-      ASSEMBLE_CHECKED_STORE_FP(Float);
+      ASSEMBLE_CHECKED_STORE_FP(Float32);
       break;
     case kCheckedStoreFloat64:
       ASSEMBLE_CHECKED_STORE_FP(Double);
@@ -1418,6 +1686,9 @@
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   __ CheckConstPool(false, false);
   return kSuccess;
@@ -1467,6 +1738,10 @@
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
+
+    if (!info()->GeneratePreagedPrologue()) {
+      unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
+    }
   }
 
   int shrink_slots = frame()->GetSpillSlotCount();
@@ -1530,6 +1805,8 @@
             DwVfpRegister::from_code(last));
   }
 
+  unwinding_info_writer_.MarkBlockWillExit();
+
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
@@ -1601,10 +1878,7 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int slot;
-          if (IsMaterializableFromFrame(src_object, &slot)) {
-            __ ldr(dst, g.SlotToMemOperand(slot));
-          } else if (IsMaterializableFromRoot(src_object, &index)) {
+          if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
             __ Move(dst, src_object);
@@ -1622,7 +1896,7 @@
         __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
         __ str(ip, dst);
       } else {
-        SwVfpRegister dst = g.ToFloatRegister(destination);
+        SwVfpRegister dst = g.ToFloat32Register(destination);
         __ vmov(dst, src.ToFloat32());
       }
     } else {
@@ -1636,50 +1910,23 @@
       }
     }
   } else if (source->IsFPRegister()) {
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
-      DwVfpRegister src = g.ToDoubleRegister(source);
-      if (destination->IsFPRegister()) {
-        DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        __ vstr(src, g.ToMemOperand(destination));
-      }
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsFPRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
     } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
-      SwVfpRegister src = g.ToFloatRegister(source);
-      if (destination->IsFPRegister()) {
-        SwVfpRegister dst = g.ToFloatRegister(destination);
-        __ Move(dst, src);
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        __ vstr(src, g.ToMemOperand(destination));
-      }
+      DCHECK(destination->IsFPStackSlot());
+      __ vstr(src, g.ToMemOperand(destination));
     }
   } else if (source->IsFPStackSlot()) {
     MemOperand src = g.ToMemOperand(source);
-    MachineRepresentation rep =
-        LocationOperand::cast(destination)->representation();
     if (destination->IsFPRegister()) {
-      if (rep == MachineRepresentation::kFloat64) {
         __ vldr(g.ToDoubleRegister(destination), src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
-        __ vldr(g.ToFloatRegister(destination), src);
-      }
     } else {
       DCHECK(destination->IsFPStackSlot());
-      if (rep == MachineRepresentation::kFloat64) {
         DwVfpRegister temp = kScratchDoubleReg;
         __ vldr(temp, src);
         __ vstr(temp, g.ToMemOperand(destination));
-      } else {
-        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
-        SwVfpRegister temp = kScratchDoubleReg.low();
-        __ vldr(temp, src);
-        __ vstr(temp, g.ToMemOperand(destination));
-      }
     }
   } else {
     UNREACHABLE();
@@ -1719,9 +1966,7 @@
     __ str(temp_0, dst);
     __ vstr(temp_1, src);
   } else if (source->IsFPRegister()) {
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
     LowDwVfpRegister temp = kScratchDoubleReg;
-    if (rep == MachineRepresentation::kFloat64) {
       DwVfpRegister src = g.ToDoubleRegister(source);
       if (destination->IsFPRegister()) {
         DwVfpRegister dst = g.ToDoubleRegister(destination);
@@ -1735,30 +1980,12 @@
         __ vldr(src, dst);
         __ vstr(temp, dst);
       }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
-      SwVfpRegister src = g.ToFloatRegister(source);
-      if (destination->IsFPRegister()) {
-        SwVfpRegister dst = g.ToFloatRegister(destination);
-        __ Move(temp.low(), src);
-        __ Move(src, dst);
-        __ Move(dst, temp.low());
-      } else {
-        DCHECK(destination->IsFPStackSlot());
-        MemOperand dst = g.ToMemOperand(destination);
-        __ Move(temp.low(), src);
-        __ vldr(src, dst);
-        __ vstr(temp.low(), dst);
-      }
-    }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
     LowDwVfpRegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
     MemOperand dst0 = g.ToMemOperand(destination);
-    MachineRepresentation rep = LocationOperand::cast(source)->representation();
-    if (rep == MachineRepresentation::kFloat64) {
       MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
       MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
       __ vldr(temp_1, dst0);  // Save destination in temp_1.
@@ -1767,13 +1994,6 @@
       __ ldr(temp_0, src1);
       __ str(temp_0, dst1);
       __ vstr(temp_1, src0);
-    } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
-      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ vstr(temp_1.low(), src0);
-    }
   } else {
     // No other combinations are possible.
     UNREACHABLE();
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index bc3336f..07c4033 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -27,6 +27,7 @@
   V(ArmMul)                        \
   V(ArmMla)                        \
   V(ArmMls)                        \
+  V(ArmSmull)                      \
   V(ArmSmmul)                      \
   V(ArmSmmla)                      \
   V(ArmUmull)                      \
@@ -99,13 +100,14 @@
   V(ArmVmovHighU32F64)             \
   V(ArmVmovHighF64U32)             \
   V(ArmVmovF64U32U32)              \
+  V(ArmVmovU32U32F64)              \
   V(ArmVldrF32)                    \
   V(ArmVstrF32)                    \
   V(ArmVldrF64)                    \
   V(ArmVstrF64)                    \
   V(ArmFloat32Max)                 \
-  V(ArmFloat32Min)                 \
   V(ArmFloat64Max)                 \
+  V(ArmFloat32Min)                 \
   V(ArmFloat64Min)                 \
   V(ArmFloat64SilenceNaN)          \
   V(ArmLdrb)                       \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index 065fe52..3f38e5d 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -30,6 +30,7 @@
     case kArmMla:
     case kArmMls:
     case kArmSmmul:
+    case kArmSmull:
     case kArmSmmla:
     case kArmUmull:
     case kArmSdiv:
@@ -101,10 +102,11 @@
     case kArmVmovHighU32F64:
     case kArmVmovHighF64U32:
     case kArmVmovF64U32U32:
-    case kArmFloat64Max:
-    case kArmFloat64Min:
+    case kArmVmovU32U32F64:
     case kArmFloat32Max:
+    case kArmFloat64Max:
     case kArmFloat32Min:
+    case kArmFloat64Min:
     case kArmFloat64SilenceNaN:
       return kNoOpcodeFlags;
 
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index e21e63f..4b0b6af 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -273,7 +273,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -338,6 +338,46 @@
   }
 }
 
+void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
+              InstructionOperand* output, Node* base, Node* index) {
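+  // Select an addressing mode for {index}: an immediate offset when it is
+  // encodable, an LSL-shifted register for plain kArmLdr, and a register
+  // offset otherwise.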
+  ArmOperandGenerator g(selector);
+  InstructionOperand inputs[3];
+  size_t input_count = 2;
+
+  inputs[0] = g.UseRegister(base);
+  if (g.CanBeImmediate(index, opcode)) {
+    inputs[1] = g.UseImmediate(index);
+    opcode |= AddressingModeField::encode(kMode_Offset_RI);
+  } else if ((opcode == kArmLdr) &&
+             TryMatchLSLImmediate(selector, &opcode, index, &inputs[1],
+                                  &inputs[2])) {
+    input_count = 3;
+  } else {
+    inputs[1] = g.UseRegister(index);
+    opcode |= AddressingModeField::encode(kMode_Offset_RR);
+  }
+  selector->Emit(opcode, 1, output, input_count, inputs);
+}
+
+void EmitStore(InstructionSelector* selector, InstructionCode opcode,
+               size_t input_count, InstructionOperand* inputs,
+               Node* index) {
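+  // Mirrors the addressing-mode selection in EmitLoad; {inputs} already
+  // holds the value and base operands supplied by the caller.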
+  ArmOperandGenerator g(selector);
+
+  if (g.CanBeImmediate(index, opcode)) {
+    inputs[input_count++] = g.UseImmediate(index);
+    opcode |= AddressingModeField::encode(kMode_Offset_RI);
+  } else if ((opcode == kArmStr) &&
+             TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
+                                  &inputs[3])) {
+    input_count = 4;
+  } else {
+    inputs[input_count++] = g.UseRegister(index);
+    opcode |= AddressingModeField::encode(kMode_Offset_RR);
+  }
+  selector->Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
 }  // namespace
 
 
@@ -346,9 +386,6 @@
   ArmOperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  InstructionOperand inputs[3];
-  size_t input_count = 0;
-  InstructionOperand outputs[1];
 
   InstructionCode opcode = kArchNop;
   switch (load_rep.representation()) {
@@ -365,6 +402,8 @@
     case MachineRepresentation::kWord16:
       opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
       break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord32:
       opcode = kArmLdr;
@@ -376,24 +415,8 @@
       return;
   }
 
-  outputs[0] = g.DefineAsRegister(node);
-  inputs[0] = g.UseRegister(base);
-
-  if (g.CanBeImmediate(index, opcode)) {
-    input_count = 2;
-    inputs[1] = g.UseImmediate(index);
-    opcode |= AddressingModeField::encode(kMode_Offset_RI);
-  } else if ((opcode == kArmLdr) &&
-             TryMatchLSLImmediate(this, &opcode, index, &inputs[1],
-                                  &inputs[2])) {
-    input_count = 3;
-  } else {
-    input_count = 2;
-    inputs[1] = g.UseRegister(index);
-    opcode |= AddressingModeField::encode(kMode_Offset_RR);
-  }
-
-  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+  InstructionOperand output = g.DefineAsRegister(node);
+  EmitLoad(this, opcode, &output, base, index);
 }
 
 
@@ -445,9 +468,6 @@
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
-    InstructionOperand inputs[4];
-    size_t input_count = 0;
-
     InstructionCode opcode = kArchNop;
     switch (rep) {
       case MachineRepresentation::kFloat32:
@@ -463,6 +483,8 @@
       case MachineRepresentation::kWord16:
         opcode = kArmStrh;
         break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
       case MachineRepresentation::kWord32:
         opcode = kArmStr;
@@ -474,26 +496,129 @@
         return;
     }
 
-    inputs[0] = g.UseRegister(value);
-    inputs[1] = g.UseRegister(base);
-
-    if (g.CanBeImmediate(index, opcode)) {
-      input_count = 3;
-      inputs[2] = g.UseImmediate(index);
-      opcode |= AddressingModeField::encode(kMode_Offset_RI);
-    } else if ((opcode == kArmStr) &&
-               TryMatchLSLImmediate(this, &opcode, index, &inputs[2],
-                                    &inputs[3])) {
-      input_count = 4;
-    } else {
-      input_count = 3;
-      inputs[2] = g.UseRegister(index);
-      opcode |= AddressingModeField::encode(kMode_Offset_RR);
-    }
-    Emit(opcode, 0, nullptr, input_count, inputs);
+    InstructionOperand inputs[4];
+    size_t input_count = 0;
+    inputs[input_count++] = g.UseRegister(value);
+    inputs[input_count++] = g.UseRegister(base);
+    EmitStore(this, opcode, input_count, inputs, index);
   }
 }
 
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+  UnalignedLoadRepresentation load_rep =
+      UnalignedLoadRepresentationOf(node->op());
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  InstructionCode opcode = kArmLdr;
+  // Only floating point loads need to be specially handled; integer loads
+  // support unaligned access. We support unaligned FP loads by loading to
+  // integer registers first, then moving to the destination FP register.
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32: {
+      InstructionOperand temp = g.TempRegister();
+      EmitLoad(this, opcode, &temp, base, index);
+      Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
+      return;
+    }
+    case MachineRepresentation::kFloat64: {
+      // TODO(arm): use vld1.8 for this when NEON is available.
+      // Compute the address of the least-significant half of the FP value.
+      // We assume that the base node is unlikely to be an encodable immediate
+      // or the result of a shift operation, so only consider the addressing
+      // mode that should be used for the index node.
+      InstructionCode add_opcode = kArmAdd;
+      InstructionOperand inputs[3];
+      inputs[0] = g.UseRegister(base);
+
+      size_t input_count;
+      if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
+                                   &inputs[1])) {
+        // input_count has been set by TryMatchImmediateOrShift(), so increment
+        // it to account for the base register in inputs[0].
+        input_count++;
+      } else {
+        add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
+        inputs[1] = g.UseRegister(index);
+        input_count = 2;  // Base register and index.
+      }
+
+      InstructionOperand addr = g.TempRegister();
+      Emit(add_opcode, 1, &addr, input_count, inputs);
+
+      // Load both halves and move to an FP register.
+      InstructionOperand fp_lo = g.TempRegister();
+      InstructionOperand fp_hi = g.TempRegister();
+      opcode |= AddressingModeField::encode(kMode_Offset_RI);
+      Emit(opcode, fp_lo, addr, g.TempImmediate(0));
+      Emit(opcode, fp_hi, addr, g.TempImmediate(4));
+      Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
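+      // The emitted sequence is roughly (register names are illustrative):
+      //   add addr, base, <index>
+      //   ldr fp_lo, [addr] ; ldr fp_hi, [addr, #4]
+      //   vmov dN, fp_lo, fp_hi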
+      return;
+    }
+    default:
+      // All other cases should support unaligned accesses.
+      UNREACHABLE();
+      return;
+  }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+
+  UnalignedStoreRepresentation store_rep =
+      UnalignedStoreRepresentationOf(node->op());
+
+  // Only floating point stores need to be specially handled; integer stores
+  // support unaligned access. We support unaligned FP stores by moving the
+  // value to integer registers first, then storing to the destination address.
+  switch (store_rep) {
+    case MachineRepresentation::kFloat32: {
+      inputs[input_count++] = g.TempRegister();
+      Emit(kArmVmovU32F32, inputs[0], g.UseRegister(value));
+      inputs[input_count++] = g.UseRegister(base);
+      EmitStore(this, kArmStr, input_count, inputs, index);
+      return;
+    }
+    case MachineRepresentation::kFloat64: {
+      // TODO(arm): use vst1.8 for this when NEON is available.
+      // Store a 64-bit floating point value using two 32-bit integer stores.
+      // Computing the store address here would require three live temporary
+      // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
+      // storing the least-significant half of the value.
+
+      // First, move the 64-bit FP value into two temporary integer registers.
+      InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
+      inputs[input_count++] = g.UseRegister(value);
+      Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count,
+           inputs);
+
+      // Store the least-significant half.
+      inputs[0] = fp[0];  // Low 32 bits of the FP value.
+      inputs[input_count++] = g.UseRegister(base);  // Base of the first store.
+      EmitStore(this, kArmStr, input_count, inputs, index);
+
+      // Store the most-significant half.
+      InstructionOperand base4 = g.TempRegister();
+      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
+           g.UseRegister(base), g.TempImmediate(4));  // Compute base + 4.
+      inputs[0] = fp[1];  // High 32 bits of the FP value.
+      inputs[1] = base4;  // Base address (base + 4) for the second store.
+      EmitStore(this, kArmStr, input_count, inputs, index);
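+      // The emitted sequence is roughly (register names are illustrative):
+      //   vmov fp_lo, fp_hi, dN
+      //   str fp_lo, [base, <index>]
+      //   add base4, base, #4 ; str fp_hi, [base4, <index>]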
+      return;
+    }
+    default:
+      // All other cases should support unaligned accesses.
+      UNREACHABLE();
+      return;
+  }
+}
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -519,6 +644,8 @@
       opcode = kCheckedLoadFloat64;
       break;
     case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
@@ -561,6 +688,8 @@
       opcode = kCheckedStoreFloat64;
       break;
     case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
@@ -758,7 +887,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -961,6 +1090,9 @@
   VisitRR(this, kArmRbit, node);
 }
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
@@ -1093,6 +1225,38 @@
   VisitBinop(this, node, kArmSub, kArmRsb);
 }
 
+namespace {
+
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand result_operand = g.DefineAsRegister(node);
+  InstructionOperand temp_operand = g.TempRegister();
+  InstructionOperand outputs[] = {result_operand, temp_operand};
+  InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
+                                 g.UseRegister(m.right().node())};
+  selector->Emit(kArmSmull, 2, outputs, 2, inputs);
+
+  // Overflow occurred iff the high word differs from the sign extension of
+  // the low word, so compare resulthigh against (resultlow ASR 31).
+  InstructionOperand shift_31 = g.UseImmediate(31);
+  InstructionCode opcode = cont->Encode(kArmCmp) |
+                           AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, g.NoOutput(), temp_operand, result_operand, shift_31,
+                   g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    InstructionOperand in[] = {temp_operand, result_operand, shift_31};
+    selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
+                             cont->frame_state());
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
+                   result_operand, shift_31);
+  }
+}
+
+}  // namespace
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
   ArmOperandGenerator g(this);
@@ -1260,76 +1424,30 @@
   VisitRRR(this, kArmVaddF64, node);
 }
 
-namespace {
-void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) {
-  ArmOperandGenerator g(selector);
-  Float32BinopMatcher m(node);
-  if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
-    Float32BinopMatcher mright(m.right().node());
-    selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
-                   g.UseRegister(m.left().node()),
-                   g.UseRegister(mright.left().node()),
-                   g.UseRegister(mright.right().node()));
-    return;
-  }
-  VisitRRR(selector, kArmVsubF32, node);
-}
-
-void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) {
-  ArmOperandGenerator g(selector);
-  Float64BinopMatcher m(node);
-  if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
-    Float64BinopMatcher mright(m.right().node());
-    selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
-                   g.UseRegister(m.left().node()),
-                   g.UseRegister(mright.left().node()),
-                   g.UseRegister(mright.right().node()));
-    return;
-  }
-  VisitRRR(selector, kArmVsubF64, node);
-}
-}  // namespace
-
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   ArmOperandGenerator g(this);
   Float32BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    Emit(kArmVnegF32, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+    Float32BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
     return;
   }
-  VisitFloat32SubHelper(this, node);
-}
-
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  VisitFloat32SubHelper(this, node);
+  VisitRRR(this, kArmVsubF32, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   ArmOperandGenerator g(this);
   Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
-      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
-        Float64BinopMatcher mright0(m.right().InputAt(0));
-        if (mright0.left().IsMinusZero()) {
-          Emit(kArmVrintpF64, g.DefineAsRegister(node),
-               g.UseRegister(mright0.right().node()));
-          return;
-        }
-      }
-    }
-    Emit(kArmVnegF64, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Float64BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
     return;
   }
-  VisitFloat64SubHelper(this, node);
-}
-
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
-  VisitFloat64SubHelper(this, node);
+  VisitRRR(this, kArmVsubF64, node);
 }
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
@@ -1359,12 +1477,10 @@
 }
 
 void InstructionSelector::VisitFloat32Max(Node* node) {
-  DCHECK(IsSupported(ARMv8));
   VisitRRR(this, kArmFloat32Max, node);
 }
 
 void InstructionSelector::VisitFloat64Max(Node* node) {
-  DCHECK(IsSupported(ARMv8));
   VisitRRR(this, kArmFloat64Max, node);
 }
 
@@ -1373,12 +1489,10 @@
 }
 
 void InstructionSelector::VisitFloat32Min(Node* node) {
-  DCHECK(IsSupported(ARMv8));
   VisitRRR(this, kArmFloat32Min, node);
 }
 
 void InstructionSelector::VisitFloat64Min(Node* node) {
-  DCHECK(IsSupported(ARMv8));
   VisitRRR(this, kArmFloat64Min, node);
 }
 
@@ -1476,7 +1590,7 @@
   // Prepare for C function call.
   if (descriptor->IsCFunctionCall()) {
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr);
 
     // Poke any stack arguments.
@@ -1515,7 +1629,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1561,6 +1675,101 @@
   }
 }
 
+// Check whether we can convert:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+// We only generate conditions <cond'> that are a combination of the N
+// and Z flags. This avoids the need to make this function dependent on
+// the flag-setting operation.
+bool CanUseFlagSettingBinop(FlagsCondition cond) {
+  switch (cond) {
+    case kEqual:
+    case kNotEqual:
+    case kSignedLessThan:
+    case kSignedGreaterThanOrEqual:
+    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
+    case kUnsignedGreaterThan:      // x > 0 -> x != 0
+      return true;
+    default:
+      return false;
+  }
+}
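+// For example, a branch on ((a + b) == 0) can be emitted as a flag-setting
+// 'adds' (or 'cmn' when the sum itself is unused) followed by a branch on
+// the Z flag.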
+
+// Map <cond> to <cond'> so that the following transformation is possible:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
+  DCHECK(CanUseFlagSettingBinop(cond));
+  switch (cond) {
+    case kEqual:
+    case kNotEqual:
+      return cond;
+    case kSignedLessThan:
+      return kNegative;
+    case kSignedGreaterThanOrEqual:
+      return kPositiveOrZero;
+    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
+      return kEqual;
+    case kUnsignedGreaterThan:  // x > 0 -> x != 0
+      return kNotEqual;
+    default:
+      UNREACHABLE();
+      return cond;
+  }
+}
+
+// Check if we can perform the transformation:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>, and if so,
+// updates {node}, {opcode} and {cont} accordingly.
+void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
+                                             Node** node, Node* binop,
+                                             InstructionCode* opcode,
+                                             FlagsCondition cond,
+                                             FlagsContinuation* cont) {
+  InstructionCode binop_opcode;
+  InstructionCode no_output_opcode;
+  switch (binop->opcode()) {
+    case IrOpcode::kInt32Add:
+      binop_opcode = kArmAdd;
+      no_output_opcode = kArmCmn;
+      break;
+    case IrOpcode::kWord32And:
+      binop_opcode = kArmAnd;
+      no_output_opcode = kArmTst;
+      break;
+    case IrOpcode::kWord32Or:
+      binop_opcode = kArmOrr;
+      no_output_opcode = kArmOrr;
+      break;
+    case IrOpcode::kWord32Xor:
+      binop_opcode = kArmEor;
+      no_output_opcode = kArmTeq;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (selector->CanCover(*node, binop)) {
+    // The comparison is the only user of {node}.
+    cont->Overwrite(MapForFlagSettingBinop(cond));
+    *opcode = no_output_opcode;
+    *node = binop;
+  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
+    // We can also handle the case where the {node} and the comparison are in
+    // the same basic block, and the comparison is the only user of {node} in
+    // this basic block ({node} has users in other basic blocks).
+    cont->Overwrite(MapForFlagSettingBinop(cond));
+    *opcode = binop_opcode;
+    *node = binop;
+  }
+}
 
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
@@ -1569,8 +1778,10 @@
   Int32BinopMatcher m(node);
   InstructionOperand inputs[5];
   size_t input_count = 0;
-  InstructionOperand outputs[1];
+  InstructionOperand outputs[2];
   size_t output_count = 0;
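+  // kArmCmp, kArmCmn, kArmTst and kArmTeq only set the condition flags and
+  // define no output register.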
+  bool has_result = (opcode != kArmCmp) && (opcode != kArmCmn) &&
+                    (opcode != kArmTst) && (opcode != kArmTeq);
 
   if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                                &input_count, &inputs[1])) {
@@ -1587,6 +1798,17 @@
     inputs[input_count++] = g.UseRegister(m.right().node());
   }
 
+  if (has_result) {
+    if (cont->IsDeoptimize()) {
+      // If we can deoptimize as a result of the binop, we need to make sure
+      // that the deopt inputs are not overwritten by the binop result. One way
+      // to achieve that is to declare the output register as same-as-first.
+      outputs[output_count++] = g.DefineSameAsFirst(node);
+    } else {
+      outputs[output_count++] = g.DefineAsRegister(node);
+    }
+  }
+
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
@@ -1601,7 +1823,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -1610,7 +1832,32 @@
 
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       FlagsContinuation* cont) {
-  VisitWordCompare(selector, node, kArmCmp, cont);
+  InstructionCode opcode = kArmCmp;
+  Int32BinopMatcher m(node);
+
+  FlagsCondition cond = cont->condition();
+  if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32Or() ||
+                          m.left().IsWord32And() || m.left().IsWord32Xor())) {
+    // Emit flag setting instructions for comparisons against zero.
+    if (CanUseFlagSettingBinop(cond)) {
+      Node* binop = m.left().node();
+      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+                                              cond, cont);
+    }
+  } else if (m.left().Is(0) &&
+             (m.right().IsInt32Add() || m.right().IsWord32Or() ||
+              m.right().IsWord32And() || m.right().IsWord32Xor())) {
+    // Same as above, but we need to commute the condition before we
+    // continue with the rest of the checks.
+    cond = CommuteFlagsCondition(cond);
+    if (CanUseFlagSettingBinop(cond)) {
+      Node* binop = m.right().node();
+      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+                                              cond, cont);
+    }
+  }
+
+  VisitWordCompare(selector, node, opcode, cont);
 }
 
 
@@ -1681,6 +1928,13 @@
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                // ARM doesn't set the overflow flag for multiplication, so we
+                // need to test on kNotEqual. Here is the code sequence used:
+                //   smull resultlow, resulthigh, left, right
+                //   cmp resulthigh, Operand(resultlow, ASR, 31)
+                cont->OverwriteAndNegateIfEqual(kNotEqual);
+                return EmitInt32MulWithOverflow(selector, node, cont);
               default:
                 break;
             }
@@ -1721,7 +1975,7 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@@ -1738,14 +1992,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1822,7 +2076,6 @@
   VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
 }
 
-
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1832,6 +2085,18 @@
   VisitBinop(this, node, kArmSub, kArmRsb, &cont);
 }
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    // ARM doesn't set the overflow flag for multiplication, so we need to test
+    // on kNotEqual. Here is the code sequence used:
+    //   smull resultlow, resulthigh, left, right
+    //   cmp resulthigh, Operand(resultlow, ASR, 31)
+    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+    return EmitInt32MulWithOverflow(this, node, &cont);
+  }
+  FlagsContinuation cont;
+  EmitInt32MulWithOverflow(this, node, &cont);
+}
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -1990,13 +2255,7 @@
              MachineOperatorBuilder::kFloat64RoundTruncate |
              MachineOperatorBuilder::kFloat64RoundTiesAway |
              MachineOperatorBuilder::kFloat32RoundTiesEven |
-             MachineOperatorBuilder::kFloat64RoundTiesEven |
-             MachineOperatorBuilder::kFloat32Min |
-             MachineOperatorBuilder::kFloat32Max |
-             MachineOperatorBuilder::kFloat64Min |
-             MachineOperatorBuilder::kFloat64Max |
-             MachineOperatorBuilder::kFloat32Neg |
-             MachineOperatorBuilder::kFloat64Neg;
+             MachineOperatorBuilder::kFloat64RoundTiesEven;
   }
   return flags;
 }
@@ -2004,8 +2263,11 @@
 // static
 MachineOperatorBuilder::AlignmentRequirements
 InstructionSelector::AlignmentRequirements() {
+  Vector<MachineType> req_aligned = Vector<MachineType>::New(2);
+  req_aligned[0] = MachineType::Float32();
+  req_aligned[1] = MachineType::Float64();
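+  // Only FP types require aligned accesses; unaligned integer accesses are
+  // supported directly, and unaligned FP accesses are lowered via integer
+  // loads/stores in VisitUnalignedLoad/VisitUnalignedStore.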
   return MachineOperatorBuilder::AlignmentRequirements::
-      FullUnalignedAccessSupport();
+      SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
 }
 
 }  // namespace compiler
diff --git a/src/compiler/arm/unwinding-info-writer-arm.cc b/src/compiler/arm/unwinding-info-writer-arm.cc
new file mode 100644
index 0000000..a950612
--- /dev/null
+++ b/src/compiler/arm/unwinding-info-writer-arm.cc
@@ -0,0 +1,108 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/arm/unwinding-info-writer-arm.h"
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
+                                                const InstructionBlock* block) {
+  if (!enabled()) return;
+
+  block_will_exit_ = false;
+
+  DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+  const BlockInitialState* initial_state =
+      block_initial_states_[block->rpo_number().ToInt()];
+  if (initial_state) {
+    if (initial_state->saved_lr_ != saved_lr_) {
+      eh_frame_writer_.AdvanceLocation(pc_offset);
+      if (initial_state->saved_lr_) {
+        eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+      } else {
+        eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+      }
+      saved_lr_ = initial_state->saved_lr_;
+    }
+  } else {
+    // The entry block always lacks an explicit initial state.
+    // The exit block may lack an explicit state, if it is only reached by
+    //   the block ending in a bx lr.
+    // All the other blocks must have an explicit initial state.
+    DCHECK(block->predecessors().empty() || block->successors().empty());
+  }
+}
+
+void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
+  if (!enabled() || block_will_exit_) return;
+
+  for (const RpoNumber& successor : block->successors()) {
+    int successor_index = successor.ToInt();
+    DCHECK_LT(successor_index, block_initial_states_.size());
+    const BlockInitialState* existing_state =
+        block_initial_states_[successor_index];
+
+    // If we already had an entry for this BB, check that the values match
+    // the ones we are trying to insert.
+    if (existing_state) {
+      DCHECK_EQ(existing_state->saved_lr_, saved_lr_);
+    } else {
+      block_initial_states_[successor_index] =
+          new (zone_) BlockInitialState(saved_lr_);
+    }
+  }
+}
+
+void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) {
+  if (!enabled()) return;
+
+  // Regardless of the type of frame constructed, the relevant part of the
+  // layout is always the one in the diagram:
+  //
+  // |   ....   |         higher addresses
+  // +----------+               ^
+  // |    LR    |               |            |
+  // +----------+               |            |
+  // | saved FP |               |            |
+  // +----------+ <-- FP                     v
+  // |   ....   |                       stack growth
+  //
+  // The LR is pushed on the stack, and we can record this fact at the end of
+  // the construction, since the LR itself is not modified in the process.
+  eh_frame_writer_.AdvanceLocation(at_pc);
+  eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+  saved_lr_ = true;
+}
+
+void UnwindingInfoWriter::MarkFrameDeconstructed(int at_pc) {
+  if (!enabled()) return;
+
+  // The lr is restored by the last operation in LeaveFrame().
+  eh_frame_writer_.AdvanceLocation(at_pc);
+  eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+  saved_lr_ = false;
+}
+
+void UnwindingInfoWriter::MarkLinkRegisterOnTopOfStack(int pc_offset) {
+  if (!enabled()) return;
+
+  eh_frame_writer_.AdvanceLocation(pc_offset);
+  eh_frame_writer_.SetBaseAddressRegisterAndOffset(sp, 0);
+  eh_frame_writer_.RecordRegisterSavedToStack(lr, 0);
+}
+
+void UnwindingInfoWriter::MarkPopLinkRegisterFromTopOfStack(int pc_offset) {
+  if (!enabled()) return;
+
+  eh_frame_writer_.AdvanceLocation(pc_offset);
+  eh_frame_writer_.SetBaseAddressRegisterAndOffset(fp, 0);
+  eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/unwinding-info-writer-arm.h b/src/compiler/arm/unwinding-info-writer-arm.h
new file mode 100644
index 0000000..d47ca08
--- /dev/null
+++ b/src/compiler/arm/unwinding-info-writer-arm.h
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+  explicit UnwindingInfoWriter(Zone* zone)
+      : zone_(zone),
+        eh_frame_writer_(zone),
+        saved_lr_(false),
+        block_will_exit_(false),
+        block_initial_states_(zone) {
+    if (enabled()) eh_frame_writer_.Initialize();
+  }
+
+  void SetNumberOfInstructionBlocks(int number) {
+    if (enabled()) block_initial_states_.resize(number);
+  }
+
+  void BeginInstructionBlock(int pc_offset, const InstructionBlock* block);
+  void EndInstructionBlock(const InstructionBlock* block);
+
+  void MarkLinkRegisterOnTopOfStack(int pc_offset);
+  void MarkPopLinkRegisterFromTopOfStack(int pc_offset);
+
+  void MarkFrameConstructed(int at_pc);
+  void MarkFrameDeconstructed(int at_pc);
+
+  void MarkBlockWillExit() { block_will_exit_ = true; }
+
+  void Finish(int code_size) {
+    if (enabled()) eh_frame_writer_.Finish(code_size);
+  }
+
+  EhFrameWriter* eh_frame_writer() {
+    return enabled() ? &eh_frame_writer_ : nullptr;
+  }
+
+ private:
+  bool enabled() const { return FLAG_perf_prof_unwinding_info; }
+
+  class BlockInitialState : public ZoneObject {
+   public:
+    explicit BlockInitialState(bool saved_lr) : saved_lr_(saved_lr) {}
+
+    bool saved_lr_;
+  };
+
+  Zone* zone_;
+  EhFrameWriter eh_frame_writer_;
+  bool saved_lr_;
+  bool block_will_exit_;
+
+  ZoneVector<const BlockInitialState*> block_initial_states_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 479af7a..35f7e43 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -119,6 +119,8 @@
         return Operand(InputRegister32(index), SXTB);
       case kMode_Operand2_R_SXTH:
         return Operand(InputRegister32(index), SXTH);
+      case kMode_Operand2_R_SXTW:
+        return Operand(InputRegister32(index), SXTW);
       case kMode_MRI:
       case kMode_MRR:
         break;
@@ -147,6 +149,8 @@
         return Operand(InputRegister64(index), SXTB);
       case kMode_Operand2_R_SXTH:
         return Operand(InputRegister64(index), SXTH);
+      case kMode_Operand2_R_SXTW:
+        return Operand(InputRegister64(index), SXTW);
       case kMode_MRI:
       case kMode_MRR:
         break;
@@ -166,6 +170,7 @@
       case kMode_Operand2_R_UXTH:
       case kMode_Operand2_R_SXTB:
       case kMode_Operand2_R_SXTH:
+      case kMode_Operand2_R_SXTW:
         break;
       case kMode_Operand2_R_LSL_I:
         *first_index += 3;
@@ -303,7 +308,8 @@
  public:
   OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
                        Register value, Register scratch0, Register scratch1,
-                       RecordWriteMode mode)
+                       RecordWriteMode mode,
+                       UnwindingInfoWriter* unwinding_info_writer)
       : OutOfLineCode(gen),
         object_(object),
         index_(index),
@@ -311,7 +317,8 @@
         scratch0_(scratch0),
         scratch1_(scratch1),
         mode_(mode),
-        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+        must_save_lr_(!gen->frame_access_state()->has_frame()),
+        unwinding_info_writer_(unwinding_info_writer) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -328,6 +335,8 @@
     if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Push(lr);
+      unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
+                                                           __ StackPointer());
     }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                          remembered_set_action, save_fp_mode);
@@ -335,6 +344,7 @@
     __ CallStub(&stub);
     if (must_save_lr_) {
       __ Pop(lr);
+      unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
     }
   }
 
@@ -346,6 +356,7 @@
   Register const scratch1_;
   RecordWriteMode const mode_;
   bool must_save_lr_;
+  UnwindingInfoWriter* const unwinding_info_writer_;
 };
 
 
@@ -394,6 +405,10 @@
     case kUnorderedEqual:
     case kUnorderedNotEqual:
       break;
+    case kPositiveOrZero:
+      return pl;
+    case kNegative:
+      return mi;
   }
   UNREACHABLE();
   return nv;
@@ -535,23 +550,11 @@
     __ Mov(jssp, fp);
   }
   __ Pop(fp, lr);
+
+  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ Drop(sp_slot_delta);
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ Claim(-sp_slot_delta);
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -584,6 +587,38 @@
   __ bind(&done);
 }
 
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+                                   FrameAccessState* state,
+                                   int new_slot_above_sp,
+                                   bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    masm->Claim(stack_slot_delta);
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    masm->Drop(-stack_slot_delta);
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
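
Note on the two-phase stack adjustment above (a reading of the new code, not
part of the patch): the stack pointer is moved in two steps around the tail
call's gap moves. A minimal sketch of the helper, with an int standing in for
the MacroAssembler/FrameAccessState state:

    // Simplified model of AdjustStackPointerForTailCall (not V8 code).
    void AdjustStackForTailCall(int* current_sp_offset, int new_slot_above_sp,
                                bool allow_shrinkage) {
      int stack_slot_delta = new_slot_above_sp - *current_sp_offset;
      if (stack_slot_delta > 0) {
        *current_sp_offset += stack_slot_delta;  // masm->Claim(): grow stack
      } else if (allow_shrinkage && stack_slot_delta < 0) {
        *current_sp_offset += stack_slot_delta;  // masm->Drop(): shrink stack
      }
    }

Growing before the gap moves guarantees the destination slots exist; shrinking
only afterwards keeps source slots alive until the moves have run, which is
why AssembleTailCallBeforeGap passes allow_shrinkage = false.
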
@@ -619,8 +654,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -634,15 +667,17 @@
         __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
         __ Jump(target);
       }
+      unwinding_info_writer_.MarkBlockWillExit();
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!instr->InputAt(0)->IsImmediate());
       __ Jump(i.InputRegister(0));
+      unwinding_info_writer_.MarkBlockWillExit();
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -685,8 +720,6 @@
         __ cmp(cp, temp);
         __ Assert(eq, kWrongFunctionContext);
       }
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -695,6 +728,7 @@
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(x10);
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction:
@@ -704,7 +738,7 @@
       UNREACHABLE();
       break;
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
@@ -732,6 +766,9 @@
     case kArchDebugBreak:
       __ Debug("kArchDebugBreak", 0, BREAK);
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -786,8 +823,9 @@
       Register value = i.InputRegister(2);
       Register scratch0 = i.TempRegister(0);
       Register scratch1 = i.TempRegister(1);
-      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
-                                                   scratch0, scratch1, mode);
+      auto ool = new (zone())
+          OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+                               mode, &unwinding_info_writer_);
       __ Str(value, MemOperand(object, index));
       __ CheckPageFlagSet(object, scratch0,
                           MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -807,15 +845,33 @@
       __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
     case kIeee754Float64Atan2:
       ASSEMBLE_IEEE754_BINOP(atan2);
       break;
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Cbrt:
       ASSEMBLE_IEEE754_UNOP(cbrt);
       break;
@@ -825,9 +881,6 @@
     case kIeee754Float64Expm1:
       ASSEMBLE_IEEE754_UNOP(expm1);
       break;
-    case kIeee754Float64Atanh:
-      ASSEMBLE_IEEE754_UNOP(atanh);
-      break;
     case kIeee754Float64Log:
       ASSEMBLE_IEEE754_UNOP(log);
       break;
@@ -840,12 +893,23 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
+    case kIeee754Float64Pow: {
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      break;
+    }
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kArm64Float32RoundDown:
       __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -892,12 +956,34 @@
       }
       break;
     case kArm64And:
-      __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
-             i.InputOperand2_64(1));
+      if (FlagsModeField::decode(opcode) != kFlags_none) {
+        // The ands instruction only sets N and Z, so only the following
+        // conditions make sense.
+        DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
+               FlagsConditionField::decode(opcode) == kNotEqual ||
+               FlagsConditionField::decode(opcode) == kPositiveOrZero ||
+               FlagsConditionField::decode(opcode) == kNegative);
+        __ Ands(i.OutputRegister(), i.InputOrZeroRegister64(0),
+                i.InputOperand2_64(1));
+      } else {
+        __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
+               i.InputOperand2_64(1));
+      }
       break;
     case kArm64And32:
-      __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
-             i.InputOperand2_32(1));
+      if (FlagsModeField::decode(opcode) != kFlags_none) {
+        // The ands instruction only sets N and Z, so only the following
+        // conditions make sense.
+        DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
+               FlagsConditionField::decode(opcode) == kNotEqual ||
+               FlagsConditionField::decode(opcode) == kPositiveOrZero ||
+               FlagsConditionField::decode(opcode) == kNegative);
+        __ Ands(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+                i.InputOperand2_32(1));
+      } else {
+        __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+               i.InputOperand2_32(1));
+      }
       break;
     case kArm64Bic:
       __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
@@ -1188,22 +1274,22 @@
       __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
       break;
     case kArm64Cmp:
-      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
+      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
       break;
     case kArm64Cmp32:
       __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Cmn:
-      __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
+      __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
       break;
     case kArm64Cmn32:
       __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Tst:
-      __ Tst(i.InputRegister(0), i.InputOperand(1));
+      __ Tst(i.InputOrZeroRegister64(0), i.InputOperand(1));
       break;
     case kArm64Tst32:
-      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
+      __ Tst(i.InputOrZeroRegister32(0), i.InputOperand32(1));
       break;
     case kArm64Float32Cmp:
       if (instr->InputAt(1)->IsFPRegister()) {
@@ -1231,18 +1317,6 @@
       __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
               i.InputFloat32Register(1));
       break;
-    case kArm64Float32Max:
-      // (b < a) ? a : b
-      __ Fcmp(i.InputFloat32Register(1), i.InputFloat32Register(0));
-      __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
-               i.InputFloat32Register(1), lo);
-      break;
-    case kArm64Float32Min:
-      // (a < b) ? a : b
-      __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
-      __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
-               i.InputFloat32Register(1), lo);
-      break;
     case kArm64Float32Abs:
       __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -1289,18 +1363,26 @@
                        0, 2);
       break;
     }
-    case kArm64Float64Max:
-      // (b < a) ? a : b
-      __ Fcmp(i.InputDoubleRegister(1), i.InputDoubleRegister(0));
-      __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-               i.InputDoubleRegister(1), lo);
+    case kArm64Float32Max: {
+      __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0),
+              i.InputFloat32Register(1));
       break;
-    case kArm64Float64Min:
-      // (a < b) ? a : b
-      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
-      __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-               i.InputDoubleRegister(1), lo);
+    }
+    case kArm64Float64Max: {
+      __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
       break;
+    }
+    case kArm64Float32Min: {
+      __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
+              i.InputFloat32Register(1));
+      break;
+    }
+    case kArm64Float64Min: {
+      __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    }
     case kArm64Float64Abs:
       __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
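
Replacing the Fcmp/Fcsel sequences with single Fmax/Fmin instructions also
changes the edge cases: the deleted "(b < a) ? a : b" selection passes one
input through unchanged when the other is NaN and does not order signed zeros,
while fmax/fmin propagate NaN and treat +0 as greater than -0. An illustrative
contrast (plain C++, not from the patch):

    #include <cmath>

    // Old lowering: compare and select, per the deleted comments.
    double select_max(double a, double b) {
      return (b < a) ? a : b;  // unordered compare falls through to b
    }

    // New lowering: fmax-style IEEE behaviour.
    double fmax_like(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return NAN;            // NaN wins
      if (a == 0.0 && b == 0.0) return std::signbit(a) ? b : a;  // +0 > -0
      return (b < a) ? a : b;
    }
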
@@ -1318,12 +1400,21 @@
       break;
     case kArm64Float32ToInt32:
       __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
+      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+      // because INT32_MIN allows easier out-of-bounds detection.
+      __ Cmn(i.OutputRegister32(), 1);
+      __ Csinc(i.OutputRegister32(), i.OutputRegister32(), i.OutputRegister32(),
+               vc);
       break;
     case kArm64Float64ToInt32:
       __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
       break;
     case kArm64Float32ToUint32:
       __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
+      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+      // because 0 allows easier out-of-bounds detection.
+      __ Cmn(i.OutputRegister32(), 1);
+      __ Adc(i.OutputRegister32(), i.OutputRegister32(), Operand(0));
       break;
     case kArm64Float64ToUint32:
       __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
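
Why the extra instructions after the conversions: fcvtzs and fcvtzu saturate
out-of-range inputs (to INT32_MAX/INT32_MIN and UINT32_MAX respectively), and
the added cmn/csinc and cmn/adc pairs collapse the positive saturation value
onto the single indicator named in the comments. A model of the signed case
(illustrative, not V8 code):

    #include <cstdint>
    #include <limits>

    // Models `cmn w0, #1; csinc w0, w0, w0, vc`: cmn sets V exactly when the
    // register holds INT32_MAX, and csinc then increments, wrapping the
    // value to INT32_MIN.
    int32_t CollapseSaturation(int32_t converted) {
      if (converted == std::numeric_limits<int32_t>::max()) {
        return std::numeric_limits<int32_t>::min();  // sole overflow marker
      }
      return converted;
    }

The unsigned variant works the same way: `cmn w0, #1` sets the carry exactly
when the register holds UINT32_MAX, and `adc w0, w0, #0` then wraps it to 0.
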
@@ -1450,6 +1541,9 @@
     case kArm64Strh:
       __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
+    case kArm64Ldrsw:
+      __ Ldrsw(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kArm64LdrW:
       __ Ldr(i.OutputRegister32(), i.MemoryOperand());
       break;
@@ -1665,6 +1759,9 @@
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -1717,6 +1814,10 @@
                         frame()->GetTotalFrameSlotCount());
       }
     }
+
+    if (!info()->GeneratePreagedPrologue()) {
+      unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
+    }
   }
 
   int shrink_slots = frame()->GetSpillSlotCount();
@@ -1776,6 +1877,8 @@
     __ PopCPURegList(saves_fp);
   }
 
+  unwinding_info_writer_.MarkBlockWillExit();
+
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
@@ -1836,10 +1939,7 @@
       if (src.type() == Constant::kHeapObject) {
         Handle<HeapObject> src_object = src.ToHeapObject();
         Heap::RootListIndex index;
-        int slot;
-        if (IsMaterializableFromFrame(src_object, &slot)) {
-          __ Ldr(dst, g.SlotToMemOperand(slot, masm()));
-        } else if (IsMaterializableFromRoot(src_object, &index)) {
+        if (IsMaterializableFromRoot(src_object, &index)) {
           __ LoadRoot(dst, index);
         } else {
           __ LoadObject(dst, src_object);
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index 2b5fe33..898a9e9 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -89,12 +89,12 @@
   V(Arm64Float32Sub)               \
   V(Arm64Float32Mul)               \
   V(Arm64Float32Div)               \
-  V(Arm64Float32Max)               \
-  V(Arm64Float32Min)               \
   V(Arm64Float32Abs)               \
   V(Arm64Float32Neg)               \
   V(Arm64Float32Sqrt)              \
   V(Arm64Float32RoundDown)         \
+  V(Arm64Float32Max)               \
+  V(Arm64Float32Min)               \
   V(Arm64Float64Cmp)               \
   V(Arm64Float64Add)               \
   V(Arm64Float64Sub)               \
@@ -149,6 +149,7 @@
   V(Arm64Ldrh)                     \
   V(Arm64Ldrsh)                    \
   V(Arm64Strh)                     \
+  V(Arm64Ldrsw)                    \
   V(Arm64LdrW)                     \
   V(Arm64StrW)                     \
   V(Arm64Ldr)                      \
@@ -177,7 +178,8 @@
   V(Operand2_R_UXTB)  /* %r0 UXTB (unsigned extend byte) */     \
   V(Operand2_R_UXTH)  /* %r0 UXTH (unsigned extend halfword) */ \
   V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */       \
-  V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */
+  V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */   \
+  V(Operand2_R_SXTW)  /* %r0 SXTW (signed extend word) */
 
 enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
 
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
index f3797c2..d3504df 100644
--- a/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -82,12 +82,12 @@
     case kArm64Float32Sub:
     case kArm64Float32Mul:
     case kArm64Float32Div:
-    case kArm64Float32Max:
-    case kArm64Float32Min:
     case kArm64Float32Abs:
     case kArm64Float32Neg:
     case kArm64Float32Sqrt:
     case kArm64Float32RoundDown:
+    case kArm64Float32Max:
+    case kArm64Float32Min:
     case kArm64Float64Cmp:
     case kArm64Float64Add:
     case kArm64Float64Sub:
@@ -146,6 +146,7 @@
     case kArm64Ldrsb:
     case kArm64Ldrh:
     case kArm64Ldrsh:
+    case kArm64Ldrsw:
     case kArm64LdrW:
     case kArm64Ldr:
       return kIsLoadOperation;
@@ -238,6 +239,7 @@
     case kArm64Ldrh:
     case kArm64Ldrsb:
     case kArm64Ldrsh:
+    case kArm64Ldrsw:
       return 11;
 
     case kCheckedLoadInt8:
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 637acac..9bc5385 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -161,6 +161,77 @@
                  g.UseOperand(node->InputAt(1), operand_mode));
 }
 
+struct ExtendingLoadMatcher {
+  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+    Initialize(node);
+  }
+
+  bool Matches() const { return matches_; }
+
+  Node* base() const {
+    DCHECK(Matches());
+    return base_;
+  }
+  int64_t immediate() const {
+    DCHECK(Matches());
+    return immediate_;
+  }
+  ArchOpcode opcode() const {
+    DCHECK(Matches());
+    return opcode_;
+  }
+
+ private:
+  bool matches_;
+  InstructionSelector* selector_;
+  Node* base_;
+  int64_t immediate_;
+  ArchOpcode opcode_;
+
+  void Initialize(Node* node) {
+    Int64BinopMatcher m(node);
+    // When loading a 64-bit value and shifting by 32, we should
+    // just load and sign-extend the interesting 4 bytes instead.
+    // This happens, for example, when we're loading and untagging SMIs.
+    DCHECK(m.IsWord64Sar());
+    if (m.left().IsLoad() && m.right().Is(32) &&
+        selector_->CanCover(m.node(), m.left().node())) {
+      Arm64OperandGenerator g(selector_);
+      Node* load = m.left().node();
+      Node* offset = load->InputAt(1);
+      base_ = load->InputAt(0);
+      opcode_ = kArm64Ldrsw;
+      if (g.IsIntegerConstant(offset)) {
+        immediate_ = g.GetIntegerConstantValue(offset) + 4;
+        matches_ = g.CanBeImmediate(immediate_, kLoadStoreImm32);
+      }
+    }
+  }
+};
+
+bool TryMatchExtendingLoad(InstructionSelector* selector, Node* node) {
+  ExtendingLoadMatcher m(node, selector);
+  return m.Matches();
+}
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+  ExtendingLoadMatcher m(node, selector);
+  Arm64OperandGenerator g(selector);
+  if (m.Matches()) {
+    InstructionOperand inputs[2];
+    inputs[0] = g.UseRegister(m.base());
+    InstructionCode opcode =
+        m.opcode() | AddressingModeField::encode(kMode_MRI);
+    DCHECK(is_int32(m.immediate()));
+    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+                   inputs);
+    return true;
+  }
+  return false;
+}
 
 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                       Node* input_node, InstructionCode* opcode, bool try_ror) {
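
The matcher relies on little-endian layout: the upper word of a 64-bit value
at [base, #imm] lives at [base, #imm + 4], so the Word64Sar(Load, 32) pattern
(typically Smi untagging) becomes one sign-extending 32-bit load, kArm64Ldrsw.
A source-level equivalent (assumes little-endian; not V8 code):

    #include <cstdint>
    #include <cstring>

    // Before: 64-bit load, then arithmetic shift right by 32.
    int64_t untag_slow(const int64_t* p) { return *p >> 32; }

    // After: load and sign-extend only the upper four bytes
    // (ldrsw x0, [x0, #4]).
    int64_t untag_fast(const int64_t* p) {
      int32_t hi;
      std::memcpy(&hi, reinterpret_cast<const char*>(p) + 4, sizeof(hi));
      return hi;  // implicitly sign-extended to 64 bits
    }
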
@@ -180,7 +251,10 @@
       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
       return true;
     case IrOpcode::kWord32Sar:
+      *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+      return true;
     case IrOpcode::kWord64Sar:
+      if (TryMatchExtendingLoad(selector, input_node)) return false;
       *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
       return true;
     case IrOpcode::kWord32Ror:
@@ -360,18 +434,24 @@
   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                               !is_add_sub)) {
     Matcher m_shift(right_node);
-    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
+    inputs[input_count++] = cont->IsDeoptimize()
+                                ? g.UseRegister(left_node)
+                                : g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                              !is_add_sub)) {
     if (must_commute_cond) cont->Commute();
     Matcher m_shift(left_node);
-    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
+    inputs[input_count++] = cont->IsDeoptimize()
+                                ? g.UseRegister(right_node)
+                                : g.UseRegisterOrImmediateZero(right_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else {
-    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
+    inputs[input_count++] = cont->IsDeoptimize()
+                                ? g.UseRegister(left_node)
+                                : g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(right_node);
   }
 
@@ -381,7 +461,14 @@
   }
 
   if (!IsComparisonField::decode(properties)) {
-    outputs[output_count++] = g.DefineAsRegister(node);
+    if (cont->IsDeoptimize()) {
+      // If we can deoptimize as a result of the binop, we need to make sure
+      // that the deopt inputs are not overwritten by the binop result. One way
+      // to achieve that is to declare the output register as same-as-first.
+      outputs[output_count++] = g.DefineSameAsFirst(node);
+    } else {
+      outputs[output_count++] = g.DefineAsRegister(node);
+    }
   }
 
   if (cont->IsSet()) {
@@ -396,7 +483,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -445,18 +532,43 @@
 
 }  // namespace
 
-
-void InstructionSelector::VisitLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  MachineRepresentation rep = load_rep.representation();
-  Arm64OperandGenerator g(this);
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+              ImmediateMode immediate_mode, MachineRepresentation rep,
+              Node* output = nullptr) {
+  Arm64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  InstructionCode opcode = kArchNop;
-  ImmediateMode immediate_mode = kNoImmediate;
   InstructionOperand inputs[3];
   size_t input_count = 0;
   InstructionOperand outputs[1];
+
+  // If output is not nullptr, use that as the output register. This
+  // is used when we merge a conversion into the load.
+  outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
+  inputs[0] = g.UseRegister(base);
+
+  if (g.CanBeImmediate(index, immediate_mode)) {
+    input_count = 2;
+    inputs[1] = g.UseImmediate(index);
+    opcode |= AddressingModeField::encode(kMode_MRI);
+  } else if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
+                                    &inputs[2])) {
+    input_count = 3;
+    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+  } else {
+    input_count = 2;
+    inputs[1] = g.UseRegister(index);
+    opcode |= AddressingModeField::encode(kMode_MRR);
+  }
+
+  selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+  InstructionCode opcode = kArchNop;
+  ImmediateMode immediate_mode = kNoImmediate;
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  MachineRepresentation rep = load_rep.representation();
   switch (rep) {
     case MachineRepresentation::kFloat32:
       opcode = kArm64LdrS;
@@ -479,6 +591,8 @@
       opcode = kArm64LdrW;
       immediate_mode = kLoadStoreImm32;
       break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord64:
       opcode = kArm64Ldr;
@@ -489,25 +603,7 @@
       UNREACHABLE();
       return;
   }
-
-  outputs[0] = g.DefineAsRegister(node);
-  inputs[0] = g.UseRegister(base);
-
-  if (g.CanBeImmediate(index, immediate_mode)) {
-    input_count = 2;
-    inputs[1] = g.UseImmediate(index);
-    opcode |= AddressingModeField::encode(kMode_MRI);
-  } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[1],
-                                    &inputs[2])) {
-    input_count = 3;
-    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
-  } else {
-    input_count = 2;
-    inputs[1] = g.UseRegister(index);
-    opcode |= AddressingModeField::encode(kMode_MRR);
-  }
-
-  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+  EmitLoad(this, node, opcode, immediate_mode, rep);
 }
 
 
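The extracted EmitLoad helper chooses between three addressing modes. Roughly,
they correspond to these source shapes (illustrative C++; the asm comments
show the intended forms, not guaranteed codegen):

    #include <cstdint>

    int64_t imm_offset(const int64_t* p) { return p[2]; }
    // ldr x0, [x0, #16]           kMode_MRI (offset fits the immediate)

    int64_t scaled_index(const int64_t* p, int64_t i) { return p[i]; }
    // ldr x0, [x0, x1, lsl #3]    kMode_Operand2_R_LSL_I (scaled register)

    int8_t reg_offset(const int8_t* p, int64_t i) { return p[i]; }
    // ldrsb w0, [x0, x1]          kMode_MRR (plain register offset)

The optional {output} parameter is what lets VisitChangeInt32ToInt64 below
merge a conversion into the load while defining the conversion's output
register.
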
@@ -587,6 +683,8 @@
         opcode = kArm64StrW;
         immediate_mode = kLoadStoreImm32;
         break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
       case MachineRepresentation::kWord64:
         opcode = kArm64Str;
@@ -619,6 +717,11 @@
   }
 }
 
+// The architecture supports unaligned access, therefore VisitLoad is used
+// instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, therefore VisitStore is used
+// instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -647,6 +750,8 @@
       opcode = kCheckedLoadFloat64;
       break;
     case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
@@ -697,6 +802,8 @@
       opcode = kCheckedStoreFloat64;
       break;
     case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
@@ -941,7 +1048,7 @@
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
-      m.right().IsInRange(32, 63)) {
+      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
     // There's no need to sign/zero-extend to 64-bit if we shift out the upper
     // 32 bits anyway.
     Emit(kArm64Lsl, g.DefineAsRegister(node),
@@ -1106,6 +1213,7 @@
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
+  if (TryEmitExtendingLoad(this, node)) return;
   VisitRRO(this, kArm64Asr, node, kShift64Imm);
 }
 
@@ -1147,6 +1255,9 @@
   VisitRR(this, kArm64Rbit, node);
 }
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
@@ -1257,6 +1368,33 @@
   VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
 }
 
+namespace {
+
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+                              FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand result = g.DefineAsRegister(node);
+  InstructionOperand left = g.UseRegister(m.left().node());
+  InstructionOperand right = g.UseRegister(m.right().node());
+  selector->Emit(kArm64Smull, result, left, right);
+
+  InstructionCode opcode = cont->Encode(kArm64Cmp) |
+                           AddressingModeField::encode(kMode_Operand2_R_SXTW);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, g.NoOutput(), result, result,
+                   g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    InstructionOperand in[] = {result, result};
+    selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
+                             cont->frame_state());
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
+  }
+}
+
+}  // namespace
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
   Arm64OperandGenerator g(this);
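
The overflow check quoted in the comments works because arm64 multiplies set
no overflow flag: smull produces the full 64-bit product, and comparing it
against the sign-extension of its own low 32 bits (cmp x, w, sxtw, using the
new kMode_Operand2_R_SXTW) detects exactly the products that do not fit. A
source-level model (not V8 code):

    #include <cstdint>

    bool MulWouldOverflow(int32_t a, int32_t b, int32_t* result) {
      int64_t wide = static_cast<int64_t>(a) * b;  // smull x0, w0, w1
      *result = static_cast<int32_t>(wide);
      return wide != static_cast<int64_t>(*result);  // cmp x0, w0, sxtw; b.ne
    }
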
@@ -1340,7 +1478,6 @@
   VisitRRR(this, kArm64Mul, node);
 }
 
-
 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   Arm64OperandGenerator g(this);
   InstructionOperand const smull_operand = g.TempRegister();
@@ -1516,7 +1653,35 @@
 
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
-  VisitRR(this, kArm64Sxtw, node);
+  Node* value = node->InputAt(0);
+  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+    // Generate sign-extending load.
+    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+    MachineRepresentation rep = load_rep.representation();
+    InstructionCode opcode = kArchNop;
+    ImmediateMode immediate_mode = kNoImmediate;
+    switch (rep) {
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
+        immediate_mode = kLoadStoreImm8;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
+        immediate_mode = kLoadStoreImm16;
+        break;
+      case MachineRepresentation::kWord32:
+        opcode = kArm64Ldrsw;
+        immediate_mode = kLoadStoreImm32;
+        break;
+      default:
+        UNREACHABLE();
+        return;
+    }
+    EmitLoad(this, value, opcode, immediate_mode, rep, node);
+  } else {
+    VisitRR(this, kArm64Sxtw, node);
+  }
 }
 
 
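With EmitLoad taking an optional output node, the Int32-to-Int64 conversion is
folded into the load whenever the conversion covers it; for example
(illustrative, not guaranteed codegen):

    #include <cstdint>

    int64_t widen(const int32_t* p) {
      return *p;  // before: ldr w0, [x0]; sxtw x0, w0
                  // after:  ldrsw x0, [x0]
    }
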
@@ -1590,18 +1755,9 @@
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
   Node* value = node->InputAt(0);
-  if (CanCover(node, value) && value->InputCount() >= 2) {
-    Int64BinopMatcher m(value);
-    if ((m.IsWord64Sar() && m.right().HasValue() &&
-         (m.right().Value() == 32)) ||
-        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
-      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-           g.UseImmediate(m.right().node()));
-      return;
-    }
-  }
-
-  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+  // The top 32 bits in the 64-bit register will be undefined, and
+  // must not be used by a dependent node.
+  Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
 }
 
 
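The rewritten truncation emits no instruction at all: the output is defined
same-as-first, and the comment records that the upper half of the register is
garbage. That is safe because 32-bit consumers read the w view of the
register; illustrative (not V8 code):

    #include <cstdint>

    int32_t narrow(int64_t v) {
      return static_cast<int32_t>(v);  // no code: later uses read w0,
                                       // the low 32 bits of x0
    }
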
@@ -1659,34 +1815,7 @@
   VisitRRR(this, kArm64Float32Sub, node);
 }
 
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  VisitRRR(this, kArm64Float32Sub, node);
-}
-
 void InstructionSelector::VisitFloat64Sub(Node* node) {
-  Arm64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
-      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
-        Float64BinopMatcher mright0(m.right().InputAt(0));
-        if (mright0.left().IsMinusZero()) {
-          Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
-               g.UseRegister(mright0.right().node()));
-          return;
-        }
-      }
-    }
-    Emit(kArm64Float64Neg, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
-    return;
-  }
-  VisitRRR(this, kArm64Float64Sub, node);
-}
-
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
   VisitRRR(this, kArm64Float64Sub, node);
 }
 
@@ -1717,22 +1846,18 @@
        g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
 }
 
-
 void InstructionSelector::VisitFloat32Max(Node* node) {
   VisitRRR(this, kArm64Float32Max, node);
 }
 
-
 void InstructionSelector::VisitFloat64Max(Node* node) {
   VisitRRR(this, kArm64Float64Max, node);
 }
 
-
 void InstructionSelector::VisitFloat32Min(Node* node) {
   VisitRRR(this, kArm64Float32Min, node);
 }
 
-
 void InstructionSelector::VisitFloat64Min(Node* node) {
   VisitRRR(this, kArm64Float64Min, node);
 }
@@ -1877,7 +2002,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1908,14 +2033,126 @@
   }
 }
 
+// This function checks whether we can convert:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+// We only generate conditions <cond'> that are a combination of the N
+// and Z flags. This avoids the need to make this function dependent on
+// the flag-setting operation.
+bool CanUseFlagSettingBinop(FlagsCondition cond) {
+  switch (cond) {
+    case kEqual:
+    case kNotEqual:
+    case kSignedLessThan:
+    case kSignedGreaterThanOrEqual:
+    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
+    case kUnsignedGreaterThan:      // x > 0 -> x != 0
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Map <cond> to <cond'> so that the following transformation is possible:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
+  DCHECK(CanUseFlagSettingBinop(cond));
+  switch (cond) {
+    case kEqual:
+    case kNotEqual:
+      return cond;
+    case kSignedLessThan:
+      return kNegative;
+    case kSignedGreaterThanOrEqual:
+      return kPositiveOrZero;
+    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
+      return kEqual;
+    case kUnsignedGreaterThan:  // x > 0 -> x != 0
+      return kNotEqual;
+    default:
+      UNREACHABLE();
+      return cond;
+  }
+}
+
+// This function checks if we can perform the transformation:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>, and if so,
+// updates {node}, {opcode}, {cont} and {immediate_mode} accordingly.
+void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
+                                             Node** node, Node* binop,
+                                             ArchOpcode* opcode,
+                                             FlagsCondition cond,
+                                             FlagsContinuation* cont,
+                                             ImmediateMode* immediate_mode) {
+  ArchOpcode binop_opcode;
+  ArchOpcode no_output_opcode;
+  ImmediateMode binop_immediate_mode;
+  switch (binop->opcode()) {
+    case IrOpcode::kInt32Add:
+      binop_opcode = kArm64Add32;
+      no_output_opcode = kArm64Cmn32;
+      binop_immediate_mode = kArithmeticImm;
+      break;
+    case IrOpcode::kWord32And:
+      binop_opcode = kArm64And32;
+      no_output_opcode = kArm64Tst32;
+      binop_immediate_mode = kLogical32Imm;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (selector->CanCover(*node, binop)) {
+    // The comparison is the only user of the add or and, so we can generate
+    // a cmn or tst instead.
+    cont->Overwrite(MapForFlagSettingBinop(cond));
+    *opcode = no_output_opcode;
+    *node = binop;
+    *immediate_mode = binop_immediate_mode;
+  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
+    // We can also handle the case where the add/and and the compare are in
+    // the same basic block, and the compare is the only use of the add/and
+    // in this basic block (the add/and may have uses in other blocks).
+    cont->Overwrite(MapForFlagSettingBinop(cond));
+    *opcode = binop_opcode;
+    *node = binop;
+    *immediate_mode = binop_immediate_mode;
+  }
+}
 
 void VisitWord32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
   Int32BinopMatcher m(node);
   ArchOpcode opcode = kArm64Cmp32;
-
-  // Select negated compare for comparisons with negated right input.
-  if (m.right().IsInt32Sub()) {
+  FlagsCondition cond = cont->condition();
+  ImmediateMode immediate_mode = kArithmeticImm;
+  if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
+    // Emit flag setting add/and instructions for comparisons against zero.
+    if (CanUseFlagSettingBinop(cond)) {
+      Node* binop = m.left().node();
+      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+                                              cond, cont, &immediate_mode);
+    }
+  } else if (m.left().Is(0) &&
+             (m.right().IsInt32Add() || m.right().IsWord32And())) {
+    // Same as above, but we need to commute the condition before we
+    // continue with the rest of the checks.
+    cond = CommuteFlagsCondition(cond);
+    if (CanUseFlagSettingBinop(cond)) {
+      Node* binop = m.right().node();
+      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+                                              cond, cont, &immediate_mode);
+    }
+  } else if (m.right().IsInt32Sub()) {
+    // Select negated compare for comparisons with negated right input.
     Node* sub = m.right().node();
     Int32BinopMatcher msub(sub);
     if (msub.left().Is(0)) {
@@ -1933,7 +2170,7 @@
       opcode = kArm64Cmn32;
     }
   }
-  VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
+  VisitBinop<Int32BinopMatcher>(selector, node, opcode, immediate_mode, cont);
 }
 
 
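Taken together, CanUseFlagSettingBinop, MapForFlagSettingBinop and
MaybeReplaceCmpZeroWithFlagSettingBinop fold a comparison against zero into
the add/and that produced the value, using the flag-setting adds/ands (or
cmn/tst when the result itself is dead). Source shapes this covers
(illustrative; the asm comments show the intent, not guaranteed output):

    bool and_is_negative(int a, int b) {
      return (a & b) < 0;   // kSignedLessThan -> kNegative:
                            //   tst w0, w1; cset w0, mi  (no cmp #0)
    }

    bool sum_is_non_negative(int a, int b) {
      return (a + b) >= 0;  // kSignedGreaterThanOrEqual -> kPositiveOrZero:
                            //   cmn w0, w1; cset w0, pl
    }
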
@@ -2123,6 +2360,13 @@
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop<Int32BinopMatcher>(
                     selector, node, kArm64Sub32, kArithmeticImm, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                // ARM64 doesn't set the overflow flag for multiplication, so we
+                // need to test on kNotEqual. Here is the code sequence used:
+                //   smull result, left, right
+                //   cmp result.X(), Operand(result, SXTW)
+                cont->OverwriteAndNegateIfEqual(kNotEqual);
+                return EmitInt32MulWithOverflow(selector, node, cont);
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
@@ -2171,7 +2415,7 @@
     DCHECK(cont->IsDeoptimize());
     selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
                              g.UseRegister(value), g.UseRegister(value),
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   }
 }
 
@@ -2184,14 +2428,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -2232,20 +2476,24 @@
     if (CanCover(user, value)) {
       switch (value->opcode()) {
         case IrOpcode::kInt32Add:
-          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
-                                  kArithmeticImm);
+        case IrOpcode::kWord32And:
+          return VisitWord32Compare(this, node, &cont);
         case IrOpcode::kInt32Sub:
           return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                   kArithmeticImm);
-        case IrOpcode::kWord32And:
-          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
-                                  kLogical32Imm);
         case IrOpcode::kWord32Equal: {
           // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
           Int32BinopMatcher mequal(value);
           node->ReplaceInput(0, mequal.left().node());
           node->ReplaceInput(1, mequal.right().node());
           cont.Negate();
+          // {node} still does not cover its new operands, because {mequal} is
+          // still using them.
+          // Since we won't generate any more code for {mequal}, set its
+          // operands to zero to make sure {node} can cover them.
+          // This improves pattern matching in VisitWord32Compare.
+          mequal.node()->ReplaceInput(0, m.right().node());
+          mequal.node()->ReplaceInput(1, m.right().node());
           return VisitWord32Compare(this, node, &cont);
         }
         default:
@@ -2326,6 +2574,18 @@
   VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
 }
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    // ARM64 doesn't set the overflow flag for multiplication, so we need to
+    // test on kNotEqual. Here is the code sequence used:
+    //   smull result, left, right
+    //   cmp result.X(), Operand(result, SXTW)
+    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+    return EmitInt32MulWithOverflow(this, node, &cont);
+  }
+  FlagsContinuation cont;
+  EmitInt32MulWithOverflow(this, node, &cont);
+}
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -2524,11 +2784,7 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kFloat32Max |
-         MachineOperatorBuilder::kFloat32Min |
-         MachineOperatorBuilder::kFloat32RoundDown |
-         MachineOperatorBuilder::kFloat64Max |
-         MachineOperatorBuilder::kFloat64Min |
+  return MachineOperatorBuilder::kFloat32RoundDown |
          MachineOperatorBuilder::kFloat64RoundDown |
          MachineOperatorBuilder::kFloat32RoundUp |
          MachineOperatorBuilder::kFloat64RoundUp |
@@ -2541,9 +2797,7 @@
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
          MachineOperatorBuilder::kWord32ReverseBits |
-         MachineOperatorBuilder::kWord64ReverseBits |
-         MachineOperatorBuilder::kFloat32Neg |
-         MachineOperatorBuilder::kFloat64Neg;
+         MachineOperatorBuilder::kWord64ReverseBits;
 }
 
 // static
diff --git a/src/compiler/arm64/unwinding-info-writer-arm64.cc b/src/compiler/arm64/unwinding-info-writer-arm64.cc
new file mode 100644
index 0000000..f4b732b
--- /dev/null
+++ b/src/compiler/arm64/unwinding-info-writer-arm64.cc
@@ -0,0 +1,109 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/arm64/unwinding-info-writer-arm64.h"
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
+                                                const InstructionBlock* block) {
+  if (!enabled()) return;
+
+  block_will_exit_ = false;
+
+  DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+  const BlockInitialState* initial_state =
+      block_initial_states_[block->rpo_number().ToInt()];
+  if (initial_state) {
+    if (initial_state->saved_lr_ != saved_lr_) {
+      eh_frame_writer_.AdvanceLocation(pc_offset);
+      if (initial_state->saved_lr_) {
+        eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+      } else {
+        eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+      }
+      saved_lr_ = initial_state->saved_lr_;
+    }
+  } else {
+    // The entry block always lacks an explicit initial state.
+    // The exit block may lack an explicit state if it is only reached by
+    //   blocks that end in a ret.
+    // All the other blocks must have an explicit initial state.
+    DCHECK(block->predecessors().empty() || block->successors().empty());
+  }
+}
+
+void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
+  if (!enabled() || block_will_exit_) return;
+
+  for (const RpoNumber& successor : block->successors()) {
+    int successor_index = successor.ToInt();
+    DCHECK_LT(successor_index, block_initial_states_.size());
+    const BlockInitialState* existing_state =
+        block_initial_states_[successor_index];
+
+    // If we already had an entry state for this block, check that it matches
+    // the one we are trying to insert.
+    if (existing_state) {
+      DCHECK_EQ(existing_state->saved_lr_, saved_lr_);
+    } else {
+      block_initial_states_[successor_index] =
+          new (zone_) BlockInitialState(saved_lr_);
+    }
+  }
+}
+
+void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) {
+  if (!enabled()) return;
+
+  // Regardless of the type of frame constructed, the relevant part of the
+  // layout is always the one in the diagram:
+  //
+  // |   ....   |         higher addresses
+  // +----------+               ^
+  // |    LR    |               |            |
+  // +----------+               |            |
+  // | saved FP |               |            |
+  // +----------+ <-- FP                     v
+  // |   ....   |                       stack growth
+  //
+  // The LR is pushed on the stack, and we can record this fact at the end of
+  // the construction, since the LR itself is not modified in the process.
+  eh_frame_writer_.AdvanceLocation(at_pc);
+  eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+  saved_lr_ = true;
+}
+
+void UnwindingInfoWriter::MarkFrameDeconstructed(int at_pc) {
+  if (!enabled()) return;
+
+  // The lr is restored by the last operation in LeaveFrame().
+  eh_frame_writer_.AdvanceLocation(at_pc);
+  eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+  saved_lr_ = false;
+}
+
+void UnwindingInfoWriter::MarkLinkRegisterOnTopOfStack(int pc_offset,
+                                                       const Register& sp) {
+  if (!enabled()) return;
+
+  eh_frame_writer_.AdvanceLocation(pc_offset);
+  eh_frame_writer_.SetBaseAddressRegisterAndOffset(sp, 0);
+  eh_frame_writer_.RecordRegisterSavedToStack(lr, 0);
+}
+
+void UnwindingInfoWriter::MarkPopLinkRegisterFromTopOfStack(int pc_offset) {
+  if (!enabled()) return;
+
+  eh_frame_writer_.AdvanceLocation(pc_offset);
+  eh_frame_writer_.SetBaseAddressRegisterAndOffset(fp, 0);
+  eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
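
The invariant behind BeginInstructionBlock and EndInstructionBlock is that a
block's entry state (is lr currently saved to the stack?) is either still
unrecorded or must agree across all predecessors that reach it. A
self-contained model of the propagation (a sketch, not V8 code):

    #include <cassert>
    #include <vector>

    // kUnknown marks a block no predecessor has reached yet.
    enum class SavedLr { kUnknown, kNo, kYes };

    void PropagateToSuccessors(std::vector<SavedLr>* entry_states,
                               const std::vector<int>& successors,
                               bool saved_lr) {
      SavedLr state = saved_lr ? SavedLr::kYes : SavedLr::kNo;
      for (int s : successors) {
        if ((*entry_states)[s] == SavedLr::kUnknown) {
          (*entry_states)[s] = state;  // first predecessor defines the state
        } else {
          assert((*entry_states)[s] == state);  // DCHECK_EQ in the writer
        }
      }
    }
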
diff --git a/src/compiler/arm64/unwinding-info-writer-arm64.h b/src/compiler/arm64/unwinding-info-writer-arm64.h
new file mode 100644
index 0000000..a532851
--- /dev/null
+++ b/src/compiler/arm64/unwinding-info-writer-arm64.h
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+  explicit UnwindingInfoWriter(Zone* zone)
+      : zone_(zone),
+        eh_frame_writer_(zone),
+        saved_lr_(false),
+        block_will_exit_(false),
+        block_initial_states_(zone) {
+    if (enabled()) eh_frame_writer_.Initialize();
+  }
+
+  void SetNumberOfInstructionBlocks(int number) {
+    if (enabled()) block_initial_states_.resize(number);
+  }
+
+  void BeginInstructionBlock(int pc_offset, const InstructionBlock* block);
+  void EndInstructionBlock(const InstructionBlock* block);
+
+  void MarkLinkRegisterOnTopOfStack(int pc_offset, const Register& sp);
+  void MarkPopLinkRegisterFromTopOfStack(int pc_offset);
+
+  void MarkFrameConstructed(int at_pc);
+  void MarkFrameDeconstructed(int at_pc);
+
+  void MarkBlockWillExit() { block_will_exit_ = true; }
+
+  void Finish(int code_size) {
+    if (enabled()) eh_frame_writer_.Finish(code_size);
+  }
+
+  EhFrameWriter* eh_frame_writer() {
+    return enabled() ? &eh_frame_writer_ : nullptr;
+  }
+
+ private:
+  bool enabled() const { return FLAG_perf_prof_unwinding_info; }
+
+  class BlockInitialState : public ZoneObject {
+   public:
+    explicit BlockInitialState(bool saved_lr) : saved_lr_(saved_lr) {}
+
+    bool saved_lr_;
+  };
+
+  Zone* zone_;
+  EhFrameWriter eh_frame_writer_;
+  bool saved_lr_;
+  bool block_will_exit_;
+
+  ZoneVector<const BlockInitialState*> block_initial_states_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index d8d60f3..0f1fb29 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -40,14 +40,14 @@
 
   // Plug a node into this expression context.  Call this function in tail
   // position in the Visit functions for expressions.
-  virtual void ProduceValue(Node* value) = 0;
+  virtual void ProduceValue(Expression* expr, Node* value) = 0;
 
   // Unplugs a node from this expression context.  Call this to retrieve the
   // result of another Visit function that already plugged the context.
   virtual Node* ConsumeValue() = 0;
 
   // Shortcut for "context->ProduceValue(context->ConsumeValue())".
-  void ReplaceValue() { ProduceValue(ConsumeValue()); }
+  void ReplaceValue(Expression* expr) { ProduceValue(expr, ConsumeValue()); }
 
  protected:
   AstContext(AstGraphBuilder* owner, Expression::Context kind);
@@ -75,7 +75,7 @@
   explicit AstEffectContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kEffect) {}
   ~AstEffectContext() final;
-  void ProduceValue(Node* value) final;
+  void ProduceValue(Expression* expr, Node* value) final;
   Node* ConsumeValue() final;
 };
 
@@ -86,7 +86,7 @@
   explicit AstValueContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kValue) {}
   ~AstValueContext() final;
-  void ProduceValue(Node* value) final;
+  void ProduceValue(Expression* expr, Node* value) final;
   Node* ConsumeValue() final;
 };
 
@@ -97,7 +97,7 @@
   AstTestContext(AstGraphBuilder* owner, TypeFeedbackId feedback_id)
       : AstContext(owner, Expression::kTest), feedback_id_(feedback_id) {}
   ~AstTestContext() final;
-  void ProduceValue(Node* value) final;
+  void ProduceValue(Expression* expr, Node* value) final;
   Node* ConsumeValue() final;
 
  private:
@@ -178,14 +178,14 @@
 
   // Interface to execute a given command in this scope. Returning {true} here
   // indicates successful execution whereas {false} requests to skip scope.
-  virtual bool Execute(Command cmd, Statement* target, Node* value) {
+  virtual bool Execute(Command cmd, Statement* target, Node** value) {
     // For function-level control.
     switch (cmd) {
       case CMD_THROW:
-        builder()->BuildThrow(value);
+        builder()->BuildThrow(*value);
         return true;
       case CMD_RETURN:
-        builder()->BuildReturn(value);
+        builder()->BuildReturn(*value);
         return true;
       case CMD_BREAK:
       case CMD_CONTINUE:
@@ -282,8 +282,7 @@
   }
   Node* NewPathDispatchCondition(Node* t1, Node* t2) {
     return owner_->NewNode(
-        owner_->javascript()->StrictEqual(CompareOperationHints::Any()), t1,
-        t2);
+        owner_->javascript()->StrictEqual(CompareOperationHint::kAny), t1, t2);
   }
 
  private:
@@ -303,7 +302,7 @@
       : ControlScope(owner), target_(target), control_(control) {}
 
  protected:
-  bool Execute(Command cmd, Statement* target, Node* value) override {
+  bool Execute(Command cmd, Statement* target, Node** value) override {
     if (target != target_) return false;  // We are not the command target.
     switch (cmd) {
       case CMD_BREAK:
@@ -331,8 +330,11 @@
       : ControlScope(owner), target_(target), control_(control) {}
 
  protected:
-  bool Execute(Command cmd, Statement* target, Node* value) override {
-    if (target != target_) return false;  // We are not the command target.
+  bool Execute(Command cmd, Statement* target, Node** value) override {
+    if (target != target_) {
+      control_->ExitLoop(value);
+      return false;
+    }
     switch (cmd) {
       case CMD_BREAK:
         control_->Break();
@@ -356,21 +358,20 @@
 // Control scope implementation for a TryCatchStatement.
 class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
  public:
-  ControlScopeForCatch(AstGraphBuilder* owner, TryCatchBuilder* control)
+  ControlScopeForCatch(AstGraphBuilder* owner, TryCatchStatement* stmt,
+                       TryCatchBuilder* control)
       : ControlScope(owner), control_(control) {
     builder()->try_nesting_level_++;  // Increment nesting.
-    builder()->try_catch_nesting_level_++;
   }
   ~ControlScopeForCatch() {
     builder()->try_nesting_level_--;  // Decrement nesting.
-    builder()->try_catch_nesting_level_--;
   }
 
  protected:
-  bool Execute(Command cmd, Statement* target, Node* value) override {
+  bool Execute(Command cmd, Statement* target, Node** value) override {
     switch (cmd) {
       case CMD_THROW:
-        control_->Throw(value);
+        control_->Throw(*value);
         return true;
       case CMD_BREAK:
       case CMD_CONTINUE:
@@ -388,8 +389,8 @@
 // Control scope implementation for a TryFinallyStatement.
 class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
  public:
-  ControlScopeForFinally(AstGraphBuilder* owner, DeferredCommands* commands,
-                         TryFinallyBuilder* control)
+  ControlScopeForFinally(AstGraphBuilder* owner, TryFinallyStatement* stmt,
+                         DeferredCommands* commands, TryFinallyBuilder* control)
       : ControlScope(owner), commands_(commands), control_(control) {
     builder()->try_nesting_level_++;  // Increment nesting.
   }
@@ -398,9 +399,9 @@
   }
 
  protected:
-  bool Execute(Command cmd, Statement* target, Node* value) override {
-    Node* token = commands_->RecordCommand(cmd, target, value);
-    control_->LeaveTry(token, value);
+  bool Execute(Command cmd, Statement* target, Node** value) override {
+    Node* token = commands_->RecordCommand(cmd, target, *value);
+    control_->LeaveTry(token, *value);
     return true;
   }
 
@@ -410,60 +411,6 @@
 };
 
 
-// Helper for generating before and after frame states.
-class AstGraphBuilder::FrameStateBeforeAndAfter {
- public:
-  FrameStateBeforeAndAfter(AstGraphBuilder* builder, BailoutId id_before)
-      : builder_(builder), frame_state_before_(nullptr) {
-    frame_state_before_ = id_before == BailoutId::None()
-                              ? builder_->GetEmptyFrameState()
-                              : builder_->environment()->Checkpoint(id_before);
-    if (id_before != BailoutId::None()) {
-      // Create an explicit checkpoint node for before the operation.
-      Node* node = builder_->NewNode(builder_->common()->Checkpoint());
-      DCHECK_EQ(IrOpcode::kDead,
-                NodeProperties::GetFrameStateInput(node, 0)->opcode());
-      NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_before_);
-    }
-  }
-
-  void AddToNode(
-      Node* node, BailoutId id_after,
-      OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore()) {
-    int count = OperatorProperties::GetFrameStateInputCount(node->op());
-    DCHECK_LE(count, 2);
-
-    if (count >= 1) {
-      // Add the frame state for after the operation.
-      DCHECK_EQ(IrOpcode::kDead,
-                NodeProperties::GetFrameStateInput(node, 0)->opcode());
-
-      bool node_has_exception = NodeProperties::IsExceptionalCall(node);
-
-      Node* frame_state_after =
-          id_after == BailoutId::None()
-              ? builder_->GetEmptyFrameState()
-              : builder_->environment()->Checkpoint(id_after, combine,
-                                                    node_has_exception);
-
-      NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
-    }
-
-    if (count >= 2) {
-      // Add the frame state for before the operation.
-      // TODO(mstarzinger): Get rid of frame state input before!
-      DCHECK_EQ(IrOpcode::kDead,
-                NodeProperties::GetFrameStateInput(node, 1)->opcode());
-      NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
-    }
-  }
-
- private:
-  AstGraphBuilder* builder_;
-  Node* frame_state_before_;
-};
-
-
 AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
                                  JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
                                  TypeHintAnalysis* type_hint_analysis)
@@ -476,7 +423,6 @@
       globals_(0, local_zone),
       execution_control_(nullptr),
       execution_context_(nullptr),
-      try_catch_nesting_level_(0),
       try_nesting_level_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
@@ -494,7 +440,7 @@
 
 
 Node* AstGraphBuilder::GetFunctionClosureForContext() {
-  Scope* closure_scope = current_scope()->ClosureScope();
+  DeclarationScope* closure_scope = current_scope()->GetClosureScope();
   if (closure_scope->is_script_scope() ||
       closure_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function as
@@ -561,7 +507,7 @@
 }
 
 bool AstGraphBuilder::CreateGraph(bool stack_check) {
-  Scope* scope = info()->scope();
+  DeclarationScope* scope = info()->scope();
   DCHECK_NOT_NULL(graph());
 
   // Set up the basic structure of the graph. Outputs for {Start} are the formal
@@ -621,7 +567,7 @@
 
 
 void AstGraphBuilder::CreateGraphBody(bool stack_check) {
-  Scope* scope = info()->scope();
+  DeclarationScope* scope = info()->scope();
 
   // Build the arguments object if it is used.
   BuildArgumentsObject(scope->arguments());
@@ -687,8 +633,8 @@
                                                    : BailoutId::None();
 }
 
-
-static const char* GetDebugParameterName(Zone* zone, Scope* scope, int index) {
+static const char* GetDebugParameterName(Zone* zone, DeclarationScope* scope,
+                                         int index) {
 #if DEBUG
   const AstRawString* name = scope->parameter(index)->raw_name();
   if (name && name->length() > 0) {
@@ -701,9 +647,8 @@
   return nullptr;
 }
 
-
 AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
-                                          Scope* scope,
+                                          DeclarationScope* scope,
                                           Node* control_dependency)
     : builder_(builder),
       parameters_count_(scope->num_parameters() + 1),
@@ -926,6 +871,34 @@
   return result;
 }
 
+void AstGraphBuilder::Environment::PrepareForLoopExit(
+    Node* loop, BitVector* assigned_variables) {
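+  // Introduce an explicit LoopExit and rename the environment through
+  // LoopExitValue/LoopExitEffect nodes, so that values and effects escaping
+  // the loop are properly scoped to it.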
+  if (IsMarkedAsUnreachable()) return;
+
+  DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
+
+  Node* control = GetControlDependency();
+
+  // Create the loop exit node.
+  Node* loop_exit = graph()->NewNode(common()->LoopExit(), control, loop);
+  UpdateControlDependency(loop_exit);
+
+  // Rename the environment values.
+  for (size_t i = 0; i < values()->size(); i++) {
+    if (assigned_variables == nullptr ||
+        static_cast<int>(i) >= assigned_variables->length() ||
+        assigned_variables->Contains(static_cast<int>(i))) {
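+      // Rename when no assignment analysis is available, when the index lies
+      // beyond the analysed bit vector, or when the variable is actually
+      // assigned within the loop.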
+      Node* rename = graph()->NewNode(common()->LoopExitValue(), (*values())[i],
+                                      loop_exit);
+      (*values())[i] = rename;
+    }
+  }
+
+  // Rename the effect.
+  Node* effect_rename = graph()->NewNode(common()->LoopExitEffect(),
+                                         GetEffectDependency(), loop_exit);
+  UpdateEffectDependency(effect_rename);
+}
 
 bool AstGraphBuilder::Environment::IsLivenessAnalysisEnabled() {
   return FLAG_analyze_environment_liveness &&
@@ -968,19 +941,22 @@
   DCHECK(environment()->stack_height() == original_height_ + 1);
 }
 
-
-void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
+void AstGraphBuilder::AstEffectContext::ProduceValue(Expression* expr,
+                                                     Node* value) {
   // The value is ignored.
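+  // Still take an eager checkpoint at the expression id, so operations that
+  // follow can deoptimize to the state after this (discarded) value.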
+  owner()->PrepareEagerCheckpoint(expr->id());
 }
 
-
-void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
+void AstGraphBuilder::AstValueContext::ProduceValue(Expression* expr,
+                                                    Node* value) {
   environment()->Push(value);
+  owner()->PrepareEagerCheckpoint(expr->id());
 }
 
-
-void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
+void AstGraphBuilder::AstTestContext::ProduceValue(Expression* expr,
+                                                   Node* value) {
   environment()->Push(owner()->BuildToBoolean(value, feedback_id_));
+  owner()->PrepareEagerCheckpoint(expr->id());
 }
 
 
@@ -1015,7 +991,7 @@
   while (current != nullptr) {
     environment()->TrimStack(current->stack_height());
     environment()->TrimContextChain(current->context_length());
-    if (current->Execute(command, target, value)) break;
+    if (current->Execute(command, target, &value)) break;
     current = current->outer_;
   }
   builder()->set_environment(env);
@@ -1069,9 +1045,9 @@
 void AstGraphBuilder::VisitForValue(Expression* expr) {
   AstValueContext for_value(this);
   if (!CheckStackOverflow()) {
-    expr->Accept(this);
+    VisitNoStackOverflowCheck(expr);
   } else {
-    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+    ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
   }
 }
 
@@ -1079,9 +1055,9 @@
 void AstGraphBuilder::VisitForEffect(Expression* expr) {
   AstEffectContext for_effect(this);
   if (!CheckStackOverflow()) {
-    expr->Accept(this);
+    VisitNoStackOverflowCheck(expr);
   } else {
-    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+    ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
   }
 }
 
@@ -1089,9 +1065,9 @@
 void AstGraphBuilder::VisitForTest(Expression* expr) {
   AstTestContext for_condition(this, expr->test_id());
   if (!CheckStackOverflow()) {
-    expr->Accept(this);
+    VisitNoStackOverflowCheck(expr);
   } else {
-    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+    ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
   }
 }
 
@@ -1099,46 +1075,49 @@
 void AstGraphBuilder::Visit(Expression* expr) {
   // Reuses enclosing AstContext.
   if (!CheckStackOverflow()) {
-    expr->Accept(this);
+    VisitNoStackOverflowCheck(expr);
   } else {
-    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+    ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
   }
 }
 
 
 void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
-  VariableMode mode = decl->mode();
-  bool hole_init = mode == CONST || mode == LET;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
-    case VariableLocation::UNALLOCATED:
+    case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      globals()->push_back(variable->name());
+      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
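+      // Globals are declared as (feedback slot, value) pairs; the slot ties
+      // the declaration to the feedback vector that VisitDeclarations now
+      // passes to Runtime::kDeclareGlobals.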
+      globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
       globals()->push_back(isolate()->factory()->undefined_value());
       break;
+    }
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL:
-      if (hole_init) {
+      if (variable->binding_needs_init()) {
         Node* value = jsgraph()->TheHoleConstant();
         environment()->Bind(variable, value);
       }
       break;
     case VariableLocation::CONTEXT:
-      if (hole_init) {
+      if (variable->binding_needs_init()) {
         Node* value = jsgraph()->TheHoleConstant();
         const Operator* op = javascript()->StoreContext(0, variable->index());
         NewNode(op, current_context(), value);
       }
       break;
     case VariableLocation::LOOKUP: {
-      DCHECK(!hole_init);
+      DCHECK(!variable->binding_needs_init());
       Node* name = jsgraph()->Constant(variable->name());
       const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
       Node* store = NewNode(op, name);
       PrepareFrameState(store, decl->proxy()->id());
       break;
     }
+    case VariableLocation::MODULE:
+      UNREACHABLE();
   }
 }
 
@@ -1152,7 +1131,9 @@
           decl->fun(), info()->script(), info());
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
-      globals()->push_back(variable->name());
+      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
       globals()->push_back(function);
       break;
     }
@@ -1180,20 +1161,12 @@
       PrepareFrameState(store, decl->proxy()->id());
       break;
     }
+    case VariableLocation::MODULE:
+      UNREACHABLE();
   }
 }
 
 
-void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
-void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
 void AstGraphBuilder::VisitBlock(Block* stmt) {
   BlockBuilder block(this);
   ControlScopeForBreakable scope(this, stmt, &block);
@@ -1300,14 +1273,14 @@
     Node* label = environment()->Pop();
     Node* tag = environment()->Top();
 
-    CompareOperationHints hints;
+    CompareOperationHint hint;
     if (!type_hint_analysis_ ||
-        !type_hint_analysis_->GetCompareOperationHints(clause->CompareId(),
-                                                       &hints)) {
-      hints = CompareOperationHints::Any();
+        !type_hint_analysis_->GetCompareOperationHint(clause->CompareId(),
+                                                      &hint)) {
+      hint = CompareOperationHint::kAny;
     }
 
-    const Operator* op = javascript()->StrictEqual(hints);
+    const Operator* op = javascript()->StrictEqual(hint);
     Node* condition = NewNode(op, tag, label);
     compare_switch.BeginLabel(i, condition);
 
@@ -1337,7 +1310,7 @@
 void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   LoopBuilder while_loop(this);
   while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
-  VisitIterationBody(stmt, &while_loop);
+  VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
   while_loop.EndBody();
   VisitForTest(stmt->cond());
   Node* condition = environment()->Pop();
@@ -1352,7 +1325,7 @@
   VisitForTest(stmt->cond());
   Node* condition = environment()->Pop();
   while_loop.BreakUnless(condition);
-  VisitIterationBody(stmt, &while_loop);
+  VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
   while_loop.EndBody();
   while_loop.EndLoop();
 }
@@ -1369,7 +1342,7 @@
   } else {
     for_loop.BreakUnless(jsgraph()->TrueConstant());
   }
-  VisitIterationBody(stmt, &for_loop);
+  VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
   for_loop.EndBody();
   VisitIfNotNull(stmt->next());
   for_loop.EndLoop();
@@ -1383,11 +1356,11 @@
   for_block.BeginBlock();
   // Check for null or undefined before entering loop.
   Node* is_null_cond =
-      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), object,
+      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
               jsgraph()->NullConstant());
   for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
   Node* is_undefined_cond =
-      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), object,
+      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
               jsgraph()->UndefinedConstant());
   for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
   {
@@ -1432,18 +1405,21 @@
                         OutputFrameStateCombine::Push());
       IfBuilder test_value(this);
       Node* test_value_cond =
-          NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
-                  value, jsgraph()->UndefinedConstant());
+          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), value,
+                  jsgraph()->UndefinedConstant());
       test_value.If(test_value_cond, BranchHint::kFalse);
       test_value.Then();
       test_value.Else();
       {
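+        // Temporarily push {value} so the eager checkpoint taken at FilterId
+        // records a frame state that still contains it.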
+        environment()->Push(value);
+        PrepareEagerCheckpoint(stmt->FilterId());
+        value = environment()->Pop();
         // Bind value and do loop body.
         VectorSlotPair feedback =
             CreateVectorSlotPair(stmt->EachFeedbackSlot());
-        VisitForInAssignment(stmt->each(), value, feedback, stmt->FilterId(),
+        VisitForInAssignment(stmt->each(), value, feedback,
                              stmt->AssignmentId());
-        VisitIterationBody(stmt, &for_loop);
+        VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
       }
       test_value.End();
       for_loop.EndBody();
@@ -1469,7 +1445,7 @@
   Node* condition = environment()->Pop();
   for_loop.BreakWhen(condition);
   VisitForEffect(stmt->assign_each());
-  VisitIterationBody(stmt, &for_loop);
+  VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
   for_loop.EndBody();
   for_loop.EndLoop();
 }
@@ -1482,7 +1458,7 @@
   // that is intercepting 'throw' control commands.
   try_control.BeginTry();
   {
-    ControlScopeForCatch scope(this, &try_control);
+    ControlScopeForCatch scope(this, stmt, &try_control);
     STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
     environment()->Push(current_context());
     Visit(stmt->try_block());
@@ -1528,7 +1504,7 @@
   // that is intercepting all control commands.
   try_control.BeginTry();
   {
-    ControlScopeForFinally scope(this, commands, &try_control);
+    ControlScopeForFinally scope(this, stmt, commands, &try_control);
     STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
     environment()->Push(current_context());
     Visit(stmt->try_block());
@@ -1589,25 +1565,11 @@
   PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
   const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
   Node* value = NewNode(op);
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
-  // Visit declarations and class literal in a block scope.
-  if (expr->scope()->ContextLocalCount() > 0) {
-    Node* context = BuildLocalBlockContext(expr->scope());
-    ContextScope scope(this, expr->scope(), context);
-    VisitDeclarations(expr->scope()->declarations());
-    VisitClassLiteralContents(expr);
-  } else {
-    VisitDeclarations(expr->scope()->declarations());
-    VisitClassLiteralContents(expr);
-  }
-}
-
-
-void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
   VisitForValueOrTheHole(expr->extends());
   VisitForValue(expr->constructor());
 
@@ -1704,7 +1666,7 @@
     BuildVariableAssignment(var, literal, Token::INIT, feedback,
                             BailoutId::None());
   }
-  ast_context()->ProduceValue(literal);
+  ast_context()->ProduceValue(expr, literal);
 }
 
 
@@ -1716,7 +1678,7 @@
 void AstGraphBuilder::VisitDoExpression(DoExpression* expr) {
   VisitBlock(expr->block());
   VisitVariableProxy(expr->result());
-  ast_context()->ReplaceValue();
+  ast_context()->ReplaceValue(expr);
 }
 
 
@@ -1730,7 +1692,11 @@
   compare_if.Else();
   Visit(expr->else_expression());
   compare_if.End();
-  ast_context()->ReplaceValue();
+  // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+  // sync with full codegen which doesn't prepare the proper bailout point (see
+  // the implementation of FullCodeGenerator::VisitForControl).
+  if (ast_context()->IsTest()) return;
+  ast_context()->ReplaceValue(expr);
 }
 
 
@@ -1739,13 +1705,13 @@
   PrepareEagerCheckpoint(BeforeId(expr));
   Node* value = BuildVariableLoad(expr->var(), expr->id(), pair,
                                   ast_context()->GetStateCombine());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitLiteral(Literal* expr) {
   Node* value = jsgraph()->Constant(expr->value());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -1757,7 +1723,7 @@
       expr->pattern(), expr->flags(), expr->literal_index());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
-  ast_context()->ProduceValue(literal);
+  ast_context()->ProduceValue(expr, literal);
 }
 
 
@@ -1794,10 +1760,10 @@
       case ObjectLiteral::Property::COMPUTED: {
         // It is safe to use [[Put]] here because the boilerplate already
         // contains computed properties with an uninitialized value.
-        if (key->value()->IsInternalizedString()) {
+        if (key->IsStringLiteral()) {
+          DCHECK(key->IsPropertyName());
           if (property->emit_store()) {
             VisitForValue(property->value());
-            PrepareEagerCheckpoint(property->value()->id());
             Node* value = environment()->Pop();
             Node* literal = environment()->Top();
             Handle<Name> name = key->AsPropertyName();
@@ -1915,6 +1881,7 @@
       case ObjectLiteral::Property::CONSTANT:
       case ObjectLiteral::Property::COMPUTED:
       case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
+        if (!property->emit_store()) continue;
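+        // Skip properties for which no store is to be emitted (e.g. ones
+        // shadowed by a later duplicate key).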
         Node* attr = jsgraph()->Constant(NONE);
         Node* set_function_name =
             jsgraph()->Constant(property->NeedsSetFunctionName());
@@ -1946,7 +1913,7 @@
     }
   }
 
-  ast_context()->ProduceValue(environment()->Pop());
+  ast_context()->ProduceValue(expr, environment()->Pop());
 }
 
 
@@ -1985,16 +1952,13 @@
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     VisitForValue(subexpr);
-    {
-      PrepareEagerCheckpoint(subexpr->id());
-      VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
-      Node* value = environment()->Pop();
-      Node* index = jsgraph()->Constant(array_index);
-      Node* literal = environment()->Top();
-      Node* store = BuildKeyedStore(literal, index, value, pair);
-      PrepareFrameState(store, expr->GetIdForElement(array_index),
-                        OutputFrameStateCombine::Ignore());
-    }
+    VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
+    Node* value = environment()->Pop();
+    Node* index = jsgraph()->Constant(array_index);
+    Node* literal = environment()->Top();
+    Node* store = BuildKeyedStore(literal, index, value, pair);
+    PrepareFrameState(store, expr->GetIdForElement(array_index),
+                      OutputFrameStateCombine::Ignore());
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -2017,14 +1981,12 @@
     }
   }
 
-  ast_context()->ProduceValue(environment()->Pop());
+  ast_context()->ProduceValue(expr, environment()->Pop());
 }
 
-
 void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
                                            const VectorSlotPair& feedback,
-                                           BailoutId bailout_id_before,
-                                           BailoutId bailout_id_after) {
+                                           BailoutId bailout_id) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   // Left-hand side can only be a property, a global or a variable slot.
@@ -2035,50 +1997,40 @@
   switch (assign_type) {
     case VARIABLE: {
       Variable* var = expr->AsVariableProxy()->var();
-      environment()->Push(value);
-      PrepareEagerCheckpoint(bailout_id_before);
-      value = environment()->Pop();
-      BuildVariableAssignment(var, value, Token::ASSIGN, feedback,
-                              bailout_id_after);
+      BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id);
       break;
     }
     case NAMED_PROPERTY: {
       environment()->Push(value);
       VisitForValue(property->obj());
-      PrepareEagerCheckpoint(property->obj()->id());
       Node* object = environment()->Pop();
       value = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedStore(object, name, value, feedback);
-      PrepareFrameState(store, bailout_id_after,
-                        OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
       break;
     }
     case KEYED_PROPERTY: {
       environment()->Push(value);
       VisitForValue(property->obj());
       VisitForValue(property->key());
-      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Pop();
       Node* object = environment()->Pop();
       value = environment()->Pop();
       Node* store = BuildKeyedStore(object, key, value, feedback);
-      PrepareFrameState(store, bailout_id_after,
-                        OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
       environment()->Push(value);
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      PrepareEagerCheckpoint(property->obj()->id());
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       value = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      PrepareFrameState(store, bailout_id_after,
-                        OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
       break;
     }
     case KEYED_SUPER_PROPERTY: {
@@ -2086,14 +2038,12 @@
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
       VisitForValue(property->key());
-      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Pop();
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       value = environment()->Pop();
       Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      PrepareFrameState(store, bailout_id_after,
-                        OutputFrameStateCombine::Ignore());
+      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
       break;
     }
   }
@@ -2137,7 +2087,6 @@
       break;
   }
 
-  BailoutId before_store_id = BailoutId::None();
   // Evaluate the value and potentially handle compound assignments by loading
   // the left-hand side value and performing a binary operation.
   if (expr->is_compound()) {
@@ -2157,7 +2106,6 @@
         Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        PrepareEagerCheckpoint(property->obj()->id());
         old_value = BuildNamedLoad(object, name, pair);
         PrepareFrameState(old_value, property->LoadId(),
                           OutputFrameStateCombine::Push());
@@ -2168,7 +2116,6 @@
         Node* object = environment()->Peek(1);
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        PrepareEagerCheckpoint(property->key()->id());
         old_value = BuildKeyedLoad(object, key, pair);
         PrepareFrameState(old_value, property->LoadId(),
                           OutputFrameStateCombine::Push());
@@ -2180,7 +2127,6 @@
         Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        PrepareEagerCheckpoint(property->obj()->id());
         old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
         PrepareFrameState(old_value, property->LoadId(),
                           OutputFrameStateCombine::Push());
@@ -2192,7 +2138,6 @@
         Node* receiver = environment()->Peek(2);
         VectorSlotPair pair =
             CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        PrepareEagerCheckpoint(property->key()->id());
         old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
         PrepareFrameState(old_value, property->LoadId(),
                           OutputFrameStateCombine::Push());
@@ -2201,30 +2146,22 @@
     }
     environment()->Push(old_value);
     VisitForValue(expr->value());
-    Node* value;
-    {
-      FrameStateBeforeAndAfter states(this, expr->value()->id());
-      Node* right = environment()->Pop();
-      Node* left = environment()->Pop();
-      value =
-          BuildBinaryOp(left, right, expr->binary_op(),
-                        expr->binary_operation()->BinaryOperationFeedbackId());
-      states.AddToNode(value, expr->binary_operation()->id(),
-                       OutputFrameStateCombine::Push());
-    }
+    Node* right = environment()->Pop();
+    Node* left = environment()->Pop();
+    Node* value =
+        BuildBinaryOp(left, right, expr->binary_op(),
+                      expr->binary_operation()->BinaryOperationFeedbackId());
+    PrepareFrameState(value, expr->binary_operation()->id(),
+                      OutputFrameStateCombine::Push());
     environment()->Push(value);
     if (needs_frame_state_before) {
-      before_store_id = expr->binary_operation()->id();
+      PrepareEagerCheckpoint(expr->binary_operation()->id());
     }
   } else {
     VisitForValue(expr->value());
-    if (needs_frame_state_before) {
-      before_store_id = expr->value()->id();
-    }
   }
 
   // Store the value.
-  PrepareEagerCheckpoint(before_store_id);
   Node* value = environment()->Pop();
   VectorSlotPair feedback = CreateVectorSlotPair(expr->AssignmentSlot());
   switch (assign_type) {
@@ -2238,14 +2175,16 @@
       Node* object = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedStore(object, name, value, feedback);
-      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        OutputFrameStateCombine::Push());
       break;
     }
     case KEYED_PROPERTY: {
       Node* key = environment()->Pop();
       Node* object = environment()->Pop();
       Node* store = BuildKeyedStore(object, key, value, feedback);
-      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        OutputFrameStateCombine::Push());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2266,14 +2205,14 @@
     }
   }
 
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitYield(Yield* expr) {
   // Generator functions are supported only by going through Ignition first.
   SetStackOverflow();
-  ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+  ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
 }
 
 
@@ -2281,7 +2220,7 @@
   VisitForValue(expr->exception());
   Node* exception = environment()->Pop();
   Node* value = BuildThrowError(exception, expr->id());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2295,54 +2234,50 @@
       break;
     case NAMED_PROPERTY: {
       VisitForValue(expr->obj());
-      PrepareEagerCheckpoint(expr->obj()->id());
       Node* object = environment()->Pop();
       Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
       value = BuildNamedLoad(object, name, pair);
-      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
       break;
     }
     case KEYED_PROPERTY: {
       VisitForValue(expr->obj());
       VisitForValue(expr->key());
-      PrepareEagerCheckpoint(expr->key()->id());
       Node* key = environment()->Pop();
       Node* object = environment()->Pop();
       value = BuildKeyedLoad(object, key, pair);
-      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
       VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
-      PrepareEagerCheckpoint(expr->obj()->id());
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
       value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
       break;
     }
     case KEYED_SUPER_PROPERTY: {
       VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
       VisitForValue(expr->key());
-      PrepareEagerCheckpoint(expr->key()->id());
       Node* key = environment()->Pop();
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
       break;
     }
   }
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitCall(Call* expr) {
   Expression* callee = expr->expression();
-  Call::CallType call_type = expr->GetCallType(isolate());
+  Call::CallType call_type = expr->GetCallType();
 
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
@@ -2379,7 +2314,6 @@
       VectorSlotPair feedback =
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       VisitForValue(property->obj());
-      PrepareEagerCheckpoint(property->obj()->id());
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* object = environment()->Top();
       callee_value = BuildNamedLoad(object, name, feedback);
@@ -2398,7 +2332,6 @@
           CreateVectorSlotPair(property->PropertyFeedbackSlot());
       VisitForValue(property->obj());
       VisitForValue(property->key());
-      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Pop();
       Node* object = environment()->Top();
       callee_value = BuildKeyedLoad(object, key, feedback);
@@ -2420,7 +2353,6 @@
       Node* home = environment()->Peek(1);
       Node* object = environment()->Top();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      PrepareEagerCheckpoint(property->obj()->id());
       callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
       PrepareFrameState(callee_value, property->LoadId(),
                         OutputFrameStateCombine::Push());
@@ -2443,7 +2375,6 @@
       Node* key = environment()->Pop();
       Node* home = environment()->Pop();
       Node* object = environment()->Pop();
-      PrepareEagerCheckpoint(property->key()->id());
       callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
       PrepareFrameState(callee_value, property->LoadId(),
                         OutputFrameStateCombine::Push());
@@ -2519,12 +2450,12 @@
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
   const Operator* call = javascript()->CallFunction(
       args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
-  PrepareEagerCheckpoint(expr->CallId());
+  PrepareEagerCheckpoint(possibly_eval ? expr->EvalId() : expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   environment()->Push(value->InputAt(0));  // The callee passed to the call.
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   environment()->Drop(1);
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2550,10 +2481,9 @@
   // Create node to perform the super call.
   const Operator* call =
       javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
-  PrepareEagerCheckpoint(super->new_target_var()->id());
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2564,11 +2494,6 @@
   ZoneList<Expression*>* args = expr->arguments();
   VisitForValues(args);
 
-  // The baseline compiler doesn't push the new.target, so we need to record
-  // the frame state before the push.
-  PrepareEagerCheckpoint(args->is_empty() ? expr->expression()->id()
-                                          : args->last()->id());
-
   // The new target is the same as the callee.
   environment()->Push(environment()->Peek(args->length()));
 
@@ -2578,7 +2503,7 @@
       javascript()->CallConstruct(args->length() + 2, feedback);
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2600,7 +2525,7 @@
   PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2618,10 +2543,13 @@
   // Create node to perform the runtime call.
   Runtime::FunctionId functionId = expr->function()->function_id;
   const Operator* call = javascript()->CallRuntime(functionId, args->length());
-  PrepareEagerCheckpoint(expr->CallId());
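+  // Only genuine runtime calls (and the %_Call intrinsic) can eagerly
+  // deoptimize; other inline intrinsics need no checkpoint here.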
+  if (expr->function()->intrinsic_type == Runtime::IntrinsicType::RUNTIME ||
+      expr->function()->function_id == Runtime::kInlineCall) {
+    PrepareEagerCheckpoint(expr->CallId());
+  }
   Node* value = ProcessArguments(call, args->length());
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2669,7 +2597,6 @@
     }
     case NAMED_PROPERTY: {
       VisitForValue(property->obj());
-      PrepareEagerCheckpoint(property->obj()->id());
       Node* object = environment()->Top();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       VectorSlotPair pair =
@@ -2683,7 +2610,6 @@
     case KEYED_PROPERTY: {
       VisitForValue(property->obj());
       VisitForValue(property->key());
-      PrepareEagerCheckpoint(property->key()->id());
       Node* key = environment()->Top();
       Node* object = environment()->Peek(1);
       VectorSlotPair pair =
@@ -2697,7 +2623,6 @@
     case NAMED_SUPER_PROPERTY: {
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      PrepareEagerCheckpoint(property->obj()->id());
       Node* home_object = environment()->Top();
       Node* receiver = environment()->Peek(1);
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
@@ -2713,7 +2638,6 @@
       VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
       VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
       VisitForValue(property->key());
-      PrepareEagerCheckpoint(property->obj()->id());
       Node* key = environment()->Top();
       Node* home_object = environment()->Peek(1);
       Node* receiver = environment()->Peek(2);
@@ -2734,7 +2658,7 @@
 
   // Create a proper eager frame state for the stores.
   environment()->Push(old_value);
-  FrameStateBeforeAndAfter binop_states(this, expr->ToNumberId());
+  PrepareEagerCheckpoint(expr->ToNumberId());
   old_value = environment()->Pop();
 
   // Save result for postfix expressions at correct stack depth.
@@ -2747,12 +2671,10 @@
   }
 
   // Create node to perform +1/-1 operation.
-  // TODO(bmeurer): Cleanup this feedback/bailout mess!
   Node* value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
                               expr->binary_op(), expr->CountBinOpFeedbackId());
-  // This should never deoptimize because we have converted to number before.
-  binop_states.AddToNode(value, BailoutId::None(),
-                         OutputFrameStateCombine::Ignore());
+  // This should never trigger a lazy deopt because the value was already
+  // converted to a number above.
+  PrepareFrameState(value, BailoutId::None());
 
   // Store the value.
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
@@ -2769,20 +2691,16 @@
       Node* object = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedStore(object, name, value, feedback);
-      environment()->Push(value);
       PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Ignore());
-      environment()->Pop();
+                        OutputFrameStateCombine::Push());
       break;
     }
     case KEYED_PROPERTY: {
       Node* key = environment()->Pop();
       Node* object = environment()->Pop();
       Node* store = BuildKeyedStore(object, key, value, feedback);
-      environment()->Push(value);
       PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Ignore());
-      environment()->Pop();
+                        OutputFrameStateCombine::Push());
       break;
     }
     case NAMED_SUPER_PROPERTY: {
@@ -2790,10 +2708,8 @@
       Node* receiver = environment()->Pop();
       Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
       Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      environment()->Push(value);
       PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Ignore());
-      environment()->Pop();
+                        OutputFrameStateCombine::Push());
       break;
     }
     case KEYED_SUPER_PROPERTY: {
@@ -2801,10 +2717,8 @@
       Node* home_object = environment()->Pop();
       Node* receiver = environment()->Pop();
       Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      environment()->Push(value);
       PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Ignore());
-      environment()->Pop();
+                        OutputFrameStateCombine::Push());
       break;
     }
   }
@@ -2812,7 +2726,7 @@
   // Restore old value for postfix expressions.
   if (is_postfix) value = environment()->Pop();
 
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2826,13 +2740,12 @@
     default: {
       VisitForValue(expr->left());
       VisitForValue(expr->right());
-      FrameStateBeforeAndAfter states(this, expr->right()->id());
       Node* right = environment()->Pop();
       Node* left = environment()->Pop();
       Node* value = BuildBinaryOp(left, right, expr->op(),
                                   expr->BinaryOperationFeedbackId());
-      states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
-      ast_context()->ProduceValue(value);
+      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+      ast_context()->ProduceValue(expr, value);
     }
   }
 }
@@ -2843,32 +2756,30 @@
   const Operator* op = nullptr;
   switch (expr->op()) {
     case Token::EQ:
-      op = javascript()->Equal(CompareOperationHints::Any());
+      op = javascript()->Equal(CompareOperationHint::kAny);
       break;
     case Token::EQ_STRICT:
-      op = javascript()->StrictEqual(CompareOperationHints::Any());
+      op = javascript()->StrictEqual(CompareOperationHint::kAny);
       break;
     default:
       UNREACHABLE();
   }
   VisitForValue(sub_expr);
-  PrepareEagerCheckpoint(sub_expr->id());
   Node* value_to_compare = environment()->Pop();
   Node* value = NewNode(op, value_to_compare, nil_value);
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
-  return ast_context()->ProduceValue(value);
+  return ast_context()->ProduceValue(expr, value);
 }
 
 void AstGraphBuilder::VisitLiteralCompareTypeof(CompareOperation* expr,
                                                 Expression* sub_expr,
                                                 Handle<String> check) {
   VisitTypeofExpression(sub_expr);
-  PrepareEagerCheckpoint(sub_expr->id());
   Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
-  Node* value = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+  Node* value = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
                         typeof_arg, jsgraph()->Constant(check));
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
-  return ast_context()->ProduceValue(value);
+  return ast_context()->ProduceValue(expr, value);
 }
 
 void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
@@ -2888,38 +2799,38 @@
     return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
   }
 
-  CompareOperationHints hints;
+  CompareOperationHint hint;
   if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetCompareOperationHints(
-          expr->CompareOperationFeedbackId(), &hints)) {
-    hints = CompareOperationHints::Any();
+      !type_hint_analysis_->GetCompareOperationHint(
+          expr->CompareOperationFeedbackId(), &hint)) {
+    hint = CompareOperationHint::kAny;
   }
 
   const Operator* op;
   switch (expr->op()) {
     case Token::EQ:
-      op = javascript()->Equal(hints);
+      op = javascript()->Equal(hint);
       break;
     case Token::NE:
-      op = javascript()->NotEqual(hints);
+      op = javascript()->NotEqual(hint);
       break;
     case Token::EQ_STRICT:
-      op = javascript()->StrictEqual(hints);
+      op = javascript()->StrictEqual(hint);
       break;
     case Token::NE_STRICT:
-      op = javascript()->StrictNotEqual(hints);
+      op = javascript()->StrictNotEqual(hint);
       break;
     case Token::LT:
-      op = javascript()->LessThan(hints);
+      op = javascript()->LessThan(hint);
       break;
     case Token::GT:
-      op = javascript()->GreaterThan(hints);
+      op = javascript()->GreaterThan(hint);
       break;
     case Token::LTE:
-      op = javascript()->LessThanOrEqual(hints);
+      op = javascript()->LessThanOrEqual(hint);
       break;
     case Token::GTE:
-      op = javascript()->GreaterThanOrEqual(hints);
+      op = javascript()->GreaterThanOrEqual(hint);
       break;
     case Token::INSTANCEOF:
       op = javascript()->InstanceOf();
@@ -2933,12 +2844,11 @@
   }
   VisitForValue(expr->left());
   VisitForValue(expr->right());
-  FrameStateBeforeAndAfter states(this, expr->right()->id());
   Node* right = environment()->Pop();
   Node* left = environment()->Pop();
   Node* value = NewNode(op, left, right);
-  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
-  ast_context()->ProduceValue(value);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2956,14 +2866,14 @@
 
 void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
   Node* value = GetFunctionClosure();
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
   Node* value = BuildThrowUnsupportedSuperError(expr->id());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
@@ -2981,17 +2891,20 @@
 
 void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
   DCHECK(globals()->empty());
-  AstVisitor::VisitDeclarations(declarations);
+  AstVisitor<AstGraphBuilder>::VisitDeclarations(declarations);
   if (globals()->empty()) return;
   int array_index = 0;
+  Handle<TypeFeedbackVector> feedback_vector(
+      info()->closure()->feedback_vector());
   Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
       static_cast<int>(globals()->size()), TENURED);
   for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
   int encoded_flags = info()->GetDeclareGlobalsFlags();
   Node* flags = jsgraph()->Constant(encoded_flags);
   Node* pairs = jsgraph()->Constant(data);
+  Node* vector = jsgraph()->Constant(feedback_vector);
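+  // Pass the closure's feedback vector along with the (slot, value) pairs
+  // collected by the declaration visitors.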
   const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
-  Node* call = NewNode(op, pairs, flags);
+  Node* call = NewNode(op, pairs, flags, vector);
   PrepareFrameState(call, BailoutId::Declarations());
   globals()->clear();
 }
@@ -3009,13 +2922,13 @@
   Visit(stmt);
 }
 
-
 void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
-                                         LoopBuilder* loop) {
+                                         LoopBuilder* loop,
+                                         BailoutId stack_check_id) {
   ControlScopeForIteration scope(this, stmt, loop);
   if (FLAG_turbo_loop_stackcheck || !info()->shared_info()->asm_function()) {
     Node* node = NewNode(javascript()->StackCheck());
-    PrepareFrameState(node, stmt->StackCheckId());
+    PrepareFrameState(node, stack_check_id);
   }
   Visit(stmt->body());
 }
@@ -3024,12 +2937,10 @@
 void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
   Node* value;
   if (expr->expression()->IsVariableProxy()) {
-    // Delete of an unqualified identifier is only allowed in classic mode but
-    // deleting "this" is allowed in all language modes.
-    Variable* variable = expr->expression()->AsVariableProxy()->var();
     // Delete of an unqualified identifier is disallowed in strict mode but
     // "delete this" is allowed.
-    DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
+    Variable* variable = expr->expression()->AsVariableProxy()->var();
+    DCHECK(is_sloppy(language_mode()) || variable->is_this());
     value = BuildVariableDelete(variable, expr->id(),
                                 ast_context()->GetStateCombine());
   } else if (expr->expression()->IsProperty()) {
@@ -3044,14 +2955,14 @@
     VisitForEffect(expr->expression());
     value = jsgraph()->TrueConstant();
   }
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitVoid(UnaryOperation* expr) {
   VisitForEffect(expr->expression());
   Node* value = jsgraph()->UndefinedConstant();
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
@@ -3073,33 +2984,50 @@
 void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
   VisitTypeofExpression(expr->expression());
   Node* value = NewNode(javascript()->TypeOf(), environment()->Pop());
-  ast_context()->ProduceValue(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
-  VisitForValue(expr->expression());
-  Node* operand = environment()->Pop();
-  Node* input = BuildToBoolean(operand, expr->expression()->test_id());
+  VisitForTest(expr->expression());
+  Node* input = environment()->Pop();
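+  // The operand was visited in a test context, so the ToBoolean conversion
+  // (with its feedback id) happened in AstTestContext::ProduceValue.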
   Node* value = NewNode(common()->Select(MachineRepresentation::kTagged), input,
                         jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
-  ast_context()->ProduceValue(value);
+  // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+  // sync with full codegen which doesn't prepare the proper bailout point (see
+  // the implementation of FullCodeGenerator::VisitForControl).
+  if (ast_context()->IsTest()) return environment()->Push(value);
+  ast_context()->ProduceValue(expr, value);
 }
 
 
 void AstGraphBuilder::VisitComma(BinaryOperation* expr) {
   VisitForEffect(expr->left());
   Visit(expr->right());
-  ast_context()->ReplaceValue();
+  // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+  // sync with full codegen which doesn't prepare the proper bailout point (see
+  // the implementation of FullCodeGenerator::VisitForControl).
+  if (ast_context()->IsTest()) return;
+  ast_context()->ReplaceValue(expr);
 }
 
 
 void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
   bool is_logical_and = expr->op() == Token::AND;
   IfBuilder compare_if(this);
-  VisitForValue(expr->left());
-  Node* condition = environment()->Top();
-  compare_if.If(BuildToBoolean(condition, expr->left()->test_id()));
+  // Only use an AST evaluation context of the value kind when this expression
+  // is itself evaluated for its value. Otherwise stick to a test context,
+  // which is in sync with full codegen (see
+  // FullCodeGenerator::VisitLogicalExpression).
+  Node* condition = nullptr;
+  if (ast_context()->IsValue()) {
+    VisitForValue(expr->left());
+    Node* left = environment()->Top();
+    condition = BuildToBoolean(left, expr->left()->test_id());
+  } else {
+    VisitForTest(expr->left());
+    condition = environment()->Top();
+  }
+  compare_if.If(condition);
   compare_if.Then();
   if (is_logical_and) {
     environment()->Pop();
@@ -3119,7 +3047,11 @@
     environment()->Poke(0, jsgraph()->FalseConstant());
   }
   compare_if.End();
-  ast_context()->ReplaceValue();
+  // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+  // sync with full codegen which doesn't prepare the proper bailout point (see
+  // the implementation of FullCodeGenerator::VisitForControl).
+  if (ast_context()->IsTest()) return;
+  ast_context()->ReplaceValue(expr);
 }
 
 
@@ -3152,15 +3084,10 @@
 
 uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
   DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
-  bool found_eval_scope = false;
   uint32_t check_depths = 0;
   for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
     if (s->num_heap_slots() <= 0) continue;
-    // TODO(mstarzinger): If we have reached an eval scope, we check all
-    // extensions from this point. Replicated from full-codegen, figure out
-    // whether this is still needed. If not, drop {found_eval_scope} below.
-    if (s->is_eval_scope()) found_eval_scope = true;
-    if (!s->calls_sloppy_eval() && !found_eval_scope) continue;
+    if (!s->calls_sloppy_eval()) continue;
     int depth = current_scope()->ContextChainLength(s);
     if (depth > kMaxCheckDepth) return kFullCheckRequired;
     check_depths |= 1 << depth;
@@ -3196,7 +3123,7 @@
 
 
 Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
-  Scope* scope = info()->scope();
+  DeclarationScope* scope = info()->scope();
 
   // Allocate a new local context.
   Node* local_context = scope->is_script_scope()
@@ -3338,7 +3265,7 @@
                                                BailoutId bailout_id) {
   IfBuilder hole_check(this);
   Node* the_hole = jsgraph()->TheHoleConstant();
-  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
                         value, the_hole);
   hole_check.If(check);
   hole_check.Then();
@@ -3356,7 +3283,7 @@
                                                BailoutId bailout_id) {
   IfBuilder hole_check(this);
   Node* the_hole = jsgraph()->TheHoleConstant();
-  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
                         value, the_hole);
   hole_check.If(check);
   hole_check.Then();
@@ -3374,7 +3301,7 @@
   IfBuilder prototype_check(this);
   Node* prototype_string =
       jsgraph()->Constant(isolate()->factory()->prototype_string());
-  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
                         name, prototype_string);
   prototype_check.If(check);
   prototype_check.Then();
@@ -3393,7 +3320,6 @@
                                          OutputFrameStateCombine combine,
                                          TypeofMode typeof_mode) {
   Node* the_hole = jsgraph()->TheHoleConstant();
-  VariableMode mode = variable->mode();
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
@@ -3408,7 +3334,7 @@
     case VariableLocation::LOCAL: {
       // Local var, const, or let variable.
       Node* value = environment()->Lookup(variable);
-      if (mode == LET || mode == CONST) {
+      if (variable->binding_needs_init()) {
         // Perform check for uninitialized let/const variables.
         if (value->op() == the_hole->op()) {
           value = BuildThrowReferenceError(variable, bailout_id);
@@ -3428,7 +3354,7 @@
       // TODO(titzer): initialization checks are redundant for already
       // initialized immutable context loads, but only specialization knows.
       // Maybe specializer should be a parameter to the graph builder?
-      if (mode == LET || mode == CONST) {
+      if (variable->binding_needs_init()) {
         // Perform check for uninitialized let/const variables.
         value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
       }
@@ -3445,6 +3371,8 @@
       PrepareFrameState(value, bailout_id, combine);
       return value;
     }
+    case VariableLocation::MODULE:
+      UNREACHABLE();
   }
   UNREACHABLE();
   return nullptr;
@@ -3469,7 +3397,7 @@
     case VariableLocation::LOCAL:
     case VariableLocation::CONTEXT: {
       // Local var, const, or let variable or context variable.
-      return jsgraph()->BooleanConstant(variable->HasThisName(isolate()));
+      return jsgraph()->BooleanConstant(variable->is_this());
     }
     case VariableLocation::LOOKUP: {
       // Dynamic lookup of context variable (anywhere in the chain).
@@ -3480,6 +3408,8 @@
       PrepareFrameState(result, bailout_id, combine);
       return result;
     }
+    case VariableLocation::MODULE:
+      UNREACHABLE();
   }
   UNREACHABLE();
   return nullptr;
@@ -3517,7 +3447,8 @@
         // baseline code might contain debug code that inspects the variable.
         Node* current = environment()->Lookup(variable);
         CHECK_NOT_NULL(current);
-      } else if (mode == LET && op != Token::INIT) {
+      } else if (mode == LET && op != Token::INIT &&
+                 variable->binding_needs_init()) {
         // Perform an initialization check for let declared variables.
         Node* current = environment()->Lookup(variable);
         if (current->op() == the_hole->op()) {
@@ -3534,13 +3465,15 @@
           value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
         }
       } else if (mode == CONST && op != Token::INIT) {
-        // Assignment to const is exception in all modes.
-        Node* current = environment()->Lookup(variable);
-        if (current->op() == the_hole->op()) {
-          return BuildThrowReferenceError(variable, bailout_id);
-        } else if (current->opcode() == IrOpcode::kPhi) {
-          BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+        if (variable->binding_needs_init()) {
+          Node* current = environment()->Lookup(variable);
+          if (current->op() == the_hole->op()) {
+            return BuildThrowReferenceError(variable, bailout_id);
+          } else if (current->opcode() == IrOpcode::kPhi) {
+            BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+          }
         }
+        // Assignment to const is an exception in all modes.
         return BuildThrowConstAssignError(bailout_id);
       }
       environment()->Bind(variable, value);
@@ -3556,7 +3489,8 @@
           return BuildThrowConstAssignError(bailout_id);
         }
         return value;
-      } else if (mode == LET && op != Token::INIT) {
+      } else if (mode == LET && op != Token::INIT &&
+                 variable->binding_needs_init()) {
         // Perform an initialization check for let declared variables.
         const Operator* op =
             javascript()->LoadContext(depth, variable->index(), false);
@@ -3573,11 +3507,13 @@
           value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
         }
       } else if (mode == CONST && op != Token::INIT) {
+        if (variable->binding_needs_init()) {
+          const Operator* op =
+              javascript()->LoadContext(depth, variable->index(), false);
+          Node* current = NewNode(op, current_context());
+          BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+        }
         // Assignment to const is an exception in all modes.
-        const Operator* op =
-            javascript()->LoadContext(depth, variable->index(), false);
-        Node* current = NewNode(op, current_context());
-        BuildHoleCheckThenThrow(current, variable, value, bailout_id);
         return BuildThrowConstAssignError(bailout_id);
       }
       const Operator* op = javascript()->StoreContext(depth, variable->index());
@@ -3590,6 +3526,8 @@
       PrepareFrameState(store, bailout_id, combine);
       return store;
     }
+    case VariableLocation::MODULE:
+      UNREACHABLE();
   }
   UNREACHABLE();
   return nullptr;
@@ -3739,7 +3677,7 @@
 Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
   if (Node* node = TryFastToName(input)) return node;
   Node* name = NewNode(javascript()->ToName(), input);
-  PrepareFrameState(name, bailout_id);
+  PrepareFrameState(name, bailout_id, OutputFrameStateCombine::Push());
   return name;
 }
 
@@ -3844,44 +3782,44 @@
 Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
                                      TypeFeedbackId feedback_id) {
   const Operator* js_op;
-  BinaryOperationHints hints;
+  BinaryOperationHint hint;
   if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetBinaryOperationHints(feedback_id, &hints)) {
-    hints = BinaryOperationHints::Any();
+      !type_hint_analysis_->GetBinaryOperationHint(feedback_id, &hint)) {
+    hint = BinaryOperationHint::kAny;
   }
   switch (op) {
     case Token::BIT_OR:
-      js_op = javascript()->BitwiseOr(hints);
+      js_op = javascript()->BitwiseOr(hint);
       break;
     case Token::BIT_AND:
-      js_op = javascript()->BitwiseAnd(hints);
+      js_op = javascript()->BitwiseAnd(hint);
       break;
     case Token::BIT_XOR:
-      js_op = javascript()->BitwiseXor(hints);
+      js_op = javascript()->BitwiseXor(hint);
       break;
     case Token::SHL:
-      js_op = javascript()->ShiftLeft(hints);
+      js_op = javascript()->ShiftLeft(hint);
       break;
     case Token::SAR:
-      js_op = javascript()->ShiftRight(hints);
+      js_op = javascript()->ShiftRight(hint);
       break;
     case Token::SHR:
-      js_op = javascript()->ShiftRightLogical(hints);
+      js_op = javascript()->ShiftRightLogical(hint);
       break;
     case Token::ADD:
-      js_op = javascript()->Add(hints);
+      js_op = javascript()->Add(hint);
       break;
     case Token::SUB:
-      js_op = javascript()->Subtract(hints);
+      js_op = javascript()->Subtract(hint);
       break;
     case Token::MUL:
-      js_op = javascript()->Multiply(hints);
+      js_op = javascript()->Multiply(hint);
       break;
     case Token::DIV:
-      js_op = javascript()->Divide(hints);
+      js_op = javascript()->Divide(hint);
       break;
     case Token::MOD:
-      js_op = javascript()->Modulus(hints);
+      js_op = javascript()->Modulus(hint);
       break;
     default:
       UNREACHABLE();
@@ -3926,7 +3864,7 @@
           javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
           current_context());
       Node* check =
-          NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), load,
+          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
                   jsgraph()->TheHoleConstant());
       fast_block.BreakUnless(check, BranchHint::kTrue);
     }
@@ -3973,7 +3911,7 @@
           javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
           current_context());
       Node* check =
-          NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), load,
+          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
                   jsgraph()->TheHoleConstant());
       fast_block.BreakUnless(check, BranchHint::kTrue);
     }
@@ -4061,15 +3999,14 @@
 
 void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
                                         OutputFrameStateCombine combine) {
-  if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    DCHECK(ast_id.IsNone() || info()->shared_info()->VerifyBailoutId(ast_id));
     DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
-
     DCHECK_EQ(IrOpcode::kDead,
-              NodeProperties::GetFrameStateInput(node, 0)->opcode());
-    bool node_has_exception = NodeProperties::IsExceptionalCall(node);
-    NodeProperties::ReplaceFrameStateInput(
-        node, 0,
-        environment()->Checkpoint(ast_id, combine, node_has_exception));
+              NodeProperties::GetFrameStateInput(node)->opcode());
+    bool has_exception = NodeProperties::IsExceptionalCall(node);
+    Node* state = environment()->Checkpoint(ast_id, combine, has_exception);
+    NodeProperties::ReplaceFrameStateInput(node, state);
   }
 }
 
@@ -4080,11 +4017,12 @@
     return;
   }
   if (ast_id != BailoutId::None()) {
+    DCHECK(info()->shared_info()->VerifyBailoutId(ast_id));
     Node* node = NewNode(common()->Checkpoint());
     DCHECK_EQ(IrOpcode::kDead,
-              NodeProperties::GetFrameStateInput(node, 0)->opcode());
-    NodeProperties::ReplaceFrameStateInput(node, 0,
-                                           environment()->Checkpoint(ast_id));
+              NodeProperties::GetFrameStateInput(node)->opcode());
+    Node* state = environment()->Checkpoint(ast_id);
+    NodeProperties::ReplaceFrameStateInput(node, state);
   }
 }
 
@@ -4110,7 +4048,7 @@
   DCHECK_EQ(op->ValueInputCount(), value_input_count);
 
   bool has_context = OperatorProperties::HasContextInput(op);
-  int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
+  bool has_frame_state = OperatorProperties::HasFrameStateInput(op);
   bool has_control = op->ControlInputCount() == 1;
   bool has_effect = op->EffectInputCount() == 1;
 
@@ -4118,13 +4056,13 @@
   DCHECK(op->EffectInputCount() < 2);
 
   Node* result = nullptr;
-  if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
+  if (!has_context && !has_frame_state && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
     bool inside_try_scope = try_nesting_level_ > 0;
     int input_count_with_deps = value_input_count;
     if (has_context) ++input_count_with_deps;
-    input_count_with_deps += frame_state_count;
+    if (has_frame_state) ++input_count_with_deps;
     if (has_control) ++input_count_with_deps;
     if (has_effect) ++input_count_with_deps;
     Node** buffer = EnsureInputBufferSize(input_count_with_deps);
@@ -4133,7 +4071,7 @@
     if (has_context) {
       *current_input++ = current_context();
     }
-    for (int i = 0; i < frame_state_count; i++) {
+    if (has_frame_state) {
       // The frame state will be inserted later. Here we misuse
       // the {Dead} node as a sentinel to be later overwritten
       // with the real frame state.
@@ -4157,13 +4095,9 @@
       }
       // Add implicit exception continuation for throwing nodes.
       if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
-        // Conservative prediction whether caught locally.
-        IfExceptionHint hint = try_catch_nesting_level_ > 0
-                                   ? IfExceptionHint::kLocallyCaught
-                                   : IfExceptionHint::kLocallyUncaught;
         // Copy the environment for the success continuation.
         Environment* success_env = environment()->CopyForConditional();
-        const Operator* op = common()->IfException(hint);
+        const Operator* op = common()->IfException();
         Node* effect = environment()->GetEffectDependency();
         Node* on_exception = graph()->NewNode(op, effect, result);
         environment_->UpdateControlDependency(on_exception);
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 8346a51..bd307ba 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -32,11 +32,14 @@
 // underlying AST. The produced graph can either be compiled into a
 // stand-alone function or be wired into another graph for the purposes
 // of function inlining.
-class AstGraphBuilder : public AstVisitor {
+// This AstVisitor is not final, and provides the AstVisitor methods as virtual
+// methods so they can be specialized by subclasses.
+class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
  public:
   AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
                   LoopAssignmentAnalysis* loop_assignment = nullptr,
                   TypeHintAnalysis* type_hint_analysis = nullptr);
+  virtual ~AstGraphBuilder() {}
 
   // Creates a graph by visiting the entire AST.
   bool CreateGraph(bool stack_check = true);
@@ -51,13 +54,13 @@
   }
 
  protected:
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   // Visiting functions for AST nodes make this an AstVisitor.
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
   // Visiting function for declarations list is overridden.
-  void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+  void VisitDeclarations(ZoneList<Declaration*>* declarations);
 
  private:
   class AstContext;
@@ -71,7 +74,6 @@
   class ControlScopeForCatch;
   class ControlScopeForFinally;
   class Environment;
-  class FrameStateBeforeAndAfter;
   friend class ControlBuilder;
 
   Isolate* isolate_;
@@ -96,7 +98,6 @@
   SetOncePointer<Node> new_target_;
 
   // Tracks how many try-blocks are currently entered.
-  int try_catch_nesting_level_;
   int try_nesting_level_;
 
   // Temporary storage for building node input lists.
@@ -414,7 +415,8 @@
   void VisitForValues(ZoneList<Expression*>* exprs);
 
   // Common for all IterationStatement bodies.
-  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop);
+  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop,
+                          BailoutId stack_check_id);
 
   // Dispatched from VisitCall.
   void VisitCallSuper(Call* expr);
@@ -445,16 +447,12 @@
   // Dispatched from VisitForInStatement.
   void VisitForInAssignment(Expression* expr, Node* value,
                             const VectorSlotPair& feedback,
-                            BailoutId bailout_id_before,
-                            BailoutId bailout_id_after);
+                            BailoutId bailout_id);
 
   // Dispatched from VisitObjectLiteral.
   void VisitObjectLiteralAccessor(Node* home_object,
                                   ObjectLiteralProperty* property);
 
-  // Dispatched from VisitClassLiteral.
-  void VisitClassLiteralContents(ClassLiteral* expr);
-
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
 };
@@ -470,7 +468,8 @@
 //
 class AstGraphBuilder::Environment : public ZoneObject {
  public:
-  Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
+  Environment(AstGraphBuilder* builder, DeclarationScope* scope,
+              Node* control_dependency);
 
   int parameters_count() const { return parameters_count_; }
   int locals_count() const { return locals_count_; }
@@ -539,6 +538,10 @@
                                          OutputFrameStateCombine::Ignore(),
                    bool node_has_exception = false);
 
+  // Inserts a loop exit control node and renames the environment.
+  // This is useful for loop peeling to insert phis at loop exits.
+  void PrepareForLoopExit(Node* loop, BitVector* assigned_variables);
+
   // Control dependency tracked by this environment.
   Node* GetControlDependency() { return control_dependency_; }
   void UpdateControlDependency(Node* dependency) {
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index 334c597..f1469f7 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -55,8 +55,6 @@
 
 void ALAA::VisitVariableDeclaration(VariableDeclaration* leaf) {}
 void ALAA::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
-void ALAA::VisitImportDeclaration(ImportDeclaration* leaf) {}
-void ALAA::VisitExportDeclaration(ExportDeclaration* leaf) {}
 void ALAA::VisitEmptyStatement(EmptyStatement* leaf) {}
 void ALAA::VisitContinueStatement(ContinueStatement* leaf) {}
 void ALAA::VisitBreakStatement(BreakStatement* leaf) {}
@@ -254,10 +252,12 @@
 
 
 void ALAA::VisitForInStatement(ForInStatement* loop) {
+  Expression* l = loop->each();
   Enter(loop);
-  Visit(loop->each());
+  Visit(l);
   Visit(loop->subject());
   Visit(loop->body());
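+  // The for-in target is (re)assigned on every iteration of the loop, so
+  // record it as a loop assignment.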
+  if (l->IsVariableProxy()) AnalyzeAssignment(l->AsVariableProxy()->var());
   Exit(loop);
 }
 
@@ -299,17 +299,15 @@
   }
 }
 
-
-int ALAA::GetVariableIndex(Scope* scope, Variable* var) {
+int ALAA::GetVariableIndex(DeclarationScope* scope, Variable* var) {
   CHECK(var->IsStackAllocated());
   if (var->is_this()) return 0;
   if (var->IsParameter()) return 1 + var->index();
   return 1 + scope->num_parameters() + var->index();
 }
 
-
-int LoopAssignmentAnalysis::GetAssignmentCountForTesting(Scope* scope,
-                                                         Variable* var) {
+int LoopAssignmentAnalysis::GetAssignmentCountForTesting(
+    DeclarationScope* scope, Variable* var) {
   int count = 0;
   int var_index = AstLoopAssignmentAnalyzer::GetVariableIndex(scope, var);
   for (size_t i = 0; i < list_.size(); i++) {
diff --git a/src/compiler/ast-loop-assignment-analyzer.h b/src/compiler/ast-loop-assignment-analyzer.h
index a4a4609..0893fd1 100644
--- a/src/compiler/ast-loop-assignment-analyzer.h
+++ b/src/compiler/ast-loop-assignment-analyzer.h
@@ -30,7 +30,7 @@
     return nullptr;
   }
 
-  int GetAssignmentCountForTesting(Scope* scope, Variable* var);
+  int GetAssignmentCountForTesting(DeclarationScope* scope, Variable* var);
 
  private:
   friend class AstLoopAssignmentAnalyzer;
@@ -40,17 +40,18 @@
 
 
 // The class that performs loop assignment analysis by walking the AST.
-class AstLoopAssignmentAnalyzer : public AstVisitor {
+class AstLoopAssignmentAnalyzer final
+    : public AstVisitor<AstLoopAssignmentAnalyzer> {
  public:
   AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info);
 
   LoopAssignmentAnalysis* Analyze();
 
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
-  static int GetVariableIndex(Scope* scope, Variable* var);
+  static int GetVariableIndex(DeclarationScope* scope, Variable* var);
 
  private:
   CompilationInfo* info_;
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index 236fbca..9b36eb1 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -83,6 +83,7 @@
   DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
          node->opcode() == IrOpcode::kDeoptimizeUnless);
   bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
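+  // Remember the deoptimization reason so it can be attached to the
+  // unconditional {Deoptimize} node built below.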
+  DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
   Node* condition = NodeProperties::GetValueInput(node, 0);
   Node* frame_state = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -92,8 +93,7 @@
   // yet because we will have to recompute anyway once we compute the
   // predecessor.
   if (conditions == nullptr) {
-    DCHECK_NULL(node_conditions_.Get(node));
-    return NoChange();
+    return UpdateConditions(node, conditions);
   }
   Maybe<bool> condition_value = conditions->LookupCondition(condition);
   if (condition_value.IsJust()) {
@@ -103,8 +103,9 @@
       // with the {control} node that already contains the right information.
       ReplaceWithValue(node, dead(), effect, control);
     } else {
-      control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                                 frame_state, effect, control);
+      control =
+          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
+                           frame_state, effect, control);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), control);
       Revisit(graph()->end());
@@ -123,8 +124,7 @@
   // yet because we will have to recompute anyway once we compute the
   // predecessor.
   if (from_branch == nullptr) {
-    DCHECK(node_conditions_.Get(node) == nullptr);
-    return NoChange();
+    return UpdateConditions(node, nullptr);
   }
   Node* condition = branch->InputAt(0);
   return UpdateConditions(
@@ -145,8 +145,7 @@
   // input.
   for (int i = 0; i < node->InputCount(); i++) {
     if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
-      DCHECK(node_conditions_.Get(node) == nullptr);
-      return NoChange();
+      return UpdateConditions(node, nullptr);
     }
   }
 
@@ -209,7 +208,8 @@
   // Only signal that the node has Changed if the condition information has
   // changed.
   if (conditions != original) {
-    if (original == nullptr || *conditions != *original) {
+    if (conditions == nullptr || original == nullptr ||
+        *conditions != *original) {
       node_conditions_.Set(node, conditions);
       return Changed(node);
     }
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index 79d8ff2..a17947a 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -60,6 +60,9 @@
   Environment* CopyForConditional() const;
   Environment* CopyForLoop();
   void Merge(Environment* other);
+  void PrepareForOsr();
+
+  void PrepareForLoopExit(Node* loop);
 
  private:
   explicit Environment(const Environment* copy);
@@ -112,8 +115,8 @@
     // Create an explicit checkpoint node for before the operation.
     Node* node = builder_->NewNode(builder_->common()->Checkpoint());
     DCHECK_EQ(IrOpcode::kDead,
-              NodeProperties::GetFrameStateInput(node, 0)->opcode());
-    NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_before_);
+              NodeProperties::GetFrameStateInput(node)->opcode());
+    NodeProperties::ReplaceFrameStateInput(node, frame_state_before_);
   }
 
   ~FrameStateBeforeAndAfter() {
@@ -128,30 +131,21 @@
 
   void AddToNode(Node* node, OutputFrameStateCombine combine) {
     DCHECK(!added_to_node_);
-    int count = OperatorProperties::GetFrameStateInputCount(node->op());
-    DCHECK_LE(count, 2);
-    if (count >= 1) {
+    bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
+    if (has_frame_state) {
       // Add the frame state for after the operation.
       DCHECK_EQ(IrOpcode::kDead,
-                NodeProperties::GetFrameStateInput(node, 0)->opcode());
+                NodeProperties::GetFrameStateInput(node)->opcode());
       Node* frame_state_after =
           builder_->environment()->Checkpoint(id_after_, combine);
-      NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
-    }
-
-    if (count >= 2) {
-      // Add the frame state for before the operation.
-      // TODO(mstarzinger): Get rid of frame state input before!
-      DCHECK_EQ(IrOpcode::kDead,
-                NodeProperties::GetFrameStateInput(node, 1)->opcode());
-      NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
+      NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
     }
 
     if (!combine.IsOutputIgnored()) {
       output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
       output_poke_count_ = node->op()->ValueOutputCount();
     }
-    frame_states_unused_ = count == 0;
+    frame_states_unused_ = !has_frame_state;
     added_to_node_ = true;
   }
 
@@ -358,6 +352,36 @@
   builder()->exit_controls_.push_back(terminate);
 }
 
+void BytecodeGraphBuilder::Environment::PrepareForOsr() {
+  DCHECK_EQ(IrOpcode::kLoop, GetControlDependency()->opcode());
+  DCHECK_EQ(1, GetControlDependency()->InputCount());
+  Node* start = graph()->start();
+
+  // Create a control node for the OSR entry point and merge it into the loop
+  // header. Update the current environment's control dependency accordingly.
+  Node* entry = graph()->NewNode(common()->OsrLoopEntry(), start, start);
+  Node* control = builder()->MergeControl(GetControlDependency(), entry);
+  UpdateControlDependency(control);
+
+  // Create a merge of the effect from the OSR entry and the existing effect
+  // dependency. Update the current environment's effect dependency accordingly.
+  Node* effect = builder()->MergeEffect(GetEffectDependency(), entry, control);
+  UpdateEffectDependency(effect);
+
+  // Rename all values in the environment which will extend or introduce Phi
+  // nodes to contain the OSR values available at the entry point.
+  Node* osr_context = graph()->NewNode(
+      common()->OsrValue(Linkage::kOsrContextSpillSlotIndex), entry);
+  context_ = builder()->MergeValue(context_, osr_context, control);
+  int size = static_cast<int>(values()->size());
+  for (int i = 0; i < size; i++) {
+    int idx = i;  // Indexing scheme follows {StandardFrame}, adapt accordingly.
+    if (i >= register_base()) idx += InterpreterFrameConstants::kExtraSlotCount;
+    if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
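+    // The environment lays out parameters, interpreter registers, and the
+    // accumulator in sequence; registers are shifted by the extra interpreter
+    // frame slots, and the accumulator uses its dedicated OSR register index.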
+    Node* osr_value = graph()->NewNode(common()->OsrValue(idx), entry);
+    values_[i] = builder()->MergeValue(values_[i], osr_value, control);
+  }
+}
 
 bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
     Node** state_values, int offset, int count) {
@@ -375,6 +399,31 @@
   return false;
 }
 
+void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
+  DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
+
+  Node* control = GetControlDependency();
+
+  // Create the loop exit node.
+  Node* loop_exit = graph()->NewNode(common()->LoopExit(), control, loop);
+  UpdateControlDependency(loop_exit);
+
+  // Rename the effect.
+  Node* effect_rename = graph()->NewNode(common()->LoopExitEffect(),
+                                         GetEffectDependency(), loop_exit);
+  UpdateEffectDependency(effect_rename);
+
+  // TODO(jarin) We should also rename context here. However, unconditional
+  // renaming confuses global object and native context specialization.
+  // We should only rename if the context is assigned in the loop.
+
+  // Rename the environment values.
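+  // The {LoopExitValue} renames mark the points where phis are introduced
+  // if the loop is peeled.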
+  for (size_t i = 0; i < values_.size(); i++) {
+    Node* rename =
+        graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
+    values_[i] = rename;
+  }
+}
 
 void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
                                                           int offset,
@@ -447,6 +496,7 @@
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
           bytecode_array()->register_count(), info->shared_info())),
+      osr_ast_id_(info->osr_ast_id()),
       merge_environments_(local_zone),
       exception_handlers_(local_zone),
       current_exception_handler_(0),
@@ -521,6 +571,10 @@
                   GetFunctionContext());
   set_environment(&env);
 
+  // For OSR add an {OsrNormalEntry} as the start of the top-level environment.
+  // It will be replaced with {Dead} after typing and optimizations.
+  if (!osr_ast_id_.IsNone()) NewNode(common()->OsrNormalEntry());
+
   VisitBytecodes();
 
   // Finish the basic structure of the graph.
@@ -535,8 +589,11 @@
 
 void BytecodeGraphBuilder::VisitBytecodes() {
   BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
+  BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
   analysis.Analyze();
+  loop_analysis.Analyze();
   set_branch_analysis(&analysis);
+  set_loop_analysis(&loop_analysis);
   interpreter::BytecodeArrayIterator iterator(bytecode_array());
   set_bytecode_iterator(&iterator);
   while (!iterator.done()) {
@@ -869,6 +926,43 @@
   environment()->BindAccumulator(closure);
 }
 
+void BytecodeGraphBuilder::VisitCreateBlockContext() {
+  Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
+
+  const Operator* op = javascript()->CreateBlockContext(scope_info);
+  Node* context = NewNode(op, environment()->LookupAccumulator());
+  environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateFunctionContext() {
+  uint32_t slots = bytecode_iterator().GetIndexOperand(0);
+  const Operator* op = javascript()->CreateFunctionContext(slots);
+  Node* context = NewNode(op, GetFunctionClosure());
+  environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateCatchContext() {
+  interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
+  Node* exception = environment()->LookupRegister(reg);
+  Handle<String> name =
+      Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+  Node* closure = environment()->LookupAccumulator();
+
+  const Operator* op = javascript()->CreateCatchContext(name);
+  Node* context = NewNode(op, exception, closure);
+  environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateWithContext() {
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+
+  const Operator* op = javascript()->CreateWithContext();
+  Node* context = NewNode(op, object, environment()->LookupAccumulator());
+  environment()->BindAccumulator(context);
+}
+
 void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
   FrameStateBeforeAndAfter states(this);
   const Operator* op = javascript()->CreateArguments(type);
@@ -916,6 +1010,7 @@
 }
 
 void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
+  FrameStateBeforeAndAfter states(this);
   Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
@@ -924,9 +1019,12 @@
       interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
   // TODO(mstarzinger): Thread through number of properties.
   int number_of_properties = constant_properties->length() / 2;
-  const Operator* op = javascript()->CreateLiteralObject(
-      constant_properties, literal_flags, literal_index, number_of_properties);
-  BuildCreateLiteral(op);
+  Node* literal = NewNode(
+      javascript()->CreateLiteralObject(constant_properties, literal_flags,
+                                        literal_index, number_of_properties),
+      GetFunctionClosure());
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3),
+                              literal, &states);
 }
 
 Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
@@ -947,14 +1045,15 @@
 
 void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
   FrameStateBeforeAndAfter states(this);
-  // TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
-  // register has been loaded with null / undefined explicitly or we are sure it
-  // is not null / undefined.
   ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
   Node* callee =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+
+  // A slot index of 0 is used to indicate that no feedback slot is available.
+  // Assert the assumption that slot index 0 is never a valid feedback slot.
+  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
   VectorSlotPair feedback =
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
 
@@ -1078,6 +1177,7 @@
 }
 
 void BytecodeGraphBuilder::VisitThrow() {
+  BuildLoopExitsForFunctionExit();
   BuildThrow();
   Node* call = environment()->LookupAccumulator();
   Node* control = NewNode(common()->Throw(), call);
@@ -1085,6 +1185,7 @@
 }
 
 void BytecodeGraphBuilder::VisitReThrow() {
+  BuildLoopExitsForFunctionExit();
   Node* value = environment()->LookupAccumulator();
   Node* call = NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
   Node* control = NewNode(common()->Throw(), call);
@@ -1100,74 +1201,130 @@
   environment()->BindAccumulator(node, &states);
 }
 
+// Helper function to create binary operation hint from the recorded type
+// feedback.
+BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
+    int operand_index) {
+  FeedbackVectorSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(operand_index));
+  DCHECK_EQ(FeedbackVectorSlotKind::GENERAL, feedback_vector()->GetKind(slot));
+  Object* feedback = feedback_vector()->Get(slot);
+  BinaryOperationHint hint = BinaryOperationHint::kAny;
+  if (feedback->IsSmi()) {
+    hint = BinaryOperationHintFromFeedback((Smi::cast(feedback))->value());
+  }
+  return hint;
+}
+
 void BytecodeGraphBuilder::VisitAdd() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Add(hints));
+  BuildBinaryOp(
+      javascript()->Add(GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitSub() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Subtract(hints));
+  BuildBinaryOp(javascript()->Subtract(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitMul() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Multiply(hints));
+  BuildBinaryOp(javascript()->Multiply(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitDiv() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Divide(hints));
+  BuildBinaryOp(
+      javascript()->Divide(GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitMod() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Modulus(hints));
+  BuildBinaryOp(
+      javascript()->Modulus(GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitBitwiseOr() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseOr(hints));
+  BuildBinaryOp(javascript()->BitwiseOr(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitBitwiseXor() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseXor(hints));
+  BuildBinaryOp(javascript()->BitwiseXor(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitBitwiseAnd() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseAnd(hints));
+  BuildBinaryOp(javascript()->BitwiseAnd(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitShiftLeft() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftLeft(hints));
+  BuildBinaryOp(javascript()->ShiftLeft(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitShiftRight() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftRight(hints));
+  BuildBinaryOp(javascript()->ShiftRight(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitShiftRightLogical() {
-  BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftRightLogical(hints));
+  BuildBinaryOp(javascript()->ShiftRightLogical(
+      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+}
+
+void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
+  FrameStateBeforeAndAfter states(this);
+  Node* left =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+  Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
+  Node* node = NewNode(js_op, left, right);
+  environment()->BindAccumulator(node, &states);
+}
+
+void BytecodeGraphBuilder::VisitAddSmi() {
+  BuildBinaryOpWithImmediate(
+      javascript()->Add(GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitSubSmi() {
+  BuildBinaryOpWithImmediate(javascript()->Subtract(
+      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
+  BuildBinaryOpWithImmediate(javascript()->BitwiseOr(
+      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
+  BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(
+      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitShiftLeftSmi() {
+  BuildBinaryOpWithImmediate(javascript()->ShiftLeft(
+      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitShiftRightSmi() {
+  BuildBinaryOpWithImmediate(javascript()->ShiftRight(
+      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
 }
 
 void BytecodeGraphBuilder::VisitInc() {
   FrameStateBeforeAndAfter states(this);
   // Note: Use subtract -1 here instead of add 1 to ensure we always convert to
   // a number, not a string.
-  const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
+  const Operator* js_op =
+      javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
-                       jsgraph()->Constant(-1.0));
+                       jsgraph()->Constant(-1));
   environment()->BindAccumulator(node, &states);
 }
 
 void BytecodeGraphBuilder::VisitDec() {
   FrameStateBeforeAndAfter states(this);
-  const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
+  const Operator* js_op =
+      javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
                        jsgraph()->OneConstant());
   environment()->BindAccumulator(node, &states);
@@ -1222,38 +1379,38 @@
 }
 
 void BytecodeGraphBuilder::VisitTestEqual() {
-  CompareOperationHints hints = CompareOperationHints::Any();
-  BuildCompareOp(javascript()->Equal(hints));
+  CompareOperationHint hint = CompareOperationHint::kAny;
+  BuildCompareOp(javascript()->Equal(hint));
 }
 
 void BytecodeGraphBuilder::VisitTestNotEqual() {
-  CompareOperationHints hints = CompareOperationHints::Any();
-  BuildCompareOp(javascript()->NotEqual(hints));
+  CompareOperationHint hint = CompareOperationHint::kAny;
+  BuildCompareOp(javascript()->NotEqual(hint));
 }
 
 void BytecodeGraphBuilder::VisitTestEqualStrict() {
-  CompareOperationHints hints = CompareOperationHints::Any();
-  BuildCompareOp(javascript()->StrictEqual(hints));
+  CompareOperationHint hint = CompareOperationHint::kAny;
+  BuildCompareOp(javascript()->StrictEqual(hint));
 }
 
 void BytecodeGraphBuilder::VisitTestLessThan() {
-  CompareOperationHints hints = CompareOperationHints::Any();
-  BuildCompareOp(javascript()->LessThan(hints));
+  CompareOperationHint hint = CompareOperationHint::kAny;
+  BuildCompareOp(javascript()->LessThan(hint));
 }
 
 void BytecodeGraphBuilder::VisitTestGreaterThan() {
-  CompareOperationHints hints = CompareOperationHints::Any();
-  BuildCompareOp(javascript()->GreaterThan(hints));
+  CompareOperationHint hint = CompareOperationHint::kAny;
+  BuildCompareOp(javascript()->GreaterThan(hint));
 }
 
 void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
-  CompareOperationHints hints = CompareOperationHints::Any();
-  BuildCompareOp(javascript()->LessThanOrEqual(hints));
+  CompareOperationHint hint = CompareOperationHint::kAny;
+  BuildCompareOp(javascript()->LessThanOrEqual(hint));
 }
 
 void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
-  CompareOperationHints hints = CompareOperationHints::Any();
-  BuildCompareOp(javascript()->GreaterThanOrEqual(hints));
+  CompareOperationHint hint = CompareOperationHint::kAny;
+  BuildCompareOp(javascript()->GreaterThanOrEqual(hint));
 }
 
 void BytecodeGraphBuilder::VisitTestIn() {
@@ -1266,8 +1423,9 @@
 
 void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
   FrameStateBeforeAndAfter states(this);
-  Node* node = NewNode(js_op, environment()->LookupAccumulator());
-  environment()->BindAccumulator(node, &states);
+  Node* value = NewNode(js_op, environment()->LookupAccumulator());
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
+                              &states);
 }
 
 void BytecodeGraphBuilder::VisitToName() {
@@ -1347,7 +1505,17 @@
   environment()->RecordAfterState(node, &states);
 }
 
+void BytecodeGraphBuilder::VisitOsrPoll() {
+  // TODO(4764): This should be moved into the {VisitBytecodes} once we merge
+  // the polling with existing bytecode. This will also guarantee that we are
+  // not missing the OSR entry point, which we wouldn't catch right now.
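+  // For bytecode OSR, {osr_ast_id_} holds the bytecode offset of the OSR
+  // entry point.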
+  if (osr_ast_id_.ToInt() == bytecode_iterator().current_offset()) {
+    environment()->PrepareForOsr();
+  }
+}
+
 void BytecodeGraphBuilder::VisitReturn() {
+  BuildLoopExitsForFunctionExit();
   Node* control =
       NewNode(common()->Return(), environment()->LookupAccumulator());
   MergeControlToLeaveFunction(control);
@@ -1368,10 +1536,11 @@
 
 void BytecodeGraphBuilder::BuildForInPrepare() {
   FrameStateBeforeAndAfter states(this);
-  Node* receiver = environment()->LookupAccumulator();
+  Node* receiver =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
   environment()->BindRegistersToProjections(
-      bytecode_iterator().GetRegisterOperand(0), prepare, &states);
+      bytecode_iterator().GetRegisterOperand(1), prepare, &states);
 }
 
 void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
@@ -1493,6 +1662,7 @@
 }
 
 void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
+  BuildLoopExitsForBranch(target_offset);
   if (merge_environments_[target_offset] == nullptr) {
     // Append merge nodes to the environment. We may merge here with another
     // environment. So add a place holder for merge nodes. We may add redundant
@@ -1511,6 +1681,28 @@
   set_environment(nullptr);
 }
 
+void BytecodeGraphBuilder::BuildLoopExitsForBranch(int target_offset) {
+  int origin_offset = bytecode_iterator().current_offset();
+  // Only build loop exits for forward edges.
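+  // Backward edges are loop backedges; they merge into the loop header and
+  // need no loop exits.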
+  if (target_offset > origin_offset) {
+    BuildLoopExitsUntilLoop(loop_analysis()->GetLoopOffsetFor(target_offset));
+  }
+}
+
+void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(int loop_offset) {
+  int origin_offset = bytecode_iterator().current_offset();
+  int current_loop = loop_analysis()->GetLoopOffsetFor(origin_offset);
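+  // Walk the loop nesting from innermost to outermost, emitting one set of
+  // {LoopExit} renames per exited loop.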
+  while (loop_offset < current_loop) {
+    Node* loop_node = merge_environments_[current_loop]->GetControlDependency();
+    environment()->PrepareForLoopExit(loop_node);
+    current_loop = loop_analysis()->GetParentLoopFor(current_loop);
+  }
+}
+
+void BytecodeGraphBuilder::BuildLoopExitsForFunctionExit() {
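+  // The sentinel -1 means "not inside any loop" (see {GetLoopOffsetFor}),
+  // so this exits every enclosing loop.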
+  BuildLoopExitsUntilLoop(-1);
+}
+
 void BytecodeGraphBuilder::BuildJump() {
   MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
 }
@@ -1529,7 +1721,7 @@
 void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
   Node* accumulator = environment()->LookupAccumulator();
   Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
               accumulator, comperand);
   BuildConditionalJump(condition);
 }
@@ -1540,15 +1732,15 @@
   Node* to_boolean =
       NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
   Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
-              to_boolean, comperand);
+      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), to_boolean,
+              comperand);
   BuildConditionalJump(condition);
 }
 
 void BytecodeGraphBuilder::BuildJumpIfNotHole() {
   Node* accumulator = environment()->LookupAccumulator();
   Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
               accumulator, jsgraph()->TheHoleConstant());
   Node* node =
       NewNode(common()->Select(MachineRepresentation::kTagged), condition,
@@ -1583,10 +1775,8 @@
     int next_end = table->GetRangeEnd(current_exception_handler_);
     int next_handler = table->GetRangeHandler(current_exception_handler_);
     int context_register = table->GetRangeData(current_exception_handler_);
-    CatchPrediction pred =
-        table->GetRangePrediction(current_exception_handler_);
     exception_handlers_.push(
-        {next_start, next_end, next_handler, context_register, pred});
+        {next_start, next_end, next_handler, context_register});
     current_exception_handler_++;
   }
 }
@@ -1596,7 +1786,7 @@
   DCHECK_EQ(op->ValueInputCount(), value_input_count);
 
   bool has_context = OperatorProperties::HasContextInput(op);
-  int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
+  bool has_frame_state = OperatorProperties::HasFrameStateInput(op);
   bool has_control = op->ControlInputCount() == 1;
   bool has_effect = op->EffectInputCount() == 1;
 
@@ -1604,13 +1794,13 @@
   DCHECK_LT(op->EffectInputCount(), 2);
 
   Node* result = nullptr;
-  if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
+  if (!has_context && !has_frame_state && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
     bool inside_handler = !exception_handlers_.empty();
     int input_count_with_deps = value_input_count;
     if (has_context) ++input_count_with_deps;
-    input_count_with_deps += frame_state_count;
+    if (has_frame_state) ++input_count_with_deps;
     if (has_control) ++input_count_with_deps;
     if (has_effect) ++input_count_with_deps;
     Node** buffer = EnsureInputBufferSize(input_count_with_deps);
@@ -1619,7 +1809,7 @@
     if (has_context) {
       *current_input++ = environment()->Context();
     }
-    for (int i = 0; i < frame_state_count; i++) {
+    if (has_frame_state) {
       // The frame state will be inserted later. Here we misuse
       // the {Dead} node as a sentinel to be later overwritten
       // with the real frame state.
@@ -1644,13 +1834,9 @@
     if (!result->op()->HasProperty(Operator::kNoThrow) && inside_handler) {
       int handler_offset = exception_handlers_.top().handler_offset_;
       int context_index = exception_handlers_.top().context_register_;
-      CatchPrediction prediction = exception_handlers_.top().pred_;
       interpreter::Register context_register(context_index);
-      IfExceptionHint hint = prediction == CatchPrediction::CAUGHT
-                                 ? IfExceptionHint::kLocallyCaught
-                                 : IfExceptionHint::kLocallyUncaught;
       Environment* success_env = environment()->CopyForConditional();
-      const Operator* op = common()->IfException(hint);
+      const Operator* op = common()->IfException();
       Node* effect = environment()->GetEffectDependency();
       Node* on_exception = graph()->NewNode(op, effect, result);
       Node* context = environment()->LookupRegister(context_register);
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 66cd96e..2f3acc1 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -7,8 +7,11 @@
 
 #include "src/compiler.h"
 #include "src/compiler/bytecode-branch-analysis.h"
+#include "src/compiler/bytecode-loop-analysis.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/type-hint-analyzer.h"
 #include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecodes.h"
 
 namespace v8 {
@@ -124,6 +127,7 @@
   void BuildCall(TailCallMode tail_call_mode);
   void BuildThrow();
   void BuildBinaryOp(const Operator* op);
+  void BuildBinaryOpWithImmediate(const Operator* op);
   void BuildCompareOp(const Operator* op);
   void BuildDelete(LanguageMode language_mode);
   void BuildCastOperator(const Operator* op);
@@ -131,6 +135,10 @@
   void BuildForInNext();
   void BuildInvokeIntrinsic();
 
+  // Helper function to create binary operation hint from the recorded
+  // type feedback.
+  BinaryOperationHint GetBinaryOperationHint(int operand_index);
+
   // Control flow plumbing.
   void BuildJump();
   void BuildConditionalJump(Node* condition);
@@ -146,6 +154,12 @@
   // Simulates control flow that exits the function body.
   void MergeControlToLeaveFunction(Node* exit);
 
+  // Builds loop exit nodes for every exited loop between the current bytecode
+  // offset and {target_offset}.
+  void BuildLoopExitsForBranch(int target_offset);
+  void BuildLoopExitsForFunctionExit();
+  void BuildLoopExitsUntilLoop(int loop_offset);
+
   // Simulates entry and exit of exception handlers.
   void EnterAndExitExceptionHandlers(int current_offset);
 
@@ -153,9 +167,6 @@
   // new nodes.
   static const int kInputBufferSizeIncrement = 64;
 
-  // The catch prediction from the handler table is reused.
-  typedef HandlerTable::CatchPrediction CatchPrediction;
-
   // An abstract representation for an exception handler that is being
   // entered and exited while the graph builder is iterating over the
   // underlying bytecode. The exception handlers within the bytecode are
@@ -165,7 +176,6 @@
     int end_offset_;        // End offset of the handled area in the bytecode.
     int handler_offset_;    // Handler entry offset within the bytecode.
     int context_register_;  // Index of register holding handler context.
-    CatchPrediction pred_;  // Prediction of whether handler is catching.
   };
 
   // Field accessors
@@ -205,6 +215,12 @@
     branch_analysis_ = branch_analysis;
   }
 
+  const BytecodeLoopAnalysis* loop_analysis() const { return loop_analysis_; }
+
+  void set_loop_analysis(const BytecodeLoopAnalysis* loop_analysis) {
+    loop_analysis_ = loop_analysis;
+  }
+
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
   BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
 #undef DECLARE_VISIT_BYTECODE
@@ -217,7 +233,9 @@
   const FrameStateFunctionInfo* frame_state_function_info_;
   const interpreter::BytecodeArrayIterator* bytecode_iterator_;
   const BytecodeBranchAnalysis* branch_analysis_;
+  const BytecodeLoopAnalysis* loop_analysis_;
   Environment* environment_;
+  BailoutId osr_ast_id_;
 
   // Merge environments are snapshots of the environment at points where the
   // control flow merges. This models a forward data flow propagation of all
@@ -240,6 +258,10 @@
   // Control nodes that exit the function body.
   ZoneVector<Node*> exit_controls_;
 
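+  // Operand indices of the type feedback slot in the corresponding bytecodes.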
+  static int const kBinaryOperationHintIndex = 1;
+  static int const kCountOperationHintIndex = 0;
+  static int const kBinaryOperationSmiHintIndex = 2;
+
   DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
 };
 
diff --git a/src/compiler/bytecode-loop-analysis.cc b/src/compiler/bytecode-loop-analysis.cc
new file mode 100644
index 0000000..03c11f7
--- /dev/null
+++ b/src/compiler/bytecode-loop-analysis.cc
@@ -0,0 +1,100 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-loop-analysis.h"
+
+#include "src/compiler/bytecode-branch-analysis.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BytecodeLoopAnalysis::BytecodeLoopAnalysis(
+    Handle<BytecodeArray> bytecode_array,
+    const BytecodeBranchAnalysis* branch_analysis, Zone* zone)
+    : bytecode_array_(bytecode_array),
+      branch_analysis_(branch_analysis),
+      zone_(zone),
+      current_loop_offset_(-1),
+      found_current_backedge_(false),
+      backedge_to_header_(zone),
+      loop_header_to_parent_(zone) {}
+
+void BytecodeLoopAnalysis::Analyze() {
+  current_loop_offset_ = -1;
+  found_current_backedge_ = false;
+  interpreter::BytecodeArrayIterator iterator(bytecode_array());
+  while (!iterator.done()) {
+    interpreter::Bytecode bytecode = iterator.current_bytecode();
+    int current_offset = iterator.current_offset();
+    if (branch_analysis_->backward_branches_target(current_offset)) {
+      AddLoopEntry(current_offset);
+    } else if (interpreter::Bytecodes::IsJump(bytecode)) {
+      AddBranch(current_offset, iterator.GetJumpTargetOffset());
+    }
+    iterator.Advance();
+  }
+}
+
+void BytecodeLoopAnalysis::AddLoopEntry(int entry_offset) {
+  if (found_current_backedge_) {
+    // We assume that all backedges of a loop must occur together and before
+    // another loop entry or an outer loop backedge.
+    // This is guaranteed by the invariants from AddBranch, such that every
+    // backedge must either go to the current loop or be the first of the
+    // backedges to the parent loop.
+    // Thus here, the current loop actually ended before and we have a loop
+    // with the same parent.
+    current_loop_offset_ = loop_header_to_parent_[current_loop_offset_];
+    found_current_backedge_ = false;
+  }
+  loop_header_to_parent_[entry_offset] = current_loop_offset_;
+  current_loop_offset_ = entry_offset;
+}
+
+void BytecodeLoopAnalysis::AddBranch(int origin_offset, int target_offset) {
+  // If this is a backedge, record it.
+  if (target_offset < origin_offset) {
+    backedge_to_header_[origin_offset] = target_offset;
+    // Check whether this is actually a backedge of the outer loop and we have
+    // already finished the current loop.
+    if (target_offset < current_loop_offset_) {
+      DCHECK(found_current_backedge_);
+      int parent_offset = loop_header_to_parent_[current_loop_offset_];
+      DCHECK_EQ(target_offset, parent_offset);
+      current_loop_offset_ = parent_offset;
+    } else {
+      DCHECK_EQ(target_offset, current_loop_offset_);
+      found_current_backedge_ = true;
+    }
+  }
+}
+
+int BytecodeLoopAnalysis::GetLoopOffsetFor(int offset) const {
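+  // Worked example (hypothetical offsets): an outer loop with header 10 and
+  // backedge 50 contains an inner loop with header 20 and backedge 40. For
+  // offset 15, the next backedge is 40 with header 20; since 20 > 15, the
+  // parent of that inner loop, i.e. 10, is returned below.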
+  auto next_backedge = backedge_to_header_.lower_bound(offset);
+  // If there is no further backedge, the offset is not inside any loop.
+  if (next_backedge == backedge_to_header_.end()) {
+    return -1;
+  }
+  // If that backedge's header precedes the offset, the offset lies inside
+  // the corresponding loop, so return that header.
+  if (next_backedge->second <= offset) {
+    return next_backedge->second;
+  }
+  // Otherwise there is a nested loop after this offset. We just return the
+  // parent of the next nested loop.
+  return loop_header_to_parent_.upper_bound(offset)->second;
+}
+
+int BytecodeLoopAnalysis::GetParentLoopFor(int header_offset) const {
+  auto parent = loop_header_to_parent_.find(header_offset);
+  DCHECK(parent != loop_header_to_parent_.end());
+  return parent->second;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/bytecode-loop-analysis.h b/src/compiler/bytecode-loop-analysis.h
new file mode 100644
index 0000000..59fabce
--- /dev/null
+++ b/src/compiler/bytecode-loop-analysis.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
+
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class BytecodeBranchAnalysis;
+
+class BytecodeLoopAnalysis BASE_EMBEDDED {
+ public:
+  BytecodeLoopAnalysis(Handle<BytecodeArray> bytecode_array,
+                       const BytecodeBranchAnalysis* branch_analysis,
+                       Zone* zone);
+
+  // Analyze the bytecodes to find loop headers and the nesting of the
+  // loops. No other methods in this class return valid information until
+  // this has been called.
+  void Analyze();
+
+  // Get the loop header offset of the containing loop for arbitrary
+  // {offset}, or -1 if the {offset} is not inside any loop.
+  int GetLoopOffsetFor(int offset) const;
+  // Gets the loop header offset of the parent loop of the loop header
+  // at {header_offset}, or -1 for outer-most loops.
+  int GetParentLoopFor(int header_offset) const;
+
+ private:
+  void AddLoopEntry(int entry_offset);
+  void AddBranch(int origin_offset, int target_offset);
+
+  Zone* zone() const { return zone_; }
+  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+  Handle<BytecodeArray> bytecode_array_;
+  const BytecodeBranchAnalysis* branch_analysis_;
+  Zone* zone_;
+
+  int current_loop_offset_;
+  bool found_current_backedge_;
+
+  // Map from the offset of a backedge jump to the offset of the corresponding
+  // loop header. There might be multiple backedges for do-while loops.
+  ZoneMap<int, int> backedge_to_header_;
+  // Map from the offset of a loop header to the offset of its parent's loop
+  // header. This map will have as many entries as there are loops in the
+  // function.
+  ZoneMap<int, int> loop_header_to_parent_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeLoopAnalysis);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc
index b38e529..f79497a 100644
--- a/src/compiler/c-linkage.cc
+++ b/src/compiler/c-linkage.cc
@@ -14,8 +14,8 @@
 namespace compiler {
 
 namespace {
-LinkageLocation regloc(Register reg) {
-  return LinkageLocation::ForRegister(reg.code());
+LinkageLocation regloc(Register reg, MachineType type) {
+  return LinkageLocation::ForRegister(reg.code(), type);
 }
 
 
@@ -182,21 +182,20 @@
   CHECK(locations.return_count_ <= 2);
 
   if (locations.return_count_ > 0) {
-    locations.AddReturn(regloc(kReturnRegister0));
+    locations.AddReturn(regloc(kReturnRegister0, msig->GetReturn(0)));
   }
   if (locations.return_count_ > 1) {
-    locations.AddReturn(regloc(kReturnRegister1));
+    locations.AddReturn(regloc(kReturnRegister1, msig->GetReturn(1)));
   }
 
   const int parameter_count = static_cast<int>(msig->parameter_count());
 
 #ifdef PARAM_REGISTERS
-  static const Register kParamRegisters[] = {PARAM_REGISTERS};
-  static const int kParamRegisterCount =
-      static_cast<int>(arraysize(kParamRegisters));
+  const Register kParamRegisters[] = {PARAM_REGISTERS};
+  const int kParamRegisterCount = static_cast<int>(arraysize(kParamRegisters));
 #else
-  static const Register* kParamRegisters = nullptr;
-  static const int kParamRegisterCount = 0;
+  const Register* kParamRegisters = nullptr;
+  const int kParamRegisterCount = 0;
 #endif
 
 #ifdef STACK_SHADOW_WORDS
@@ -207,10 +206,10 @@
   // Add register and/or stack parameter(s).
   for (int i = 0; i < parameter_count; i++) {
     if (i < kParamRegisterCount) {
-      locations.AddParam(regloc(kParamRegisters[i]));
+      locations.AddParam(regloc(kParamRegisters[i], msig->GetParam(i)));
     } else {
-      locations.AddParam(
-          LinkageLocation::ForCallerFrameSlot(-1 - stack_offset));
+      locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+          -1 - stack_offset, msig->GetParam(i)));
       stack_offset++;
     }
   }
@@ -229,7 +228,7 @@
 
   // The target for C calls is always an address (i.e. machine pointer).
   MachineType target_type = MachineType::Pointer();
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
   CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
   if (set_initialize_root_flag) {
     flags |= CallDescriptor::kInitializeRootRegister;
@@ -239,7 +238,6 @@
       CallDescriptor::kCallAddress,  // kind
       target_type,                   // target MachineType
       target_loc,                    // target location
-      msig,                          // machine_sig
       locations.Build(),             // location_sig
       0,                             // stack_parameter_count
       Operator::kNoProperties,       // properties
diff --git a/src/compiler/checkpoint-elimination.cc b/src/compiler/checkpoint-elimination.cc
index d81e109..d44dfdf 100644
--- a/src/compiler/checkpoint-elimination.cc
+++ b/src/compiler/checkpoint-elimination.cc
@@ -30,14 +30,24 @@
 
 }  // namespace
 
-Reduction CheckpointElimination::Reduce(Node* node) {
-  if (node->opcode() != IrOpcode::kCheckpoint) return NoChange();
+Reduction CheckpointElimination::ReduceCheckpoint(Node* node) {
+  DCHECK_EQ(IrOpcode::kCheckpoint, node->opcode());
   if (IsRedundantCheckpoint(node)) {
     return Replace(NodeProperties::GetEffectInput(node));
   }
   return NoChange();
 }
 
+Reduction CheckpointElimination::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kCheckpoint:
+      return ReduceCheckpoint(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/checkpoint-elimination.h b/src/compiler/checkpoint-elimination.h
index 4d6aada..edaa0e7 100644
--- a/src/compiler/checkpoint-elimination.h
+++ b/src/compiler/checkpoint-elimination.h
@@ -18,6 +18,9 @@
   ~CheckpointElimination() final {}
 
   Reduction Reduce(Node* node) final;
+
+ private:
+  Reduction ReduceCheckpoint(Node* node);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index e598c09..4dd7e79 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -51,7 +51,8 @@
     : raw_assembler_(new RawMachineAssembler(
           isolate, new (zone) Graph(zone), call_descriptor,
           MachineType::PointerRepresentation(),
-          InstructionSelector::SupportedMachineOperatorFlags())),
+          InstructionSelector::SupportedMachineOperatorFlags(),
+          InstructionSelector::AlignmentRequirements())),
       flags_(flags),
       name_(name),
       code_generated_(false),
@@ -68,8 +69,8 @@
 
   Schedule* schedule = raw_assembler_->Export();
   Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
-      name_);
+      isolate(), raw_assembler_->call_descriptor(), raw_assembler_->graph(),
+      schedule, flags_, name_);
 
   code_generated_ = true;
   return code;
@@ -197,10 +198,6 @@
   return raw_assembler_->LoadStackPointer();
 }
 
-Node* CodeAssembler::SmiShiftBitsConstant() {
-  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
 #define DEFINE_CODE_ASSEMBLER_BINARY_OP(name)   \
   Node* CodeAssembler::name(Node* a, Node* b) { \
     return raw_assembler_->name(a, b);          \
@@ -209,11 +206,18 @@
 #undef DEFINE_CODE_ASSEMBLER_BINARY_OP
 
 Node* CodeAssembler::WordShl(Node* value, int shift) {
-  return raw_assembler_->WordShl(value, IntPtrConstant(shift));
+  return (shift != 0) ? raw_assembler_->WordShl(value, IntPtrConstant(shift))
+                      : value;
 }
 
 Node* CodeAssembler::WordShr(Node* value, int shift) {
-  return raw_assembler_->WordShr(value, IntPtrConstant(shift));
+  return (shift != 0) ? raw_assembler_->WordShr(value, IntPtrConstant(shift))
+                      : value;
+}
+
+Node* CodeAssembler::Word32Shr(Node* value, int shift) {
+  return (shift != 0) ? raw_assembler_->Word32Shr(value, Int32Constant(shift))
+                      : value;
 }
 
 Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
@@ -308,6 +312,26 @@
   Goto(if_false);
 }
 
+void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
+                                    Variable* exception_var) {
+  Label success(this), exception(this, Label::kDeferred);
+  success.MergeVariables();
+  exception.MergeVariables();
+  DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+
+  raw_assembler_->Continuations(node, success.label_, exception.label_);
+
+  Bind(&exception);
+  const Operator* op = raw_assembler_->common()->IfException();
+  Node* exception_value = raw_assembler_->AddNode(op, node, node);
+  if (exception_var != nullptr) {
+    exception_var->Bind(exception_value);
+  }
+  Goto(if_exception);
+
+  Bind(&success);
+}
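+// Illustrative use (names are placeholders): route a throwing call's
+// exception to a deferred handler while the happy path falls through:
+//   Label if_exception(this, Label::kDeferred);
+//   Node* result = CallStub(callable, context, receiver);
+//   GotoIfException(result, &if_exception);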
+
 Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
                            Node** args) {
   CallPrologue();
@@ -394,6 +418,13 @@
                                           context);
 }
 
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3, Node* arg4, Node* arg5) {
+  return raw_assembler_->TailCallRuntime5(function_id, arg1, arg2, arg3, arg4,
+                                          arg5, context);
+}
+
 Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
                               Node* arg1, size_t result_size) {
   Node* target = HeapConstant(callable.code());
@@ -422,6 +453,19 @@
 }
 
 Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(1);
+  args[0] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                               Node* target, Node* context, Node* arg1,
                               size_t result_size) {
   CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
@@ -508,6 +552,91 @@
   return CallN(call_descriptor, target, args);
 }
 
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, const Arg& arg1,
+                              const Arg& arg2, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  const int kArgsCount = 3;
+  Node** args = zone()->NewArray<Node*>(kArgsCount);
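+  // Debug-only: the comma expression always yields true, so the fill runs
+  // solely in DCHECK-enabled builds; it lets the DCHECK_EQ below verify that
+  // the Arg indices populated every slot.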
+  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+  args[arg1.index] = arg1.value;
+  args[arg2.index] = arg2.value;
+  args[kArgsCount - 1] = context;
+  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, const Arg& arg1,
+                              const Arg& arg2, const Arg& arg3,
+                              size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  const int kArgsCount = 4;
+  Node** args = zone()->NewArray<Node*>(kArgsCount);
+  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+  args[arg1.index] = arg1.value;
+  args[arg2.index] = arg2.value;
+  args[arg3.index] = arg3.value;
+  args[kArgsCount - 1] = context;
+  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, const Arg& arg1,
+                              const Arg& arg2, const Arg& arg3, const Arg& arg4,
+                              size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  const int kArgsCount = 5;
+  Node** args = zone()->NewArray<Node*>(kArgsCount);
+  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+  args[arg1.index] = arg1.value;
+  args[arg2.index] = arg2.value;
+  args[arg3.index] = arg3.value;
+  args[arg4.index] = arg4.value;
+  args[kArgsCount - 1] = context;
+  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, const Arg& arg1,
+                              const Arg& arg2, const Arg& arg3, const Arg& arg4,
+                              const Arg& arg5, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  const int kArgsCount = 6;
+  Node** args = zone()->NewArray<Node*>(kArgsCount);
+  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+  args[arg1.index] = arg1.value;
+  args[arg2.index] = arg2.value;
+  args[arg3.index] = arg3.value;
+  args[arg4.index] = arg4.value;
+  args[arg5.index] = arg5.value;
+  args[kArgsCount - 1] = context;
+  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+  return CallN(call_descriptor, target, args);
+}
+
 Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
                                Node* target, Node** args, size_t result_size) {
   CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
@@ -519,6 +648,13 @@
 }
 
 Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+                                  Node* arg1, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return TailCallStub(callable.descriptor(), target, context, arg1,
+                      result_size);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
                                   Node* arg1, Node* arg2, size_t result_size) {
   Node* target = HeapConstant(callable.code());
   return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
@@ -533,6 +669,29 @@
                       result_size);
 }
 
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+                                  Node* arg1, Node* arg2, Node* arg3,
+                                  Node* arg4, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+                      arg4, result_size);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(2);
+  args[0] = arg1;
+  args[1] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
 Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
                                   Node* target, Node* context, Node* arg1,
                                   Node* arg2, size_t result_size) {
@@ -585,6 +744,52 @@
   return raw_assembler_->TailCallN(call_descriptor, target, args);
 }
 
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, const Arg& arg1,
+                                  const Arg& arg2, const Arg& arg3,
+                                  const Arg& arg4, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  const int kArgsCount = 5;
+  Node** args = zone()->NewArray<Node*>(kArgsCount);
+  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+  args[arg1.index] = arg1.value;
+  args[arg2.index] = arg2.value;
+  args[arg3.index] = arg3.value;
+  args[arg4.index] = arg4.value;
+  args[kArgsCount - 1] = context;
+  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, const Arg& arg1,
+                                  const Arg& arg2, const Arg& arg3,
+                                  const Arg& arg4, const Arg& arg5,
+                                  size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  const int kArgsCount = 6;
+  Node** args = zone()->NewArray<Node*>(kArgsCount);
+  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+  args[arg1.index] = arg1.value;
+  args[arg2.index] = arg2.value;
+  args[arg3.index] = arg3.value;
+  args[arg4.index] = arg4.value;
+  args[arg5.index] = arg5.value;
+  args[kArgsCount - 1] = context;
+  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
 Node* CodeAssembler::TailCallBytecodeDispatch(
     const CallInterfaceDescriptor& interface_descriptor,
     Node* code_target_address, Node** args) {
@@ -680,7 +885,7 @@
 }
 
 void CodeAssembler::Switch(Node* index, Label* default_label,
-                           int32_t* case_values, Label** case_labels,
+                           const int32_t* case_values, Label** case_labels,
                            size_t case_count) {
   RawMachineLabel** labels =
       new (zone()->New(sizeof(RawMachineLabel*) * case_count))
@@ -694,13 +899,32 @@
                                 labels, case_count);
 }
 
+Node* CodeAssembler::Select(Node* condition, Node* true_value,
+                            Node* false_value, MachineRepresentation rep) {
+  Variable value(this, rep);
+  Label vtrue(this), vfalse(this), end(this);
+  Branch(condition, &vtrue, &vfalse);
+
+  Bind(&vtrue);
+  {
+    value.Bind(true_value);
+    Goto(&end);
+  }
+  Bind(&vfalse);
+  {
+    value.Bind(false_value);
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return value.value();
+}
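+// Illustrative use: pick the smaller of two untagged word32 values without
+// writing the branch diamond by hand:
+//   Node* min = Select(Uint32LessThan(a, b), a, b,
+//                      MachineRepresentation::kWord32);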
+
 // RawMachineAssembler delegate helpers:
 Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
 
 Factory* CodeAssembler::factory() const { return isolate()->factory(); }
 
-Graph* CodeAssembler::graph() const { return raw_assembler_->graph(); }
-
 Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
 
 // The core implementation of Variable is stored through an indirection so
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index c33605c..bea999b 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -6,11 +6,12 @@
 #define V8_COMPILER_CODE_ASSEMBLER_H_
 
 #include <map>
+#include <memory>
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
 #include "src/allocation.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
 #include "src/heap/heap.h"
 #include "src/machine-type.h"
 #include "src/runtime/runtime.h"
@@ -28,12 +29,9 @@
 namespace compiler {
 
 class CallDescriptor;
-class Graph;
 class Node;
-class Operator;
 class RawMachineAssembler;
 class RawMachineLabel;
-class Schedule;
 
 #define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
   V(Float32Equal)                                \
@@ -56,6 +54,7 @@
   V(IntPtrGreaterThanOrEqual)                    \
   V(IntPtrEqual)                                 \
   V(Uint32LessThan)                              \
+  V(Uint32GreaterThanOrEqual)                    \
   V(UintPtrLessThan)                             \
   V(UintPtrGreaterThanOrEqual)                   \
   V(WordEqual)                                   \
@@ -73,6 +72,7 @@
   V(Float64Div)                            \
   V(Float64Mod)                            \
   V(Float64Atan2)                          \
+  V(Float64Pow)                            \
   V(Float64InsertLowWord32)                \
   V(Float64InsertHighWord32)               \
   V(IntPtrAdd)                             \
@@ -84,7 +84,9 @@
   V(Int32AddWithOverflow)                  \
   V(Int32Sub)                              \
   V(Int32Mul)                              \
+  V(Int32MulWithOverflow)                  \
   V(Int32Div)                              \
+  V(Int32Mod)                              \
   V(WordOr)                                \
   V(WordAnd)                               \
   V(WordXor)                               \
@@ -107,9 +109,15 @@
   V(Word64Ror)
 
 #define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
+  V(Float64Abs)                         \
+  V(Float64Acos)                        \
+  V(Float64Acosh)                       \
+  V(Float64Asin)                        \
+  V(Float64Asinh)                       \
   V(Float64Atan)                        \
   V(Float64Atanh)                       \
   V(Float64Cos)                         \
+  V(Float64Cosh)                        \
   V(Float64Exp)                         \
   V(Float64Expm1)                       \
   V(Float64Log)                         \
@@ -119,13 +127,17 @@
   V(Float64Cbrt)                        \
   V(Float64Neg)                         \
   V(Float64Sin)                         \
+  V(Float64Sinh)                        \
   V(Float64Sqrt)                        \
   V(Float64Tan)                         \
+  V(Float64Tanh)                        \
   V(Float64ExtractLowWord32)            \
   V(Float64ExtractHighWord32)           \
   V(BitcastWordToTagged)                \
+  V(TruncateFloat64ToFloat32)           \
   V(TruncateFloat64ToWord32)            \
   V(TruncateInt64ToInt32)               \
+  V(ChangeFloat32ToFloat64)             \
   V(ChangeFloat64ToUint32)              \
   V(ChangeInt32ToFloat64)               \
   V(ChangeInt32ToInt64)                 \
@@ -192,14 +204,6 @@
     CodeAssembler* assembler_;
   };
 
-  enum AllocationFlag : uint8_t {
-    kNone = 0,
-    kDoubleAlignment = 1,
-    kPretenured = 1 << 1
-  };
-
-  typedef base::Flags<AllocationFlag> AllocationFlags;
-
   // ===========================================================================
   // Base Assembler
   // ===========================================================================
@@ -232,9 +236,12 @@
   void GotoUnless(Node* condition, Label* false_label);
   void Branch(Node* condition, Label* true_label, Label* false_label);
 
-  void Switch(Node* index, Label* default_label, int32_t* case_values,
+  void Switch(Node* index, Label* default_label, const int32_t* case_values,
               Label** case_labels, size_t case_count);
 
+  Node* Select(Node* condition, Node* true_value, Node* false_value,
+               MachineRepresentation rep = MachineRepresentation::kTagged);
+
   // Access to the frame pointer
   Node* LoadFramePointer();
   Node* LoadParentFramePointer();
@@ -269,6 +276,7 @@
 
   Node* WordShl(Node* value, int shift);
   Node* WordShr(Node* value, int shift);
+  Node* Word32Shr(Node* value, int shift);
 
 // Unary
 #define DECLARE_CODE_ASSEMBLER_UNARY_OP(name) Node* name(Node* a);
@@ -304,6 +312,18 @@
                         Node* arg1, Node* arg2, Node* arg3);
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                        Node* arg5);
+
+  // A pair of a zero-based argument index and a value.
+  // It helps write argument-order-independent code.
+  struct Arg {
+    Arg(int index, Node* value) : index(index), value(value) {}
+
+    int const index;
+    Node* const value;
+  };
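+  // Illustrative use with the Arg-based CallStub/TailCallStub overloads
+  // below (kLhsIndex/kRhsIndex stand in for a descriptor's parameter
+  // indices):
+  //   CallStub(descriptor, target, context,
+  //            Arg(kLhsIndex, lhs), Arg(kRhsIndex, rhs));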
 
   Node* CallStub(Callable const& callable, Node* context, Node* arg1,
                  size_t result_size = 1);
@@ -315,6 +335,8 @@
                   size_t result_size = 1);
 
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node* context, Node* arg1, size_t result_size = 1);
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
@@ -327,13 +349,35 @@
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                  Node* arg5, size_t result_size = 1);
+
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, const Arg& arg1, const Arg& arg2,
+                 size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, const Arg& arg1, const Arg& arg2,
+                 const Arg& arg3, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, const Arg& arg1, const Arg& arg2,
+                 const Arg& arg3, const Arg& arg4, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, const Arg& arg1, const Arg& arg2,
+                 const Arg& arg3, const Arg& arg4, const Arg& arg5,
+                 size_t result_size = 1);
+
   Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
                   Node** args, size_t result_size = 1);
 
   Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+                     size_t result_size = 1);
+  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
                      Node* arg2, size_t result_size = 1);
   Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
                      Node* arg2, Node* arg3, size_t result_size = 1);
+  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+                     Node* arg2, Node* arg3, Node* arg4,
+                     size_t result_size = 1);
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, size_t result_size = 1);
   Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                      Node* context, Node* arg1, Node* arg2,
                      size_t result_size = 1);
@@ -344,6 +388,14 @@
                      Node* context, Node* arg1, Node* arg2, Node* arg3,
                      Node* arg4, size_t result_size = 1);
 
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, const Arg& arg1, const Arg& arg2,
+                     const Arg& arg3, const Arg& arg4, size_t result_size = 1);
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, const Arg& arg1, const Arg& arg2,
+                     const Arg& arg3, const Arg& arg4, const Arg& arg5,
+                     size_t result_size = 1);
+
   Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
                                  Node* code_target_address, Node** args);
 
@@ -354,6 +406,10 @@
   Node* CallJS(Callable const& callable, Node* context, Node* function,
                Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
 
+  // Exception handling support.
+  void GotoIfException(Node* node, Label* if_exception,
+                       Variable* exception_var = nullptr);
+
   // Branching helpers.
   void BranchIf(Node* condition, Label* if_true, Label* if_false);
 
@@ -370,11 +426,6 @@
   Zone* zone() const;
 
  protected:
-  // Protected helpers which delegate to RawMachineAssembler.
-  Graph* graph() const;
-
-  Node* SmiShiftBitsConstant();
-
   // Enables subclasses to perform operations before and after a call.
   virtual void CallPrologue();
   virtual void CallEpilogue();
@@ -386,7 +437,7 @@
   Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
   Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
 
-  base::SmartPointer<RawMachineAssembler> raw_assembler_;
+  std::unique_ptr<RawMachineAssembler> raw_assembler_;
   Code::Flags flags_;
   const char* name_;
   bool code_generated_;
@@ -395,8 +446,6 @@
   DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
 };
 
-DEFINE_OPERATORS_FOR_FLAGS(CodeAssembler::AllocationFlags);
-
 class CodeAssembler::Label {
  public:
   enum Type { kDeferred, kNonDeferred };
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 4e09a27..4dccdc9 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -39,6 +39,10 @@
     return ToDoubleRegister(instr_->InputAt(index));
   }
 
+  Simd128Register InputSimd128Register(size_t index) {
+    return ToSimd128Register(instr_->InputAt(index));
+  }
+
   double InputDouble(size_t index) { return ToDouble(instr_->InputAt(index)); }
 
   float InputFloat32(size_t index) { return ToFloat32(instr_->InputAt(index)); }
@@ -101,6 +105,10 @@
     return ToDoubleRegister(instr_->Output());
   }
 
+  Simd128Register OutputSimd128Register() {
+    return ToSimd128Register(instr_->Output());
+  }
+
   // -- Conversions for operands -----------------------------------------------
 
   Label* ToLabel(InstructionOperand* op) {
@@ -115,12 +123,16 @@
     return LocationOperand::cast(op)->GetRegister();
   }
 
+  FloatRegister ToFloatRegister(InstructionOperand* op) {
+    return LocationOperand::cast(op)->GetFloatRegister();
+  }
+
   DoubleRegister ToDoubleRegister(InstructionOperand* op) {
     return LocationOperand::cast(op)->GetDoubleRegister();
   }
 
-  FloatRegister ToFloatRegister(InstructionOperand* op) {
-    return LocationOperand::cast(op)->GetFloatRegister();
+  Simd128Register ToSimd128Register(InstructionOperand* op) {
+    return LocationOperand::cast(op)->GetSimd128Register();
   }
 
   Constant ToConstant(InstructionOperand* op) {
@@ -198,8 +210,6 @@
 static inline void FinishCode(MacroAssembler* masm) {
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
   masm->CheckConstPool(true, false);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-  masm->ud2();
 #endif
 }
 
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index f388659..03136a7 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/code-generator.h"
 
 #include "src/address-map.h"
+#include "src/base/adapters.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline.h"
@@ -36,11 +37,12 @@
     : frame_access_state_(nullptr),
       linkage_(linkage),
       code_(code),
+      unwinding_info_writer_(zone()),
       info_(info),
       labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
       current_block_(RpoNumber::Invalid()),
       current_source_position_(SourcePosition::Unknown()),
-      masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kYes),
+      masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
       resolver_(this),
       safepoints_(code->zone()),
       handlers_(code->zone()),
@@ -52,7 +54,9 @@
       last_lazy_deopt_pc_(0),
       jump_tables_(nullptr),
       ools_(nullptr),
-      osr_pc_offset_(-1) {
+      osr_pc_offset_(-1),
+      source_position_table_builder_(code->zone(),
+                                     info->SourcePositionRecordingMode()) {
   for (int i = 0; i < code->InstructionBlockCount(); ++i) {
     new (&labels_[i]) Label;
   }
@@ -72,10 +76,6 @@
   // the frame (that is done in AssemblePrologue).
   FrameScope frame_scope(masm(), StackFrame::MANUAL);
 
-  // Emit a code line info recording start event.
-  PositionsRecorder* recorder = masm()->positions_recorder();
-  LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
-
   // Place function entry hook if requested to do so.
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm());
@@ -102,6 +102,9 @@
     }
   }
 
+  unwinding_info_writer_.SetNumberOfInstructionBlocks(
+      code()->InstructionBlockCount());
+
   // Assemble all non-deferred blocks, followed by deferred ones.
   for (int deferred = 0; deferred < 2; ++deferred) {
     for (const InstructionBlock* block : code()->instruction_blocks()) {
@@ -114,6 +117,7 @@
       if (block->IsHandler()) EnsureSpaceForLazyDeopt();
       // Bind a label for a block.
       current_block_ = block->rpo_number();
+      unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
       if (FLAG_code_comments) {
         // TODO(titzer): these code comments are a giant memory leak.
         Vector<char> buffer = Vector<char>::New(200);
@@ -164,6 +168,7 @@
         result = AssembleBlock(block);
       }
       if (result != kSuccess) return Handle<Code>();
+      unwinding_info_writer_.EndInstructionBlock(block);
     }
   }
 
@@ -204,11 +209,17 @@
 
   safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
 
-  Handle<Code> result =
-      v8::internal::CodeGenerator::MakeCodeEpilogue(masm(), info);
+  unwinding_info_writer_.Finish(masm()->pc_offset());
+
+  Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
+      masm(), unwinding_info_writer_.eh_frame_writer(), info, Handle<Object>());
   result->set_is_turbofanned(true);
   result->set_stack_slots(frame()->GetTotalFrameSlotCount());
   result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+  Handle<ByteArray> source_positions =
+      source_position_table_builder_.ToSourcePositionTable(
+          isolate(), Handle<AbstractCode>::cast(result));
+  result->set_source_position_table(*source_positions);
 
   // Emit exception handler table.
   if (!handlers_.empty()) {
@@ -217,12 +228,8 @@
             HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
             TENURED));
     for (size_t i = 0; i < handlers_.size(); ++i) {
-      int position = handlers_[i].handler->pos();
-      HandlerTable::CatchPrediction prediction = handlers_[i].caught_locally
-                                                     ? HandlerTable::CAUGHT
-                                                     : HandlerTable::UNCAUGHT;
       table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
-      table->SetReturnHandler(static_cast<int>(i), position, prediction);
+      table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
     }
     result->set_handler_table(*table);
   }
@@ -234,11 +241,6 @@
     Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
   }
 
-  // Emit a code line info recording stop event.
-  void* line_info = recorder->DetachJITHandlerData();
-  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(
-                                AbstractCode::cast(*result), line_info));
-
   return result;
 }
 
@@ -276,21 +278,6 @@
   }
 }
 
-bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
-                                              int* slot_return) {
-  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
-    if (object.is_identical_to(info()->context()) && !info()->is_osr()) {
-      *slot_return = Frame::kContextSlot;
-      return true;
-    } else if (object.is_identical_to(info()->closure())) {
-      *slot_return = Frame::kJSFunctionSlot;
-      return true;
-    }
-  }
-  return false;
-}
-
-
 bool CodeGenerator::IsMaterializableFromRoot(
     Handle<HeapObject> object, Heap::RootListIndex* index_return) {
   const CallDescriptor* incoming_descriptor =
@@ -316,9 +303,95 @@
   return kSuccess;
 }
 
+bool CodeGenerator::IsValidPush(InstructionOperand source,
+                                CodeGenerator::PushTypeFlags push_type) {
+  if (source.IsImmediate() &&
+      ((push_type & CodeGenerator::kImmediatePush) != 0)) {
+    return true;
+  }
+  if ((source.IsRegister() || source.IsStackSlot()) &&
+      ((push_type & CodeGenerator::kScalarPush) != 0)) {
+    return true;
+  }
+  if ((source.IsFloatRegister() || source.IsFloatStackSlot()) &&
+      ((push_type & CodeGenerator::kFloat32Push) != 0)) {
+    return true;
+  }
+  if ((source.IsDoubleRegister() || source.IsDoubleStackSlot()) &&
+      ((push_type & CodeGenerator::kFloat64Push) != 0)) {
+    return true;
+  }
+  return false;
+}
+
+void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
+                                           PushTypeFlags push_type,
+                                           ZoneVector<MoveOperands*>* pushes) {
+  pushes->clear();
+  for (int i = Instruction::FIRST_GAP_POSITION;
+       i <= Instruction::LAST_GAP_POSITION; ++i) {
+    Instruction::GapPosition inner_pos =
+        static_cast<Instruction::GapPosition>(i);
+    ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
+    if (parallel_move != nullptr) {
+      for (auto move : *parallel_move) {
+        InstructionOperand source = move->source();
+        InstructionOperand destination = move->destination();
+        int first_push_compatible_index =
+            V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
+        // If there are any moves from slots that will be overridden by pushes,
+        // then the full gap resolver must be used, since pushes don't
+        // participate in the parallel move and might clobber values needed
+        // for the gap resolve.
+        if (source.IsStackSlot() &&
+            LocationOperand::cast(source).index() >=
+                first_push_compatible_index) {
+          pushes->clear();
+          return;
+        }
+        // TODO(danno): Right now, only consider moves from the FIRST gap for
+        // pushes. Theoretically, we could extract pushes for both gaps (there
+        // are cases where this happens), but the logic for that would also have
+        // to check to make sure that non-memory inputs to the pushes from the
+        // LAST gap don't get clobbered in the FIRST gap.
+        if (i == Instruction::FIRST_GAP_POSITION) {
+          if (destination.IsStackSlot() &&
+              LocationOperand::cast(destination).index() >=
+                  first_push_compatible_index) {
+            int index = LocationOperand::cast(destination).index();
+            if (IsValidPush(source, push_type)) {
+              if (index >= static_cast<int>(pushes->size())) {
+                pushes->resize(index + 1);
+              }
+              (*pushes)[index] = move;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  // For now, only support a contiguous run of pushes at the end of the list.
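+  // E.g. (illustrative) a candidate vector [nullptr, m1, nullptr, m2, m3]
+  // keeps only the trailing run {m2, m3}; m1 is dropped because the slot
+  // between it and m2 has no push candidate.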
+  size_t push_count_upper_bound = pushes->size();
+  size_t push_begin = push_count_upper_bound;
+  for (auto move : base::Reversed(*pushes)) {
+    if (move == nullptr) break;
+    push_begin--;
+  }
+  size_t push_count = pushes->size() - push_begin;
+  std::copy(pushes->begin() + push_begin,
+            pushes->begin() + push_begin + push_count, pushes->begin());
+  pushes->resize(push_count);
+}
+
 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     Instruction* instr, const InstructionBlock* block) {
+  int first_unused_stack_slot;
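+  // Tail calls may need the stack pointer adjusted on both sides of the gap
+  // moves: before them when the callee needs more stack argument slots, and
+  // after them when it needs fewer.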
+  bool adjust_stack =
+      GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
+  if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
   AssembleGaps(instr);
+  if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
   DCHECK_IMPLIES(
       block->must_deconstruct_frame(),
       instr != code()->InstructionAt(block->last_instruction_index()) ||
@@ -398,7 +471,8 @@
   current_source_position_ = source_position;
   if (source_position.IsUnknown()) return;
   int code_pos = source_position.raw();
-  masm()->positions_recorder()->RecordPosition(code_pos);
+  source_position_table_builder_.AddPosition(masm()->pc_offset(), code_pos,
+                                             false);
   if (FLAG_code_comments) {
     CompilationInfo* info = this->info();
     if (!info->parse_info()) return;
@@ -417,6 +491,16 @@
   }
 }
 
+bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
+                                                 int* slot) {
+  if (instr->IsTailCall()) {
+    InstructionOperandConverter g(this, instr);
+    *slot = g.InputInt32(instr->InputCount() - 1);
+    return true;
+  } else {
+    return false;
+  }
+}
 
 void CodeGenerator::AssembleGaps(Instruction* instr) {
   for (int i = Instruction::FIRST_GAP_POSITION;
@@ -502,9 +586,8 @@
 
   if (flags & CallDescriptor::kHasExceptionHandler) {
     InstructionOperandConverter i(this, instr);
-    bool caught = flags & CallDescriptor::kHasLocalCatchHandler;
     RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
-    handlers_.push_back({caught, GetLabel(handler_rpo), masm()->pc_offset()});
+    handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
   }
 
   if (needs_frame_state) {
@@ -513,7 +596,7 @@
     // code address).
     size_t frame_state_offset = 1;
     FrameStateDescriptor* descriptor =
-        GetFrameStateDescriptor(instr, frame_state_offset);
+        GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
     int pc_offset = masm()->pc_offset();
     int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
                                           descriptor->state_combine());
@@ -550,15 +633,19 @@
   return result;
 }
 
-
-FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
+DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
     Instruction* instr, size_t frame_state_offset) {
   InstructionOperandConverter i(this, instr);
-  InstructionSequence::StateId state_id =
-      InstructionSequence::StateId::FromInt(i.InputInt32(frame_state_offset));
-  return code()->GetFrameStateDescriptor(state_id);
+  int const state_id = i.InputInt32(frame_state_offset);
+  return code()->GetDeoptimizationEntry(state_id);
 }
 
+DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
+    int deoptimization_id) const {
+  size_t const index = static_cast<size_t>(deoptimization_id);
+  DCHECK_LT(index, deoptimization_states_.size());
+  return deoptimization_states_[index]->reason();
+}
 
 void CodeGenerator::TranslateStateValueDescriptor(
     StateValueDescriptor* desc, Translation* translation,
@@ -667,6 +754,12 @@
           shared_info_id,
           static_cast<unsigned int>(descriptor->parameters_count()));
       break;
+    case FrameStateType::kGetterStub:
+      translation->BeginGetterStubFrame(shared_info_id);
+      break;
+    case FrameStateType::kSetterStub:
+      translation->BeginSetterStubFrame(shared_info_id);
+      break;
   }
 
   TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
@@ -677,8 +770,9 @@
 int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
                                     size_t frame_state_offset,
                                     OutputFrameStateCombine state_combine) {
-  FrameStateDescriptor* descriptor =
-      GetFrameStateDescriptor(instr, frame_state_offset);
+  DeoptimizationEntry const& entry =
+      GetDeoptimizationEntry(instr, frame_state_offset);
+  FrameStateDescriptor* const descriptor = entry.descriptor();
   frame_state_offset++;
 
   Translation translation(
@@ -691,7 +785,8 @@
   int deoptimization_id = static_cast<int>(deoptimization_states_.size());
 
   deoptimization_states_.push_back(new (zone()) DeoptimizationState(
-      descriptor->bailout_id(), translation.index(), pc_offset));
+      descriptor->bailout_id(), translation.index(), pc_offset,
+      entry.reason()));
 
   return deoptimization_id;
 }
@@ -751,10 +846,33 @@
     Handle<Object> constant_object;
     switch (constant.type()) {
       case Constant::kInt32:
-        DCHECK(type == MachineType::Int32() || type == MachineType::Uint32() ||
-               type.representation() == MachineRepresentation::kBit);
+        if (type.representation() == MachineRepresentation::kTagged) {
+          // When pointers are 4 bytes, we can use int32 constants to represent
+          // Smis.
+          DCHECK_EQ(4, kPointerSize);
+          constant_object =
+              handle(reinterpret_cast<Smi*>(constant.ToInt32()), isolate());
+          DCHECK(constant_object->IsSmi());
+        } else {
+          DCHECK(type == MachineType::Int32() ||
+                 type == MachineType::Uint32() ||
+                 type.representation() == MachineRepresentation::kBit ||
+                 type.representation() == MachineRepresentation::kNone);
+          DCHECK(type.representation() != MachineRepresentation::kNone ||
+                 constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
+
+          constant_object =
+              isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+        }
+        break;
+      case Constant::kInt64:
+        // When pointers are 8 bytes, we can use int64 constants to represent
+        // Smis.
+        DCHECK_EQ(type.representation(), MachineRepresentation::kTagged);
+        DCHECK_EQ(8, kPointerSize);
         constant_object =
-            isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+            handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
+        DCHECK(constant_object->IsSmi());
         break;
       case Constant::kFloat32:
         DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
@@ -799,18 +917,6 @@
   return exit;
 }
 
-int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
-  // Leave the PC on the stack on platforms that have that as part of their ABI
-  int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
-  int sp_slot_delta = frame_access_state()->has_frame()
-                          ? (frame()->GetTotalFrameSlotCount() - pc_slots)
-                          : 0;
-  // Discard only slots that won't be used by new parameters.
-  sp_slot_delta += stack_param_delta;
-  return sp_slot_delta;
-}
-
-
 OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
     : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
   gen->ools_ = this;
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 5f35e8a..21c13f8 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -5,11 +5,14 @@
 #ifndef V8_COMPILER_CODE_GENERATOR_H_
 #define V8_COMPILER_CODE_GENERATOR_H_
 
+#include "src/compiler.h"
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/instruction.h"
+#include "src/compiler/unwinding-info-writer.h"
 #include "src/deoptimizer.h"
 #include "src/macro-assembler.h"
 #include "src/safepoint-table.h"
+#include "src/source-position-table.h"
 
 namespace v8 {
 namespace internal {
@@ -81,9 +84,6 @@
   void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
                        int arguments, Safepoint::DeoptMode deopt_mode);
 
-  // Check if a heap object can be materialized by loading from the frame, which
-  // is usually way cheaper than materializing the actual heap object constant.
-  bool IsMaterializableFromFrame(Handle<HeapObject> object, int* slot_return);
   // Check if a heap object can be materialized by loading from a heap root,
   // which is cheaper on some platforms than materializing the actual heap
   // object constant.
@@ -101,6 +101,11 @@
   void AssembleSourcePosition(Instruction* instr);
   void AssembleGaps(Instruction* instr);
 
+  // Returns true if an instruction is a tail call that needs to adjust the
+  // stack pointer before execution. The stack slot index of the empty slot
+  // above the adjusted stack pointer is returned in |slot|.
+  bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);
+
   // ===========================================================================
   // ============= Architecture-specific code generation methods. ==============
   // ===========================================================================
@@ -123,18 +128,49 @@
   // to tear down a stack frame.
   void AssembleReturn();
 
-  // Generates code to deconstruct a the caller's frame, including arguments.
-  void AssembleDeconstructActivationRecord(int stack_param_delta);
-
   void AssembleDeconstructFrame();
 
   // Generates code to manipulate the stack in preparation for a tail call.
-  void AssemblePrepareTailCall(int stack_param_delta);
+  void AssemblePrepareTailCall();
 
   // Generates code to pop current frame if it is an arguments adaptor frame.
   void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
                                         Register scratch2, Register scratch3);
 
+  enum PushTypeFlag {
+    kImmediatePush = 0x1,
+    kScalarPush = 0x2,
+    kFloat32Push = 0x4,
+    kFloat64Push = 0x8,
+    kFloatPush = kFloat32Push | kFloat64Push
+  };
+
+  typedef base::Flags<PushTypeFlag> PushTypeFlags;
+
+  static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);
+
+  // Generate a list of moves from an instruction that are candidates to be
+  // turned into push instructions on platforms that support them. In general,
+  // the list of push candidates consists of moves to a set of contiguous
+  // destination InstructionOperand locations on the stack that neither
+  // clobber values needed to resolve the gap nor use values generated by the
+  // gap, i.e. moves that can be hoisted together before the actual gap and
+  // assembled together.
+  static void GetPushCompatibleMoves(Instruction* instr,
+                                     PushTypeFlags push_type,
+                                     ZoneVector<MoveOperands*>* pushes);
+
+  // Called before a tail call |instr|'s gap moves are assembled and allows
+  // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
+  // need it before gap moves or conversion of certain gap moves into pushes.
+  void AssembleTailCallBeforeGap(Instruction* instr,
+                                 int first_unused_stack_slot);
+  // Called after a tail call |instr|'s gap moves are assembled and allows
+  // gap-specific post-processing, e.g. adjustment of the sp for tail calls that
+  // need it after gap moves.
+  void AssembleTailCallAfterGap(Instruction* instr,
+                                int first_unused_stack_slot);
+
   // ===========================================================================
   // ============== Architecture-specific gap resolver methods. ================
   // ===========================================================================
@@ -164,8 +200,9 @@
   void RecordCallPosition(Instruction* instr);
   void PopulateDeoptimizationData(Handle<Code> code);
   int DefineDeoptimizationLiteral(Handle<Object> literal);
-  FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
-                                                size_t frame_state_offset);
+  DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
+                                                    size_t frame_state_offset);
+  DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const;
   int BuildTranslation(Instruction* instr, int pc_offset,
                        size_t frame_state_offset,
                        OutputFrameStateCombine state_combine);
@@ -187,33 +224,30 @@
   DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
                                             size_t frame_state_offset);
 
-  // Converts the delta in the number of stack parameter passed from a tail
-  // caller to the callee into the distance (in pointers) the SP must be
-  // adjusted, taking frame elision and other relevant factors into
-  // consideration.
-  int TailCallFrameStackSlotDelta(int stack_param_delta);
-
   // ===========================================================================
 
-  struct DeoptimizationState : ZoneObject {
+  class DeoptimizationState final : public ZoneObject {
    public:
+    DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset,
+                        DeoptimizeReason reason)
+        : bailout_id_(bailout_id),
+          translation_id_(translation_id),
+          pc_offset_(pc_offset),
+          reason_(reason) {}
+
     BailoutId bailout_id() const { return bailout_id_; }
     int translation_id() const { return translation_id_; }
     int pc_offset() const { return pc_offset_; }
-
-    DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset)
-        : bailout_id_(bailout_id),
-          translation_id_(translation_id),
-          pc_offset_(pc_offset) {}
+    DeoptimizeReason reason() const { return reason_; }
 
    private:
     BailoutId bailout_id_;
     int translation_id_;
     int pc_offset_;
+    DeoptimizeReason reason_;
   };
 
   struct HandlerInfo {
-    bool caught_locally;
     Label* handler;
     int pc_offset;
   };
@@ -223,6 +257,7 @@
   FrameAccessState* frame_access_state_;
   Linkage* const linkage_;
   InstructionSequence* const code_;
+  UnwindingInfoWriter unwinding_info_writer_;
   CompilationInfo* const info_;
   Label* const labels_;
   Label return_label_;
@@ -241,6 +276,7 @@
   JumpTable* jump_tables_;
   OutOfLineCode* ools_;
   int osr_pc_offset_;
+  SourcePositionTableBuilder source_position_table_builder_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 5c3d3d7..9527c75 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -122,6 +122,7 @@
   DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
          node->opcode() == IrOpcode::kDeoptimizeUnless);
   bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+  DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
   Node* condition = NodeProperties::GetValueInput(node, 0);
   Node* frame_state = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -133,8 +134,8 @@
   if (condition->opcode() == IrOpcode::kBooleanNot) {
     NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
     NodeProperties::ChangeOp(node, condition_is_true
-                                       ? common()->DeoptimizeIf()
-                                       : common()->DeoptimizeUnless());
+                                       ? common()->DeoptimizeIf(reason)
+                                       : common()->DeoptimizeUnless(reason));
     return Changed(node);
   }
   Decision const decision = DecideCondition(condition);
@@ -142,8 +143,9 @@
   if (condition_is_true == (decision == Decision::kTrue)) {
     ReplaceWithValue(node, dead(), effect, control);
   } else {
-    control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                               frame_state, effect, control);
+    control =
+        graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
+                         frame_state, effect, control);
     // TODO(bmeurer): This should be on the AdvancedReducer somehow.
     NodeProperties::MergeControlToEnd(graph(), common(), control);
     Revisit(graph()->end());
@@ -245,17 +247,6 @@
             return Change(node, machine()->Float32Abs(), vtrue);
           }
         }
-        if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
-            machine()->Float32Min().IsSupported()) {
-          // We might now be able to further reduce the {merge} node.
-          Revisit(merge);
-          return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
-        } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
-                   machine()->Float32Max().IsSupported()) {
-          // We might now be able to further reduce the {merge} node.
-          Revisit(merge);
-          return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
-        }
       } else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
         Float64BinopMatcher mcond(cond);
         if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
@@ -267,17 +258,6 @@
             return Change(node, machine()->Float64Abs(), vtrue);
           }
         }
-        if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
-            machine()->Float64Min().IsSupported()) {
-          // We might now be able to further reduce the {merge} node.
-          Revisit(merge);
-          return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
-        } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
-                   machine()->Float64Max().IsSupported()) {
-          // We might now be able to further reduce the {merge} node.
-          Revisit(merge);
-          return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
-        }
       }
     }
   }
@@ -301,8 +281,16 @@
 Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
   DCHECK_EQ(IrOpcode::kReturn, node->opcode());
   Node* const value = node->InputAt(0);
-  Node* const effect = node->InputAt(1);
-  Node* const control = node->InputAt(2);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  bool changed = false;
+  if (effect->opcode() == IrOpcode::kCheckpoint) {
+    // A {Return} node can never be used to insert a deoptimization point,
+    // hence checkpoints can be cut out of the effect chain flowing into it.
+    effect = NodeProperties::GetEffectInput(effect);
+    NodeProperties::ReplaceEffectInput(node, effect);
+    changed = true;
+  }
   if (value->opcode() == IrOpcode::kPhi &&
       NodeProperties::GetControlInput(value) == control &&
       effect->opcode() == IrOpcode::kEffectPhi &&
@@ -327,7 +315,7 @@
     Replace(control, dead());
     return Replace(dead());
   }
-  return NoChange();
+  return changed ? Changed(node) : NoChange();
 }
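A minimal stand-alone model of the checkpoint elision above (a toy struct rather than V8's effect chain): a Return can never act as a deoptimization point, so a Checkpoint feeding its effect input carries no useful frame state and can be skipped.

struct EffectNode {
  bool is_checkpoint;
  EffectNode* effect_input;
};

// Mirrors the single peel in ReduceReturn above; repeated reduction rounds
// take care of longer Checkpoint chains.
EffectNode* SkipCheckpointBeforeReturn(EffectNode* effect) {
  return effect->is_checkpoint ? effect->effect_input : effect;
}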
 
 
@@ -355,13 +343,6 @@
           return Change(node, machine()->Float32Abs(), vtrue);
         }
       }
-      if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
-          machine()->Float32Min().IsSupported()) {
-        return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
-      } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
-                 machine()->Float32Max().IsSupported()) {
-        return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
-      }
       break;
     }
     case IrOpcode::kFloat64LessThan: {
@@ -373,13 +354,6 @@
           return Change(node, machine()->Float64Abs(), vtrue);
         }
       }
-      if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
-          machine()->Float64Min().IsSupported()) {
-        return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
-      } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
-                 machine()->Float64Max().IsSupported()) {
-        return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
-      }
       break;
     }
     default:
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 4f5ead8..f732375 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -35,10 +35,14 @@
   return OpParameter<BranchHint>(op);
 }
 
+DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
+  DCHECK(op->opcode() == IrOpcode::kDeoptimizeIf ||
+         op->opcode() == IrOpcode::kDeoptimizeUnless);
+  return OpParameter<DeoptimizeReason>(op);
+}
 
 size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
 
-
 std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
   switch (kind) {
     case DeoptimizeKind::kEager:
@@ -50,25 +54,25 @@
   return os;
 }
 
-
-DeoptimizeKind DeoptimizeKindOf(const Operator* const op) {
-  DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
-  return OpParameter<DeoptimizeKind>(op);
+bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
+  return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason();
 }
 
+bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
+  return !(lhs == rhs);
+}
 
-size_t hash_value(IfExceptionHint hint) { return static_cast<size_t>(hint); }
+size_t hash_value(DeoptimizeParameters p) {
+  return base::hash_combine(p.kind(), p.reason());
+}
 
+std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
+  return os << p.kind() << ":" << p.reason();
+}
 
-std::ostream& operator<<(std::ostream& os, IfExceptionHint hint) {
-  switch (hint) {
-    case IfExceptionHint::kLocallyCaught:
-      return os << "Caught";
-    case IfExceptionHint::kLocallyUncaught:
-      return os << "Uncaught";
-  }
-  UNREACHABLE();
-  return os;
+DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
+  DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+  return OpParameter<DeoptimizeParameters>(op);
 }
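The operators above follow the usual value-object protocol that operator caching and printing rely on. A self-contained sketch; the boost-style mixer stands in for base::hash_combine and is an assumption about its behavior, not a copy of it.

#include <cstddef>
#include <ostream>

enum class Kind { kEager, kSoft };
enum class Reason { kNoReason, kWrongMap };

struct Params {
  Kind kind;
  Reason reason;
};

bool operator==(Params a, Params b) {
  return a.kind == b.kind && a.reason == b.reason;
}

size_t hash_combine(size_t seed, size_t value) {
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

size_t hash_value(Params p) {
  return hash_combine(static_cast<size_t>(p.kind),
                      static_cast<size_t>(p.reason));
}

// Matches the kind:reason layout printed by operator<< above.
std::ostream& operator<<(std::ostream& os, Params p) {
  return os << static_cast<int>(p.kind) << ":" << static_cast<int>(p.reason);
}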
 
 
@@ -187,6 +191,11 @@
   return OpParameter<RegionObservability>(op);
 }
 
+Type* TypeGuardTypeOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
+  return OpParameter<Type*>(op);
+}
+
 std::ostream& operator<<(std::ostream& os,
                          const ZoneVector<MachineType>* types) {
   // Print all the MachineTypes, separated by commas.
@@ -201,20 +210,23 @@
   return os;
 }
 
-#define CACHED_OP_LIST(V)                                    \
-  V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)             \
-  V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 1, 1)     \
-  V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 1, 1) \
-  V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)            \
-  V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1)           \
-  V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
-  V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
-  V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1)             \
-  V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)         \
-  V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)   \
-  V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)     \
-  V(Checkpoint, Operator::kKontrol, 0, 1, 1, 0, 1, 0)        \
-  V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0)
+#define CACHED_OP_LIST(V)                                                     \
+  V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)                              \
+  V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)                             \
+  V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1)                            \
+  V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1)                          \
+  V(IfException, Operator::kKontrol, 0, 1, 1, 1, 1, 1)                        \
+  V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1)                          \
+  V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1)                              \
+  V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)                          \
+  V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)                    \
+  V(OsrLoopEntry, Operator::kFoldable | Operator::kNoThrow, 0, 1, 1, 0, 1, 1) \
+  V(LoopExit, Operator::kKontrol, 0, 0, 2, 0, 0, 1)                           \
+  V(LoopExitValue, Operator::kPure, 1, 0, 1, 1, 0, 0)                         \
+  V(LoopExitEffect, Operator::kNoThrow, 0, 1, 1, 0, 1, 0)                     \
+  V(Checkpoint, Operator::kKontrol, 0, 1, 1, 0, 1, 0)                         \
+  V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0)                       \
+  V(Retain, Operator::kKontrol, 1, 1, 0, 0, 1, 0)
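(For orientation, inferred from the Operator constructor rather than stated in this diff: each V(...) row reads V(Name, properties, value_in, effect_in, control_in, value_out, effect_out, control_out), so e.g. the new Retain entry consumes one value plus one effect and produces one effect.)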
 
 #define CACHED_RETURN_LIST(V) \
   V(1)                        \
@@ -241,6 +253,11 @@
   V(5)                            \
   V(6)
 
+#define CACHED_INDUCTION_VARIABLE_PHI_LIST(V) \
+  V(4)                                        \
+  V(5)                                        \
+  V(6)                                        \
+  V(7)
 
 #define CACHED_LOOP_LIST(V) \
   V(1)                      \
@@ -257,6 +274,30 @@
   V(7)                       \
   V(8)
 
+#define CACHED_DEOPTIMIZE_LIST(V)                        \
+  V(Eager, MinusZero)                                    \
+  V(Eager, NoReason)                                     \
+  V(Eager, WrongMap)                                     \
+  V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
+  V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
+
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+  V(DivisionByZero)                  \
+  V(Hole)                            \
+  V(MinusZero)                       \
+  V(Overflow)                        \
+  V(Smi)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+  V(LostPrecision)                       \
+  V(LostPrecisionOrNaN)                  \
+  V(NoReason)                            \
+  V(NotAHeapNumber)                      \
+  V(NotAHeapNumberUndefinedBoolean)      \
+  V(NotASmi)                             \
+  V(OutOfBounds)                         \
+  V(WrongInstanceType)                   \
+  V(WrongMap)
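These lists feed an X-macro pattern: each V(...) row expands once into a statically allocated operator in the cache and once into a switch case in the builder, so hot reasons never touch the zone. A compilable toy version of the same pattern, with made-up names:

enum class Reason { kDivisionByZero, kHole, kMinusZero, kOverflow };

struct Op {
  Reason reason;
};

#define TOY_DEOPTIMIZE_IF_LIST(V) \
  V(DivisionByZero)               \
  V(Hole)                         \
  V(MinusZero)

// One static instance per listed reason...
#define TOY_CACHED(R) static const Op kDeoptimizeIf##R{Reason::k##R};
TOY_DEOPTIMIZE_IF_LIST(TOY_CACHED)
#undef TOY_CACHED

// ...and one switch case handing it out; unlisted reasons fall through to
// the allocating path (modeled as nullptr here, zone allocation in V8).
const Op* DeoptimizeIf(Reason reason) {
  switch (reason) {
#define TOY_CASE(R)  \
  case Reason::k##R: \
    return &kDeoptimizeIf##R;
    TOY_DEOPTIMIZE_IF_LIST(TOY_CASE)
#undef TOY_CASE
    default:
      return nullptr;
  }
}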
 
 #define CACHED_PARAMETER_LIST(V) \
   V(0)                           \
@@ -317,30 +358,6 @@
   CACHED_OP_LIST(CACHED)
 #undef CACHED
 
-  template <DeoptimizeKind kKind>
-  struct DeoptimizeOperator final : public Operator1<DeoptimizeKind> {
-    DeoptimizeOperator()
-        : Operator1<DeoptimizeKind>(                      // --
-              IrOpcode::kDeoptimize, Operator::kNoThrow,  // opcode
-              "Deoptimize",                               // name
-              1, 1, 1, 0, 0, 1,                           // counts
-              kKind) {}                                   // parameter
-  };
-  DeoptimizeOperator<DeoptimizeKind::kEager> kDeoptimizeEagerOperator;
-  DeoptimizeOperator<DeoptimizeKind::kSoft> kDeoptimizeSoftOperator;
-
-  template <IfExceptionHint kCaughtLocally>
-  struct IfExceptionOperator final : public Operator1<IfExceptionHint> {
-    IfExceptionOperator()
-        : Operator1<IfExceptionHint>(                      // --
-              IrOpcode::kIfException, Operator::kKontrol,  // opcode
-              "IfException",                               // name
-              0, 1, 1, 1, 1, 1,                            // counts
-              kCaughtLocally) {}                           // parameter
-  };
-  IfExceptionOperator<IfExceptionHint::kLocallyCaught> kIfExceptionCOperator;
-  IfExceptionOperator<IfExceptionHint::kLocallyUncaught> kIfExceptionUOperator;
-
   template <size_t kInputCount>
   struct EndOperator final : public Operator {
     EndOperator()
@@ -383,10 +400,10 @@
   template <int kEffectInputCount>
   struct EffectPhiOperator final : public Operator {
     EffectPhiOperator()
-        : Operator(                                   // --
-              IrOpcode::kEffectPhi, Operator::kPure,  // opcode
-              "EffectPhi",                            // name
-              0, kEffectInputCount, 1, 0, 1, 0) {}    // counts
+        : Operator(                                      // --
+              IrOpcode::kEffectPhi, Operator::kKontrol,  // opcode
+              "EffectPhi",                               // name
+              0, kEffectInputCount, 1, 0, 1, 0) {}       // counts
   };
 #define CACHED_EFFECT_PHI(input_count) \
   EffectPhiOperator<input_count> kEffectPhi##input_count##Operator;
@@ -433,6 +450,54 @@
   CACHED_MERGE_LIST(CACHED_MERGE)
 #undef CACHED_MERGE
 
+  template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+  struct DeoptimizeOperator final : public Operator1<DeoptimizeParameters> {
+    DeoptimizeOperator()
+        : Operator1<DeoptimizeParameters>(               // --
+              IrOpcode::kDeoptimize,                     // opcode
+              Operator::kFoldable | Operator::kNoThrow,  // properties
+              "Deoptimize",                              // name
+              1, 1, 1, 0, 0, 1,                          // counts
+              DeoptimizeParameters(kKind, kReason)) {}   // parameter
+  };
+#define CACHED_DEOPTIMIZE(Kind, Reason)                                    \
+  DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+      kDeoptimize##Kind##Reason##Operator;
+  CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
+#undef CACHED_DEOPTIMIZE
+
+  template <DeoptimizeReason kReason>
+  struct DeoptimizeIfOperator final : public Operator1<DeoptimizeReason> {
+    DeoptimizeIfOperator()
+        : Operator1<DeoptimizeReason>(                   // --
+              IrOpcode::kDeoptimizeIf,                   // opcode
+              Operator::kFoldable | Operator::kNoThrow,  // properties
+              "DeoptimizeIf",                            // name
+              2, 1, 1, 0, 1, 1,                          // counts
+              kReason) {}                                // parameter
+  };
+#define CACHED_DEOPTIMIZE_IF(Reason)                \
+  DeoptimizeIfOperator<DeoptimizeReason::k##Reason> \
+      kDeoptimizeIf##Reason##Operator;
+  CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
+#undef CACHED_DEOPTIMIZE_IF
+
+  template <DeoptimizeReason kReason>
+  struct DeoptimizeUnlessOperator final : public Operator1<DeoptimizeReason> {
+    DeoptimizeUnlessOperator()
+        : Operator1<DeoptimizeReason>(                   // --
+              IrOpcode::kDeoptimizeUnless,               // opcode
+              Operator::kFoldable | Operator::kNoThrow,  // properties
+              "DeoptimizeUnless",                        // name
+              2, 1, 1, 0, 1, 1,                          // counts
+              kReason) {}                                // parameter
+  };
+#define CACHED_DEOPTIMIZE_UNLESS(Reason)                \
+  DeoptimizeUnlessOperator<DeoptimizeReason::k##Reason> \
+      kDeoptimizeUnless##Reason##Operator;
+  CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
+#undef CACHED_DEOPTIMIZE_UNLESS
+
   template <MachineRepresentation kRep, int kInputCount>
   struct PhiOperator final : public Operator1<MachineRepresentation> {
     PhiOperator()
@@ -448,6 +513,20 @@
   CACHED_PHI_LIST(CACHED_PHI)
 #undef CACHED_PHI
 
+  template <int kInputCount>
+  struct InductionVariablePhiOperator final : public Operator {
+    InductionVariablePhiOperator()
+        : Operator(                                              // --
+              IrOpcode::kInductionVariablePhi, Operator::kPure,  // opcode
+              "InductionVariablePhi",                            // name
+              kInputCount, 0, 1, 1, 0, 0) {}                     // counts
+  };
+#define CACHED_INDUCTION_VARIABLE_PHI(input_count) \
+  InductionVariablePhiOperator<input_count>        \
+      kInductionVariablePhi##input_count##Operator;
+  CACHED_INDUCTION_VARIABLE_PHI_LIST(CACHED_INDUCTION_VARIABLE_PHI)
+#undef CACHED_INDUCTION_VARIABLE_PHI
+
   template <int kIndex>
   struct ParameterOperator final : public Operator1<ParameterInfo> {
     ParameterOperator()
@@ -560,28 +639,62 @@
   return nullptr;
 }
 
-
-const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind) {
-  switch (kind) {
-    case DeoptimizeKind::kEager:
-      return &cache_.kDeoptimizeEagerOperator;
-    case DeoptimizeKind::kSoft:
-      return &cache_.kDeoptimizeSoftOperator;
+const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
+                                                  DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE(Kind, Reason)                 \
+  if (kind == DeoptimizeKind::k##Kind &&                \
+      reason == DeoptimizeReason::k##Reason) {          \
+    return &cache_.kDeoptimize##Kind##Reason##Operator; \
   }
-  UNREACHABLE();
-  return nullptr;
+  CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
+#undef CACHED_DEOPTIMIZE
+  // Uncached
+  DeoptimizeParameters parameter(kind, reason);
+  return new (zone()) Operator1<DeoptimizeParameters>(  // --
+      IrOpcode::kDeoptimize,                            // opcode
+      Operator::kFoldable | Operator::kNoThrow,         // properties
+      "Deoptimize",                                     // name
+      1, 1, 1, 0, 0, 1,                                 // counts
+      parameter);                                       // parameter
 }
 
-
-const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
-  switch (hint) {
-    case IfExceptionHint::kLocallyCaught:
-      return &cache_.kIfExceptionCOperator;
-    case IfExceptionHint::kLocallyUncaught:
-      return &cache_.kIfExceptionUOperator;
+const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeReason reason) {
+  switch (reason) {
+#define CACHED_DEOPTIMIZE_IF(Reason) \
+  case DeoptimizeReason::k##Reason:  \
+    return &cache_.kDeoptimizeIf##Reason##Operator;
+    CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
+#undef CACHED_DEOPTIMIZE_IF
+    default:
+      break;
   }
-  UNREACHABLE();
-  return nullptr;
+  // Uncached
+  return new (zone()) Operator1<DeoptimizeReason>(  // --
+      IrOpcode::kDeoptimizeIf,                      // opcode
+      Operator::kFoldable | Operator::kNoThrow,     // properties
+      "DeoptimizeIf",                               // name
+      2, 1, 1, 0, 1, 1,                             // counts
+      reason);                                      // parameter
+}
+
+const Operator* CommonOperatorBuilder::DeoptimizeUnless(
+    DeoptimizeReason reason) {
+  switch (reason) {
+#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
+  case DeoptimizeReason::k##Reason:      \
+    return &cache_.kDeoptimizeUnless##Reason##Operator;
+    CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
+#undef CACHED_DEOPTIMIZE_UNLESS
+    default:
+      break;
+  }
+  // Uncached
+  return new (zone()) Operator1<DeoptimizeReason>(  // --
+      IrOpcode::kDeoptimizeUnless,                  // opcode
+      Operator::kFoldable | Operator::kNoThrow,     // properties
+      "DeoptimizeUnless",                           // name
+      2, 1, 1, 0, 1, 1,                             // counts
+      reason);                                      // parameter
 }
 
 
@@ -603,10 +716,10 @@
 
 
 const Operator* CommonOperatorBuilder::Start(int value_output_count) {
-  return new (zone()) Operator(               // --
-      IrOpcode::kStart, Operator::kFoldable,  // opcode
-      "Start",                                // name
-      0, 0, 0, value_output_count, 1, 1);     // counts
+  return new (zone()) Operator(                                    // --
+      IrOpcode::kStart, Operator::kFoldable | Operator::kNoThrow,  // opcode
+      "Start",                                                     // name
+      0, 0, 0, value_output_count, 1, 1);                          // counts
 }
 
 
@@ -787,6 +900,13 @@
       rep);                                              // parameter
 }
 
+const Operator* CommonOperatorBuilder::TypeGuard(Type* type) {
+  return new (zone()) Operator1<Type*>(       // --
+      IrOpcode::kTypeGuard, Operator::kPure,  // opcode
+      "TypeGuard",                            // name
+      1, 0, 1, 1, 0, 0,                       // counts
+      type);                                  // parameter
+}
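TypeGuard stores its Type* as an Operator1 parameter and TypeGuardTypeOf fetches it back. A reduced stand-alone model of that round-trip (toy Operator types; V8's real templates carry considerably more machinery):

#include <cassert>

struct Operator {
  int opcode;
};

template <typename T>
struct Operator1 : Operator {
  T parameter;
};

constexpr int kTypeGuard = 42;  // arbitrary opcode number for the sketch

template <typename T>
const T& OpParameter(const Operator* op) {
  return static_cast<const Operator1<T>*>(op)->parameter;
}

struct Type;  // opaque here, as the compiler's Type is for this purpose

const Type* TypeGuardTypeOf(const Operator* op) {
  assert(op->opcode == kTypeGuard);  // mirrors the DCHECK_EQ above
  return OpParameter<const Type*>(op);
}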
 
 const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
   DCHECK(effect_input_count > 0);  // Disallow empty effect phis.
@@ -800,10 +920,29 @@
       break;
   }
   // Uncached.
-  return new (zone()) Operator(               // --
-      IrOpcode::kEffectPhi, Operator::kPure,  // opcode
-      "EffectPhi",                            // name
-      0, effect_input_count, 1, 0, 1, 0);     // counts
+  return new (zone()) Operator(                  // --
+      IrOpcode::kEffectPhi, Operator::kKontrol,  // opcode
+      "EffectPhi",                               // name
+      0, effect_input_count, 1, 0, 1, 0);        // counts
+}
+
+const Operator* CommonOperatorBuilder::InductionVariablePhi(int input_count) {
+  DCHECK(input_count >= 4);  // There must always be the entry, backedge,
+                             // increment and at least one bound.
+  switch (input_count) {
+#define CACHED_INDUCTION_VARIABLE_PHI(input_count) \
+  case input_count:                                \
+    return &cache_.kInductionVariablePhi##input_count##Operator;
+    CACHED_INDUCTION_VARIABLE_PHI_LIST(CACHED_INDUCTION_VARIABLE_PHI)
+#undef CACHED_INDUCTION_VARIABLE_PHI
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone()) Operator(                          // --
+      IrOpcode::kInductionVariablePhi, Operator::kPure,  // opcode
+      "InductionVariablePhi",                            // name
+      input_count, 0, 1, 1, 0, 0);                       // counts
 }
 
 const Operator* CommonOperatorBuilder::BeginRegion(
@@ -878,7 +1017,7 @@
               Operator::ZeroIfPure(descriptor->properties()),
               Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
 
-    void PrintParameter(std::ostream& os) const override {
+    void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
       os << "[" << *parameter() << "]";
     }
   };
@@ -892,11 +1031,12 @@
    public:
     explicit TailCallOperator(const CallDescriptor* descriptor)
         : Operator1<const CallDescriptor*>(
-              IrOpcode::kTailCall, descriptor->properties(), "TailCall",
+              IrOpcode::kTailCall,
+              descriptor->properties() | Operator::kNoThrow, "TailCall",
               descriptor->InputCount() + descriptor->FrameStateCount(), 1, 1, 0,
               0, 1, descriptor) {}
 
-    void PrintParameter(std::ostream& os) const override {
+    void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
       os << "[" << *parameter() << "]";
     }
   };
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 77d53de..9e4d259 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -7,6 +7,7 @@
 
 #include "src/assembler.h"
 #include "src/compiler/frame-states.h"
+#include "src/deoptimize-reason.h"
 #include "src/machine-type.h"
 #include "src/zone-containers.h"
 
@@ -42,6 +43,8 @@
 
 BranchHint BranchHintOf(const Operator* const);
 
+// Deoptimize reason for the DeoptimizeIf and DeoptimizeUnless operators.
+DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
 
 // Deoptimize bailout kind.
 enum class DeoptimizeKind : uint8_t { kEager, kSoft };
@@ -50,15 +53,28 @@
 
 std::ostream& operator<<(std::ostream&, DeoptimizeKind);
 
-DeoptimizeKind DeoptimizeKindOf(const Operator* const);
+// Parameters for the {Deoptimize} operator.
+class DeoptimizeParameters final {
+ public:
+  DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason)
+      : kind_(kind), reason_(reason) {}
 
+  DeoptimizeKind kind() const { return kind_; }
+  DeoptimizeReason reason() const { return reason_; }
 
-// Prediction whether throw-site is surrounded by any local catch-scope.
-enum class IfExceptionHint { kLocallyUncaught, kLocallyCaught };
+ private:
+  DeoptimizeKind const kind_;
+  DeoptimizeReason const reason_;
+};
 
-size_t hash_value(IfExceptionHint hint);
+bool operator==(DeoptimizeParameters, DeoptimizeParameters);
+bool operator!=(DeoptimizeParameters, DeoptimizeParameters);
 
-std::ostream& operator<<(std::ostream&, IfExceptionHint);
+size_t hash_value(DeoptimizeParameters p);
+
+std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
+
+DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const);
 
 
 class SelectParameters final {
@@ -153,6 +169,8 @@
 std::ostream& operator<<(std::ostream& os,
                          const ZoneVector<MachineType>* types);
 
+Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
+
 // Interface for building common operators that can be used at any level of IR,
 // including JavaScript, mid-level, and low-level.
 class CommonOperatorBuilder final : public ZoneObject {
@@ -165,14 +183,14 @@
   const Operator* IfTrue();
   const Operator* IfFalse();
   const Operator* IfSuccess();
-  const Operator* IfException(IfExceptionHint hint);
+  const Operator* IfException();
   const Operator* Switch(size_t control_output_count);
   const Operator* IfValue(int32_t value);
   const Operator* IfDefault();
   const Operator* Throw();
-  const Operator* Deoptimize(DeoptimizeKind kind);
-  const Operator* DeoptimizeIf();
-  const Operator* DeoptimizeUnless();
+  const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
+  const Operator* DeoptimizeIf(DeoptimizeReason reason);
+  const Operator* DeoptimizeUnless(DeoptimizeReason reason);
   const Operator* Return(int value_input_count = 1);
   const Operator* Terminate();
 
@@ -202,6 +220,10 @@
   const Operator* Phi(MachineRepresentation representation,
                       int value_input_count);
   const Operator* EffectPhi(int effect_input_count);
+  const Operator* InductionVariablePhi(int value_input_count);
+  const Operator* LoopExit();
+  const Operator* LoopExitValue();
+  const Operator* LoopExitEffect();
   const Operator* Checkpoint();
   const Operator* BeginRegion(RegionObservability);
   const Operator* FinishRegion();
@@ -214,6 +236,8 @@
   const Operator* Call(const CallDescriptor* descriptor);
   const Operator* TailCall(const CallDescriptor* descriptor);
   const Operator* Projection(size_t index);
+  const Operator* Retain();
+  const Operator* TypeGuard(Type* type);
 
   // Constructs a new merge or phi operator with the same opcode as {op}, but
   // with {size} inputs.
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
index 6905ef5..b159bb2 100644
--- a/src/compiler/control-builders.cc
+++ b/src/compiler/control-builders.cc
@@ -36,6 +36,7 @@
   loop_environment_ = environment()->CopyForLoop(assigned, is_osr);
   continue_environment_ = environment()->CopyAsUnreachable();
   break_environment_ = environment()->CopyAsUnreachable();
+  assigned_ = assigned;
 }
 
 
@@ -60,6 +61,7 @@
 void LoopBuilder::EndLoop() {
   loop_environment_->Merge(environment());
   set_environment(break_environment_);
+  ExitLoop();
 }
 
 
@@ -82,6 +84,16 @@
   control_if.End();
 }
 
+void LoopBuilder::ExitLoop(Node** extra_value_to_rename) {
+  if (extra_value_to_rename) {
+    environment()->Push(*extra_value_to_rename);
+  }
+  environment()->PrepareForLoopExit(loop_environment_->GetControlDependency(),
+                                    assigned_);
+  if (extra_value_to_rename) {
+    *extra_value_to_rename = environment()->Pop();
+  }
+}
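The push/pop dance around PrepareForLoopExit exists so that a value living outside the environment's value stack gets renamed together with everything else. A self-contained model of the trick, with an int stack standing in for the environment and +1000 standing in for wrapping a value in a LoopExitValue marker:

#include <vector>

struct Environment {
  std::vector<int> values;

  void Push(int v) { values.push_back(v); }
  int Pop() {
    int v = values.back();
    values.pop_back();
    return v;
  }
  // Stand-in for introducing a LoopExitValue marker on every live value.
  void PrepareForLoopExit() {
    for (int& v : values) v += 1000;
  }
};

void ExitLoop(Environment* env, int* extra_value_to_rename) {
  if (extra_value_to_rename) env->Push(*extra_value_to_rename);
  env->PrepareForLoopExit();
  if (extra_value_to_rename) *extra_value_to_rename = env->Pop();
}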
 
 void SwitchBuilder::BeginSwitch() {
   body_environment_ = environment()->CopyAsUnreachable();
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
index 6ff00be..a59dcb6 100644
--- a/src/compiler/control-builders.h
+++ b/src/compiler/control-builders.h
@@ -63,7 +63,8 @@
       : ControlBuilder(builder),
         loop_environment_(nullptr),
         continue_environment_(nullptr),
-        break_environment_(nullptr) {}
+        break_environment_(nullptr),
+        assigned_(nullptr) {}
 
   // Primitive control commands.
   void BeginLoop(BitVector* assigned, bool is_osr = false);
@@ -74,6 +75,10 @@
   // Primitive support for break.
   void Break() final;
 
+  // Loop exit support. Used to introduce explicit loop exit control
+  // node and variable markers.
+  void ExitLoop(Node** extra_value_to_rename = nullptr);
+
   // Compound control commands for conditional break.
   void BreakUnless(Node* condition);
   void BreakWhen(Node* condition);
@@ -82,6 +87,7 @@
   Environment* loop_environment_;      // Environment of the loop header.
   Environment* continue_environment_;  // Environment after the loop body.
   Environment* break_environment_;     // Environment after the loop exits.
+  BitVector* assigned_;                // Assigned values in the environment.
 };
 
 
diff --git a/src/compiler/control-flow-optimizer.cc b/src/compiler/control-flow-optimizer.cc
index 3fc3bce..6027c82 100644
--- a/src/compiler/control-flow-optimizer.cc
+++ b/src/compiler/control-flow-optimizer.cc
@@ -63,146 +63,10 @@
 void ControlFlowOptimizer::VisitBranch(Node* node) {
   DCHECK_EQ(IrOpcode::kBranch, node->opcode());
   if (TryBuildSwitch(node)) return;
-  if (TryCloneBranch(node)) return;
   VisitNode(node);
 }
 
 
-bool ControlFlowOptimizer::TryCloneBranch(Node* node) {
-  DCHECK_EQ(IrOpcode::kBranch, node->opcode());
-
-  // This optimization is a special case of (super)block cloning. It takes an
-  // input graph as shown below and clones the Branch node for every predecessor
-  // to the Merge, essentially removing the Merge completely. This avoids
-  // materializing the bit for the Phi and may offer potential for further
-  // branch folding optimizations (i.e. because one or more inputs to the Phi is
-  // a constant). Note that there may be more Phi nodes hanging off the Merge,
-  // but we can only a certain subset of them currently (actually only Phi and
-  // EffectPhi nodes whose uses have either the IfTrue or IfFalse as control
-  // input).
-
-  //   Control1 ... ControlN
-  //      ^            ^
-  //      |            |   Cond1 ... CondN
-  //      +----+  +----+     ^         ^
-  //           |  |          |         |
-  //           |  |     +----+         |
-  //          Merge<--+ | +------------+
-  //            ^      \|/
-  //            |      Phi
-  //            |       |
-  //          Branch----+
-  //            ^
-  //            |
-  //      +-----+-----+
-  //      |           |
-  //    IfTrue     IfFalse
-  //      ^           ^
-  //      |           |
-
-  // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
-
-  // Control1 Cond1 ... ControlN CondN
-  //    ^      ^           ^      ^
-  //    \      /           \      /
-  //     Branch     ...     Branch
-  //       ^                  ^
-  //       |                  |
-  //   +---+---+          +---+----+
-  //   |       |          |        |
-  // IfTrue IfFalse ... IfTrue  IfFalse
-  //   ^       ^          ^        ^
-  //   |       |          |        |
-  //   +--+ +-------------+        |
-  //      | |  +--------------+ +--+
-  //      | |                 | |
-  //     Merge               Merge
-  //       ^                   ^
-  //       |                   |
-
-  Node* branch = node;
-  Node* cond = NodeProperties::GetValueInput(branch, 0);
-  if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return false;
-  Node* merge = NodeProperties::GetControlInput(branch);
-  if (merge->opcode() != IrOpcode::kMerge ||
-      NodeProperties::GetControlInput(cond) != merge) {
-    return false;
-  }
-  // Grab the IfTrue/IfFalse projections of the Branch.
-  BranchMatcher matcher(branch);
-  // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
-  NodeVector phis(zone());
-  for (Node* const use : merge->uses()) {
-    if (use == branch || use == cond) continue;
-    // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
-    // Merge. Ideally, we would just clone the nodes (and everything that
-    // depends on it to some distant join point), but that requires knowledge
-    // about dominance/post-dominance.
-    if (!NodeProperties::IsPhi(use)) return false;
-    for (Edge edge : use->use_edges()) {
-      // Right now we can only handle Phi/EffectPhi nodes whose uses are
-      // directly control-dependend on either the IfTrue or the IfFalse
-      // successor, because we know exactly how to update those uses.
-      // TODO(turbofan): Generalize this to all Phi/EffectPhi nodes using
-      // dominance/post-dominance on the sea of nodes.
-      if (edge.from()->op()->ControlInputCount() != 1) return false;
-      Node* control = NodeProperties::GetControlInput(edge.from());
-      if (NodeProperties::IsPhi(edge.from())) {
-        control = NodeProperties::GetControlInput(control, edge.index());
-      }
-      if (control != matcher.IfTrue() && control != matcher.IfFalse())
-        return false;
-    }
-    phis.push_back(use);
-  }
-  BranchHint const hint = BranchHintOf(branch->op());
-  int const input_count = merge->op()->ControlInputCount();
-  DCHECK_LE(1, input_count);
-  Node** const inputs = zone()->NewArray<Node*>(2 * input_count);
-  Node** const merge_true_inputs = &inputs[0];
-  Node** const merge_false_inputs = &inputs[input_count];
-  for (int index = 0; index < input_count; ++index) {
-    Node* cond1 = NodeProperties::GetValueInput(cond, index);
-    Node* control1 = NodeProperties::GetControlInput(merge, index);
-    Node* branch1 = graph()->NewNode(common()->Branch(hint), cond1, control1);
-    merge_true_inputs[index] = graph()->NewNode(common()->IfTrue(), branch1);
-    merge_false_inputs[index] = graph()->NewNode(common()->IfFalse(), branch1);
-    Enqueue(branch1);
-  }
-  Node* const merge_true = graph()->NewNode(common()->Merge(input_count),
-                                            input_count, merge_true_inputs);
-  Node* const merge_false = graph()->NewNode(common()->Merge(input_count),
-                                             input_count, merge_false_inputs);
-  for (Node* const phi : phis) {
-    for (int index = 0; index < input_count; ++index) {
-      inputs[index] = phi->InputAt(index);
-    }
-    inputs[input_count] = merge_true;
-    Node* phi_true = graph()->NewNode(phi->op(), input_count + 1, inputs);
-    inputs[input_count] = merge_false;
-    Node* phi_false = graph()->NewNode(phi->op(), input_count + 1, inputs);
-    for (Edge edge : phi->use_edges()) {
-      Node* control = NodeProperties::GetControlInput(edge.from());
-      if (NodeProperties::IsPhi(edge.from())) {
-        control = NodeProperties::GetControlInput(control, edge.index());
-      }
-      DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
-      edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
-    }
-    phi->Kill();
-  }
-  // Fix up IfTrue and IfFalse and kill all dead nodes.
-  matcher.IfFalse()->ReplaceUses(merge_false);
-  matcher.IfTrue()->ReplaceUses(merge_true);
-  matcher.IfFalse()->Kill();
-  matcher.IfTrue()->Kill();
-  branch->Kill();
-  cond->Kill();
-  merge->Kill();
-  return true;
-}
-
-
 bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
   DCHECK_EQ(IrOpcode::kBranch, node->opcode());
 
diff --git a/src/compiler/dead-code-elimination.cc b/src/compiler/dead-code-elimination.cc
index 697d7f8..81bf299 100644
--- a/src/compiler/dead-code-elimination.cc
+++ b/src/compiler/dead-code-elimination.cc
@@ -28,6 +28,8 @@
     case IrOpcode::kLoop:
     case IrOpcode::kMerge:
       return ReduceLoopOrMerge(node);
+    case IrOpcode::kLoopExit:
+      return ReduceLoopExit(node);
     default:
       return ReduceNode(node);
   }
@@ -96,6 +98,9 @@
     for (Node* const use : node->uses()) {
       if (NodeProperties::IsPhi(use)) {
         Replace(use, use->InputAt(0));
+      } else if (use->opcode() == IrOpcode::kLoopExit &&
+                 use->InputAt(1) == node) {
+        RemoveLoopExit(use);
       } else if (use->opcode() == IrOpcode::kTerminate) {
         DCHECK_EQ(IrOpcode::kLoop, node->opcode());
         Replace(use, dead());
@@ -121,6 +126,18 @@
   return NoChange();
 }
 
+Reduction DeadCodeElimination::RemoveLoopExit(Node* node) {
+  DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
+  for (Node* const use : node->uses()) {
+    if (use->opcode() == IrOpcode::kLoopExitValue ||
+        use->opcode() == IrOpcode::kLoopExitEffect) {
+      Replace(use, use->InputAt(0));
+    }
+  }
+  Node* control = NodeProperties::GetControlInput(node, 0);
+  Replace(node, control);
+  return Replace(control);
+}
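A toy model of RemoveLoopExit (plain structs rather than V8 nodes): the LoopExitValue/LoopExitEffect projections forward their first input, and the exit itself dissolves into its control input.

#include <vector>

enum class Op { kLoopExit, kLoopExitValue, kLoopExitEffect, kOther };

struct N {
  Op op;
  std::vector<N*> inputs;    // inputs[0] models the first (control) input
  std::vector<N*> uses;
  N* replacement = nullptr;  // stand-in for Replace()
};

N* RemoveLoopExit(N* exit) {
  for (N* use : exit->uses) {
    if (use->op == Op::kLoopExitValue || use->op == Op::kLoopExitEffect) {
      use->replacement = use->inputs[0];  // forward the pre-exit value/effect
    }
  }
  N* control = exit->inputs[0];
  exit->replacement = control;  // the exit is replaced by its control input
  return control;
}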
 
 Reduction DeadCodeElimination::ReduceNode(Node* node) {
   // If {node} has exactly one control input and this is {Dead},
@@ -133,6 +150,15 @@
   return NoChange();
 }
 
+Reduction DeadCodeElimination::ReduceLoopExit(Node* node) {
+  Node* control = NodeProperties::GetControlInput(node, 0);
+  Node* loop = NodeProperties::GetControlInput(node, 1);
+  if (control->opcode() == IrOpcode::kDead ||
+      loop->opcode() == IrOpcode::kDead) {
+    return RemoveLoopExit(node);
+  }
+  return NoChange();
+}
 
 void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
   const Operator* const op = common()->ResizeMergeOrPhi(node->op(), size);
diff --git a/src/compiler/dead-code-elimination.h b/src/compiler/dead-code-elimination.h
index e5996c8..8e18561 100644
--- a/src/compiler/dead-code-elimination.h
+++ b/src/compiler/dead-code-elimination.h
@@ -30,8 +30,11 @@
  private:
   Reduction ReduceEnd(Node* node);
   Reduction ReduceLoopOrMerge(Node* node);
+  Reduction ReduceLoopExit(Node* node);
   Reduction ReduceNode(Node* node);
 
+  Reduction RemoveLoopExit(Node* node);
+
   void TrimMergeOrPhi(Node* node, int size);
 
   Graph* graph() const { return graph_; }
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index b7f6b12..9cc6ddc 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -8,6 +8,7 @@
 #include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
 #include "src/compiler/schedule.h"
@@ -35,11 +36,30 @@
 namespace {
 
 struct BlockEffectControlData {
-  Node* current_effect = nullptr;  // New effect.
-  Node* current_control = nullptr;  // New control.
+  Node* current_effect = nullptr;       // New effect.
+  Node* current_control = nullptr;      // New control.
   Node* current_frame_state = nullptr;  // New frame state.
 };
 
+class BlockEffectControlMap {
+ public:
+  explicit BlockEffectControlMap(Zone* temp_zone) : map_(temp_zone) {}
+
+  BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) {
+    return map_[std::make_pair(from->rpo_number(), to->rpo_number())];
+  }
+
+  const BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) const {
+    return map_.at(std::make_pair(from->rpo_number(), to->rpo_number()));
+  }
+
+ private:
+  typedef std::pair<int32_t, int32_t> Key;
+  typedef ZoneMap<Key, BlockEffectControlData> Map;
+
+  Map map_;
+};
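The map is keyed by a (predecessor, successor) rpo pair rather than by block alone because, after the branch cloning introduced further down, a block may hand a different effect/control to each of its successors. A self-contained sketch of the structure, with std::map standing in for ZoneMap:

#include <cstdint>
#include <map>
#include <utility>

struct Data {
  int current_effect = 0;
  int current_control = 0;
};

class EdgeMap {
 public:
  // Default-constructs an entry on first access, like ZoneMap::operator[].
  Data& For(int32_t from_rpo, int32_t to_rpo) {
    return map_[std::make_pair(from_rpo, to_rpo)];
  }

 private:
  std::map<std::pair<int32_t, int32_t>, Data> map_;
};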
+
 // Effect phis that need to be updated after the first pass.
 struct PendingEffectPhi {
   Node* effect_phi;
@@ -50,7 +70,7 @@
 };
 
 void UpdateEffectPhi(Node* node, BasicBlock* block,
-                     ZoneVector<BlockEffectControlData>* block_effects) {
+                     BlockEffectControlMap* block_effects) {
   // Update all inputs to an effect phi with the effects from the given
   // block->effect map.
   DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
@@ -58,16 +78,16 @@
   for (int i = 0; i < node->op()->EffectInputCount(); i++) {
     Node* input = node->InputAt(i);
     BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
-    Node* input_effect =
-        (*block_effects)[predecessor->rpo_number()].current_effect;
-    if (input != input_effect) {
-      node->ReplaceInput(i, input_effect);
+    const BlockEffectControlData& block_effect =
+        block_effects->For(predecessor, block);
+    if (input != block_effect.current_effect) {
+      node->ReplaceInput(i, block_effect.current_effect);
     }
   }
 }
 
 void UpdateBlockControl(BasicBlock* block,
-                        ZoneVector<BlockEffectControlData>* block_effects) {
+                        BlockEffectControlMap* block_effects) {
   Node* control = block->NodeAt(0);
   DCHECK(NodeProperties::IsControl(control));
 
@@ -75,14 +95,19 @@
   if (control->opcode() == IrOpcode::kEnd) return;
 
   // Update all inputs to the given control node with the correct control.
-  DCHECK_EQ(control->op()->ControlInputCount(), block->PredecessorCount());
+  DCHECK(control->opcode() == IrOpcode::kMerge ||
+         control->op()->ControlInputCount() == block->PredecessorCount());
+  if (control->op()->ControlInputCount() != block->PredecessorCount()) {
+    return;  // We already re-wired the control inputs of this node.
+  }
   for (int i = 0; i < control->op()->ControlInputCount(); i++) {
     Node* input = NodeProperties::GetControlInput(control, i);
     BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
-    Node* input_control =
-        (*block_effects)[predecessor->rpo_number()].current_control;
-    if (input != input_control) {
-      NodeProperties::ReplaceControlInput(control, input_control, i);
+    const BlockEffectControlData& block_effect =
+        block_effects->For(predecessor, block);
+    if (input != block_effect.current_control) {
+      NodeProperties::ReplaceControlInput(control, block_effect.current_control,
+                                          i);
     }
   }
 }
@@ -114,13 +139,164 @@
   node->Kill();
 }
 
+void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
+                    CommonOperatorBuilder* common,
+                    BlockEffectControlMap* block_effects) {
+  DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+
+  // This optimization is a special case of (super)block cloning. It takes an
+  // input graph as shown below and clones the Branch node for every predecessor
+  // to the Merge, essentially removing the Merge completely. This avoids
+  // materializing the bit for the Phi and may offer potential for further
+  // branch folding optimizations (e.g. because one or more inputs to the Phi
+  // are constants). Note that there may be more Phi nodes hanging off the
+  // Merge, but we can currently only handle a certain subset of them (namely
+  // Phi and EffectPhi nodes whose uses have either the IfTrue or IfFalse as
+  // control input).
+
+  //   Control1 ... ControlN
+  //      ^            ^
+  //      |            |   Cond1 ... CondN
+  //      +----+  +----+     ^         ^
+  //           |  |          |         |
+  //           |  |     +----+         |
+  //          Merge<--+ | +------------+
+  //            ^      \|/
+  //            |      Phi
+  //            |       |
+  //          Branch----+
+  //            ^
+  //            |
+  //      +-----+-----+
+  //      |           |
+  //    IfTrue     IfFalse
+  //      ^           ^
+  //      |           |
+
+  // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
+
+  // Control1 Cond1 ... ControlN CondN
+  //    ^      ^           ^      ^
+  //    \      /           \      /
+  //     Branch     ...     Branch
+  //       ^                  ^
+  //       |                  |
+  //   +---+---+          +---+----+
+  //   |       |          |        |
+  // IfTrue IfFalse ... IfTrue  IfFalse
+  //   ^       ^          ^        ^
+  //   |       |          |        |
+  //   +--+ +-------------+        |
+  //      | |  +--------------+ +--+
+  //      | |                 | |
+  //     Merge               Merge
+  //       ^                   ^
+  //       |                   |
+
+  Node* branch = node;
+  Node* cond = NodeProperties::GetValueInput(branch, 0);
+  if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return;
+  Node* merge = NodeProperties::GetControlInput(branch);
+  if (merge->opcode() != IrOpcode::kMerge ||
+      NodeProperties::GetControlInput(cond) != merge) {
+    return;
+  }
+  // Grab the IfTrue/IfFalse projections of the Branch.
+  BranchMatcher matcher(branch);
+  // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
+  NodeVector phis(graph->zone());
+  for (Node* const use : merge->uses()) {
+    if (use == branch || use == cond) continue;
+    // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
+    // Merge. Ideally, we would just clone the nodes (and everything that
+    // depends on them, up to some distant join point), but that requires
+    // knowledge about dominance/post-dominance.
+    if (!NodeProperties::IsPhi(use)) return;
+    for (Edge edge : use->use_edges()) {
+      // Right now we can only handle Phi/EffectPhi nodes whose uses are
+      // directly control-dependent on either the IfTrue or the IfFalse
+      // successor, because we know exactly how to update those uses.
+      if (edge.from()->op()->ControlInputCount() != 1) return;
+      Node* control = NodeProperties::GetControlInput(edge.from());
+      if (NodeProperties::IsPhi(edge.from())) {
+        control = NodeProperties::GetControlInput(control, edge.index());
+      }
+      if (control != matcher.IfTrue() && control != matcher.IfFalse()) return;
+    }
+    phis.push_back(use);
+  }
+  BranchHint const hint = BranchHintOf(branch->op());
+  int const input_count = merge->op()->ControlInputCount();
+  DCHECK_LE(1, input_count);
+  Node** const inputs = graph->zone()->NewArray<Node*>(2 * input_count);
+  Node** const merge_true_inputs = &inputs[0];
+  Node** const merge_false_inputs = &inputs[input_count];
+  for (int index = 0; index < input_count; ++index) {
+    Node* cond1 = NodeProperties::GetValueInput(cond, index);
+    Node* control1 = NodeProperties::GetControlInput(merge, index);
+    Node* branch1 = graph->NewNode(common->Branch(hint), cond1, control1);
+    merge_true_inputs[index] = graph->NewNode(common->IfTrue(), branch1);
+    merge_false_inputs[index] = graph->NewNode(common->IfFalse(), branch1);
+  }
+  Node* const merge_true = matcher.IfTrue();
+  Node* const merge_false = matcher.IfFalse();
+  merge_true->TrimInputCount(0);
+  merge_false->TrimInputCount(0);
+  for (int i = 0; i < input_count; ++i) {
+    merge_true->AppendInput(graph->zone(), merge_true_inputs[i]);
+    merge_false->AppendInput(graph->zone(), merge_false_inputs[i]);
+  }
+  DCHECK_EQ(2, block->SuccessorCount());
+  NodeProperties::ChangeOp(matcher.IfTrue(), common->Merge(input_count));
+  NodeProperties::ChangeOp(matcher.IfFalse(), common->Merge(input_count));
+  int const true_index =
+      block->SuccessorAt(0)->NodeAt(0) == matcher.IfTrue() ? 0 : 1;
+  BlockEffectControlData* true_block_data =
+      &block_effects->For(block, block->SuccessorAt(true_index));
+  BlockEffectControlData* false_block_data =
+      &block_effects->For(block, block->SuccessorAt(true_index ^ 1));
+  for (Node* const phi : phis) {
+    for (int index = 0; index < input_count; ++index) {
+      inputs[index] = phi->InputAt(index);
+    }
+    inputs[input_count] = merge_true;
+    Node* phi_true = graph->NewNode(phi->op(), input_count + 1, inputs);
+    inputs[input_count] = merge_false;
+    Node* phi_false = graph->NewNode(phi->op(), input_count + 1, inputs);
+    if (phi->UseCount() == 0) {
+      DCHECK_EQ(phi->opcode(), IrOpcode::kEffectPhi);
+      DCHECK_EQ(input_count, block->SuccessorCount());
+    } else {
+      for (Edge edge : phi->use_edges()) {
+        Node* control = NodeProperties::GetControlInput(edge.from());
+        if (NodeProperties::IsPhi(edge.from())) {
+          control = NodeProperties::GetControlInput(control, edge.index());
+        }
+        DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
+        edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
+      }
+    }
+    if (phi->opcode() == IrOpcode::kEffectPhi) {
+      true_block_data->current_effect = phi_true;
+      false_block_data->current_effect = phi_false;
+    }
+    phi->Kill();
+  }
+  // Fix up IfTrue and IfFalse and kill all dead nodes.
+  if (branch == block->control_input()) {
+    true_block_data->current_control = merge_true;
+    false_block_data->current_control = merge_false;
+  }
+  branch->Kill();
+  cond->Kill();
+  merge->Kill();
+}
 }  // namespace
 
 void EffectControlLinearizer::Run() {
-  ZoneVector<BlockEffectControlData> block_effects(temp_zone());
+  BlockEffectControlMap block_effects(temp_zone());
   ZoneVector<PendingEffectPhi> pending_effect_phis(temp_zone());
   ZoneVector<BasicBlock*> pending_block_controls(temp_zone());
-  block_effects.resize(schedule()->RpoBlockCount());
   NodeVector inputs_buffer(temp_zone());
 
   for (BasicBlock* block : *(schedule()->rpo_order())) {
@@ -186,13 +362,13 @@
         DCHECK_EQ(1u, block->size());
         effect = nullptr;
       } else {
-        // If all the predecessors have the same effect, we can use it
-        // as our current effect.
-        int rpo_number = block->PredecessorAt(0)->rpo_number();
-        effect = block_effects[rpo_number].current_effect;
-        for (size_t i = 1; i < block->PredecessorCount(); i++) {
-          int rpo_number = block->PredecessorAt(i)->rpo_number();
-          if (block_effects[rpo_number].current_effect != effect) {
+        // If all the predecessors have the same effect, we can use it as our
+        // current effect.
+        effect =
+            block_effects.For(block->PredecessorAt(0), block).current_effect;
+        for (size_t i = 1; i < block->PredecessorCount(); ++i) {
+          if (block_effects.For(block->PredecessorAt(i), block)
+                  .current_effect != effect) {
             effect = nullptr;
             break;
           }
@@ -202,13 +378,17 @@
           // The input blocks do not have the same effect. We have
           // to create an effect phi node.
           inputs_buffer.clear();
-          inputs_buffer.resize(block->PredecessorCount(), graph()->start());
+          inputs_buffer.resize(block->PredecessorCount(), jsgraph()->Dead());
           inputs_buffer.push_back(control);
           effect = graph()->NewNode(
               common()->EffectPhi(static_cast<int>(block->PredecessorCount())),
               static_cast<int>(inputs_buffer.size()), &(inputs_buffer.front()));
-          // Let us update the effect phi node later.
-          pending_effect_phis.push_back(PendingEffectPhi(effect, block));
+          // For loops, we update the effect phi node later to break cycles.
+          if (control->opcode() == IrOpcode::kLoop) {
+            pending_effect_phis.push_back(PendingEffectPhi(effect, block));
+          } else {
+            UpdateEffectPhi(effect, block, &block_effects);
+          }
         } else if (control->opcode() == IrOpcode::kIfException) {
           // The IfException is connected into the effect chain, so we need
           // to update the effect here.
@@ -232,11 +412,11 @@
     if (block != schedule()->start()) {
       // If all the predecessors have the same effect, we can use it
       // as our current effect.
-      int rpo_number = block->PredecessorAt(0)->rpo_number();
-      frame_state = block_effects[rpo_number].current_frame_state;
+      frame_state =
+          block_effects.For(block->PredecessorAt(0), block).current_frame_state;
       for (size_t i = 1; i < block->PredecessorCount(); i++) {
-        int rpo_number = block->PredecessorAt(i)->rpo_number();
-        if (block_effects[rpo_number].current_frame_state != frame_state) {
+        if (block_effects.For(block->PredecessorAt(i), block)
+                .current_frame_state != frame_state) {
           frame_state = nullptr;
           break;
         }
@@ -256,19 +436,31 @@
 
       case BasicBlock::kCall:
       case BasicBlock::kTailCall:
-      case BasicBlock::kBranch:
       case BasicBlock::kSwitch:
       case BasicBlock::kReturn:
       case BasicBlock::kDeoptimize:
       case BasicBlock::kThrow:
         ProcessNode(block->control_input(), &frame_state, &effect, &control);
         break;
+
+      case BasicBlock::kBranch:
+        ProcessNode(block->control_input(), &frame_state, &effect, &control);
+        TryCloneBranch(block->control_input(), block, graph(), common(),
+                       &block_effects);
+        break;
     }
 
-    // Store the effect for later use.
-    block_effects[block->rpo_number()].current_effect = effect;
-    block_effects[block->rpo_number()].current_control = control;
-    block_effects[block->rpo_number()].current_frame_state = frame_state;
+    // Store the effect, control and frame state for later use.
+    for (BasicBlock* successor : block->successors()) {
+      BlockEffectControlData* data = &block_effects.For(block, successor);
+      if (data->current_effect == nullptr) {
+        data->current_effect = effect;
+      }
+      if (data->current_control == nullptr) {
+        data->current_control = control;
+      }
+      data->current_frame_state = frame_state;
+    }
   }
 
   // Update the incoming edges of the effect phis that could not be processed
@@ -339,8 +531,7 @@
     // Unlink the checkpoint; effect uses will be updated to the incoming
     // effect that is passed. The frame state is preserved for lowering.
     DCHECK_EQ(RegionObservability::kObservable, region_observability_);
-    *frame_state = NodeProperties::GetFrameStateInput(node, 0);
-    node->TrimInputCount(0);
+    *frame_state = NodeProperties::GetFrameStateInput(node);
     return;
   }
 
@@ -395,9 +586,6 @@
                                                    Node** control) {
   ValueEffectControl state(nullptr, nullptr, nullptr);
   switch (node->opcode()) {
-    case IrOpcode::kTypeGuard:
-      state = LowerTypeGuard(node, *effect, *control);
-      break;
     case IrOpcode::kChangeBitToTagged:
       state = LowerChangeBitToTagged(node, *effect, *control);
       break;
@@ -434,6 +622,18 @@
     case IrOpcode::kCheckBounds:
       state = LowerCheckBounds(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kCheckMaps:
+      state = LowerCheckMaps(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckNumber:
+      state = LowerCheckNumber(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckString:
+      state = LowerCheckString(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckIf:
+      state = LowerCheckIf(node, frame_state, *effect, *control);
+      break;
     case IrOpcode::kCheckTaggedPointer:
       state = LowerCheckTaggedPointer(node, frame_state, *effect, *control);
       break;
@@ -446,12 +646,31 @@
     case IrOpcode::kCheckedInt32Sub:
       state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kCheckedInt32Div:
+      state = LowerCheckedInt32Div(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedInt32Mod:
+      state = LowerCheckedInt32Mod(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedUint32Div:
+      state = LowerCheckedUint32Div(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedUint32Mod:
+      state = LowerCheckedUint32Mod(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedInt32Mul:
+      state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
+      break;
     case IrOpcode::kCheckedUint32ToInt32:
       state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
       break;
     case IrOpcode::kCheckedFloat64ToInt32:
       state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kCheckedTaggedSignedToInt32:
+      state =
+          LowerCheckedTaggedSignedToInt32(node, frame_state, *effect, *control);
+      break;
     case IrOpcode::kCheckedTaggedToInt32:
       state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
       break;
@@ -461,6 +680,10 @@
     case IrOpcode::kTruncateTaggedToWord32:
       state = LowerTruncateTaggedToWord32(node, *effect, *control);
       break;
+    case IrOpcode::kCheckedTruncateTaggedToWord32:
+      state = LowerCheckedTruncateTaggedToWord32(node, frame_state, *effect,
+                                                 *control);
+      break;
     case IrOpcode::kObjectIsCallable:
       state = LowerObjectIsCallable(node, *effect, *control);
       break;
@@ -482,12 +705,18 @@
     case IrOpcode::kStringFromCharCode:
       state = LowerStringFromCharCode(node, *effect, *control);
       break;
+    case IrOpcode::kStringCharCodeAt:
+      state = LowerStringCharCodeAt(node, *effect, *control);
+      break;
     case IrOpcode::kCheckFloat64Hole:
       state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
       break;
     case IrOpcode::kCheckTaggedHole:
       state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kConvertTaggedHoleToUndefined:
+      state = LowerConvertTaggedHoleToUndefined(node, *effect, *control);
+      break;
     case IrOpcode::kPlainPrimitiveToNumber:
       state = LowerPlainPrimitiveToNumber(node, *effect, *control);
       break;
@@ -497,6 +726,30 @@
     case IrOpcode::kPlainPrimitiveToFloat64:
       state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
       break;
+    case IrOpcode::kEnsureWritableFastElements:
+      state = LowerEnsureWritableFastElements(node, *effect, *control);
+      break;
+    case IrOpcode::kMaybeGrowFastElements:
+      state = LowerMaybeGrowFastElements(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kTransitionElementsKind:
+      state = LowerTransitionElementsKind(node, *effect, *control);
+      break;
+    case IrOpcode::kLoadTypedElement:
+      state = LowerLoadTypedElement(node, *effect, *control);
+      break;
+    case IrOpcode::kStoreTypedElement:
+      state = LowerStoreTypedElement(node, *effect, *control);
+      break;
+    case IrOpcode::kFloat64RoundUp:
+      state = LowerFloat64RoundUp(node, *effect, *control);
+      break;
+    case IrOpcode::kFloat64RoundDown:
+      state = LowerFloat64RoundDown(node, *effect, *control);
+      break;
+    case IrOpcode::kFloat64RoundTruncate:
+      state = LowerFloat64RoundTruncate(node, *effect, *control);
+      break;
     default:
       return false;
   }
@@ -507,15 +760,9 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTypeGuard(Node* node, Node* effect,
-                                        Node* control) {
-  Node* value = node->InputAt(0);
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
                                                     Node* control) {
+  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
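+  // Note: when {mode} does not request the minus-zero check, a -0.0 input
+  // compares Float64Equal to the rounded zero below and is tagged as Smi
+  // zero; with the check enabled, -0.0 is routed to the HeapNumber box.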
   Node* value = node->InputAt(0);
 
   Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
@@ -528,29 +775,32 @@
   Node* vsmi;
   Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
 
-  // Check if {value} is -0.
-  Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
-                                      jsgraph()->Int32Constant(0));
-  Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check_zero, if_smi);
+  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+    // Check if {value} is -0.
+    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+                                        jsgraph()->Int32Constant(0));
+    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                         check_zero, if_smi);
 
-  Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-  Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+    Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
 
-  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
-  Node* check_negative = graph()->NewNode(
-      machine()->Int32LessThan(),
-      graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-      jsgraph()->Int32Constant(0));
-  Node* branch_negative = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                           check_negative, if_zero);
+    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+    Node* check_negative = graph()->NewNode(
+        machine()->Int32LessThan(),
+        graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+        jsgraph()->Int32Constant(0));
+    Node* branch_negative = graph()->NewNode(
+        common()->Branch(BranchHint::kFalse), check_negative, if_zero);
 
-  Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
-  Node* if_notnegative = graph()->NewNode(common()->IfFalse(), branch_negative);
+    Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
+    Node* if_notnegative =
+        graph()->NewNode(common()->IfFalse(), branch_negative);
 
-  // We need to create a box for negative 0.
-  if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
-  if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+    // We need to create a box for negative 0.
+    if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
+    if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+  }
 
   // On 64-bit machines we can just wrap the 32-bit integer in a smi, for 32-bit
   // machines we need to deal with potential overflow and fallback to boxing.
@@ -800,26 +1050,128 @@
   Node* limit = node->InputAt(1);
 
   Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
-  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                      frame_state, effect, control);
-
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check,
+      frame_state, effect, control);
 
   return ValueEffectControl(index, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state,
+                                        Node* effect, Node* control) {
+  Node* value = node->InputAt(0);
+
+  // Load the current map of the {value}.
+  Node* value_map = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+
+  int const map_count = node->op()->ValueInputCount() - 1;
+  Node** controls = temp_zone()->NewArray<Node*>(map_count);
+  Node** effects = temp_zone()->NewArray<Node*>(map_count + 1);
+
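+  // Compare the map of {value} against each expected map in turn. The first
+  // {map_count} - 1 checks branch on mismatch to try the next map; only the
+  // last check deoptimizes (kWrongMap). Matching paths are merged below.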
+  for (int i = 0; i < map_count; ++i) {
+    Node* map = node->InputAt(1 + i);
+
+    Node* check = graph()->NewNode(machine()->WordEqual(), value_map, map);
+    if (i == map_count - 1) {
+      controls[i] = effects[i] = graph()->NewNode(
+          common()->DeoptimizeUnless(DeoptimizeReason::kWrongMap), check,
+          frame_state, effect, control);
+    } else {
+      control = graph()->NewNode(common()->Branch(), check, control);
+      controls[i] = graph()->NewNode(common()->IfTrue(), control);
+      control = graph()->NewNode(common()->IfFalse(), control);
+      effects[i] = effect;
+    }
+  }
+
+  control = graph()->NewNode(common()->Merge(map_count), map_count, controls);
+  effects[map_count] = control;
+  effect =
+      graph()->NewNode(common()->EffectPhi(map_count), map_count + 1, effects);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state,
+                                          Node* effect, Node* control) {
+  Node* value = node->InputAt(0);
+
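+  // A Smi is trivially a number; a non-Smi {value} must be a HeapNumber,
+  // which we verify via its map, deoptimizing with kNotAHeapNumber.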
+  Node* check0 = ObjectIsSmi(value);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  {
+    Node* value_map = efalse0 =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         value, efalse0, if_false0);
+    Node* check1 = graph()->NewNode(machine()->WordEqual(), value_map,
+                                    jsgraph()->HeapNumberMapConstant());
+    if_false0 = efalse0 = graph()->NewNode(
+        common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check1,
+        frame_state, efalse0, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state,
+                                          Node* effect, Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check0 = ObjectIsSmi(value);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check0,
+                       frame_state, effect, control);
+
+  Node* value_map = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+  Node* value_instance_type = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+      effect, control);
+
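+  // All string instance types are below FIRST_NONSTRING_TYPE, so a single
+  // unsigned comparison of the instance type suffices here.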
+  Node* check1 =
+      graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
+                       jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType), check1,
+      frame_state, effect, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
+                                      Node* effect, Node* control) {
+  Node* value = node->InputAt(0);
+
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNoReason),
+                       value, frame_state, effect, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
                                                  Node* effect, Node* control) {
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
-  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
-                                      frame_state, effect, control);
-
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
+                       frame_state, effect, control);
 
   return ValueEffectControl(value, effect, control);
 }
@@ -830,11 +1182,9 @@
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
-  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                      frame_state, effect, control);
-
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
+                       check, frame_state, effect, control);
 
   return ValueEffectControl(value, effect, control);
 }
@@ -849,14 +1199,12 @@
       graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
 
   Node* check = graph()->NewNode(common()->Projection(1), value, control);
-  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
-                                      frame_state, effect, control);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+                       check, frame_state, effect, control);
 
   value = graph()->NewNode(common()->Projection(0), value, control);
 
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
-
   return ValueEffectControl(value, effect, control);
 }
 
@@ -870,13 +1218,298 @@
       graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
 
   Node* check = graph()->NewNode(common()->Projection(1), value, control);
-  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
-                                      frame_state, effect, control);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+                       check, frame_state, effect, control);
 
   value = graph()->NewNode(common()->Projection(0), value, control);
 
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  Node* zero = jsgraph()->Int32Constant(0);
+  Node* minusone = jsgraph()->Int32Constant(-1);
+  Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
+
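+  // The checked signed integer division, roughly in pseudocode:
+  //
+  //   if 0 < rhs then
+  //     lhs / rhs
+  //   else
+  //     deopt if rhs == 0
+  //     deopt if lhs == 0         // result would be -0
+  //     if lhs == kMinInt then
+  //       deopt if rhs == -1      // result would overflow
+  //     lhs / rhs
+  //   deopt if lhs % rhs != 0     // result would lose precision
+  //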
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  // Check if {rhs} is positive (and not zero).
+  Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    // Fast case, no additional checking required.
+    vtrue0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    // Check if {rhs} is zero.
+    Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+    if_false0 = efalse0 = graph()->NewNode(
+        common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+        frame_state, efalse0, if_false0);
+
+    // Check if {lhs} is zero, as that would produce minus zero.
+    check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
+    if_false0 = efalse0 =
+        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+                         check, frame_state, efalse0, if_false0);
+
+    // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
+    // to return -kMinInt, which is not representable.
+    Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    {
+      // Check if {rhs} is -1.
+      Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
+      if_true1 = etrue1 =
+          graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+                           check, frame_state, etrue1, if_true1);
+    }
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+
+    // Perform the actual integer division.
+    vfalse0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
+                       vfalse0, control);
+
+  // Check if the remainder is non-zero.
+  Node* check =
+      graph()->NewNode(machine()->Word32Equal(), lhs,
+                       graph()->NewNode(machine()->Int32Mul(), rhs, value));
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
+      frame_state, effect, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  Node* zero = jsgraph()->Int32Constant(0);
+  Node* one = jsgraph()->Int32Constant(1);
+
+  // General case for signed integer modulus, with an optimization for
+  // (unknown) power-of-2 right-hand sides.
+  //
+  //   if rhs <= 0 then
+  //     rhs = -rhs
+  //     deopt if rhs == 0
+  //   if lhs < 0 then
+  //     let res = lhs % rhs in
+  //     deopt if res == 0
+  //     res
+  //   else
+  //     let msk = rhs - 1 in
+  //     if rhs & msk == 0 then
+  //       lhs & msk
+  //     else
+  //       lhs % rhs
+  //
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  // Check if {rhs} is not strictly positive.
+  Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), rhs, zero);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    // Negate {rhs}; this might still produce a negative result in the case
+    // of -2^31, but that is handled safely below.
+    vtrue0 = graph()->NewNode(machine()->Int32Sub(), zero, rhs);
+
+    // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+    Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue0, zero);
+    if_true0 = etrue0 = graph()->NewNode(
+        common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+        frame_state, etrue0, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0 = rhs;
+
+  // At this point {rhs} is either greater than zero or -2^31, both are
+  // fine for the code that follows.
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  rhs = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                         vtrue0, vfalse0, control);
+
+  // Check if {lhs} is negative.
+  Node* check1 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
+  Node* branch1 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
+
+  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+  Node* etrue1 = effect;
+  Node* vtrue1;
+  {
+    // Compute the remainder using {lhs % rhs}.
+    vtrue1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
+
+    // Check if we would have to return -0.
+    Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue1, zero);
+    if_true1 = etrue1 =
+        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+                         check, frame_state, etrue1, if_true1);
+  }
+
+  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+  Node* efalse1 = effect;
+  Node* vfalse1;
+  {
+    Node* msk = graph()->NewNode(machine()->Int32Sub(), rhs, one);
+
+    // Check if {rhs} minus one is a valid mask.
+    Node* check2 = graph()->NewNode(
+        machine()->Word32Equal(),
+        graph()->NewNode(machine()->Word32And(), rhs, msk), zero);
+    Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
+
+    // Compute the remainder using {lhs & msk}.
+    Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+    Node* vtrue2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+
+    // Compute the remainder using the generic {lhs % rhs}.
+    Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+    Node* vfalse2 =
+        graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false2);
+
+    if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+    vfalse1 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                               vtrue2, vfalse2, if_false1);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue1,
+                       vfalse1, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
+                                               Node* effect, Node* control) {
+  Node* zero = jsgraph()->Int32Constant(0);
+
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
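+  // The checked division is only sound when it is exact: a non-zero
+  // remainder means the (fractional) result is not representable as Word32,
+  // hence the kLostPrecision deopt below.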
+  // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+      frame_state, effect, control);
+
+  // Perform the actual unsigned integer division.
+  Node* value = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, control);
+
+  // Check if the remainder is non-zero.
+  check = graph()->NewNode(machine()->Word32Equal(), lhs,
+                           graph()->NewNode(machine()->Int32Mul(), rhs, value));
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
+      frame_state, effect, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, Node* frame_state,
+                                               Node* effect, Node* control) {
+  Node* zero = jsgraph()->Int32Constant(0);
+
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
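+  // Unlike the signed case, no minus-zero check is needed here: an unsigned
+  // {lhs} is never negative, so the result cannot be -0.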
+  // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+      frame_state, effect, control);
+
+  // Perform the actual unsigned integer modulus.
+  Node* value = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+  Node* zero = jsgraph()->Int32Constant(0);
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  Node* projection =
+      graph()->NewNode(machine()->Int32MulWithOverflow(), lhs, rhs, control);
+
+  Node* check = graph()->NewNode(common()->Projection(1), projection, control);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+                       check, frame_state, effect, control);
+
+  Node* value = graph()->NewNode(common()->Projection(0), projection, control);
+
+  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value, zero);
+    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                         check_zero, control);
+
+    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+    Node* e_if_zero = effect;
+    {
+      // We may need to return negative zero.
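+      // The product is zero here, so the result is -0 exactly when one of
+      // the inputs is negative; (lhs | rhs) < 0 detects that sign bit.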
+      Node* or_inputs = graph()->NewNode(machine()->Word32Or(), lhs, rhs);
+      Node* check_or =
+          graph()->NewNode(machine()->Int32LessThan(), or_inputs, zero);
+      if_zero = e_if_zero =
+          graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+                           check_or, frame_state, e_if_zero, if_zero);
+    }
+
+    Node* if_not_zero = graph()->NewNode(common()->IfFalse(), branch_zero);
+    Node* e_if_not_zero = effect;
+
+    control = graph()->NewNode(common()->Merge(2), if_zero, if_not_zero);
+    effect = graph()->NewNode(common()->EffectPhi(2), e_if_zero, e_if_not_zero,
+                              control);
+  }
 
   return ValueEffectControl(value, effect, control);
 }
@@ -890,17 +1523,16 @@
   Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
   Node* is_safe =
       graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
-  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), is_safe,
-                                      frame_state, effect, control);
-
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), is_safe,
+      frame_state, effect, control);
 
   return ValueEffectControl(value, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedFloat64ToInt32(Node* value,
+EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+                                                    Node* value,
                                                     Node* frame_state,
                                                     Node* effect,
                                                     Node* control) {
@@ -908,34 +1540,37 @@
   Node* check_same = graph()->NewNode(
       machine()->Float64Equal(), value,
       graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
-  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check_same,
-                                      frame_state, effect, control);
+  control = effect = graph()->NewNode(
+      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN),
+      check_same, frame_state, effect, control);
 
-  // Check if {value} is -0.
-  Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
-                                      jsgraph()->Int32Constant(0));
-  Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check_zero, control);
+  if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+    // Check if {value} is -0.
+    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+                                        jsgraph()->Int32Constant(0));
+    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                         check_zero, control);
 
-  Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-  Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+    Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
 
-  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
-  Node* check_negative = graph()->NewNode(
-      machine()->Int32LessThan(),
-      graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-      jsgraph()->Int32Constant(0));
+    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+    Node* check_negative = graph()->NewNode(
+        machine()->Int32LessThan(),
+        graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+        jsgraph()->Int32Constant(0));
 
-  Node* deopt_minus_zero = graph()->NewNode(
-      common()->DeoptimizeIf(), check_negative, frame_state, effect, if_zero);
+    Node* deopt_minus_zero =
+        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+                         check_negative, frame_state, effect, if_zero);
 
-  Node* merge =
-      graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
+    control =
+        graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
+    effect = graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect,
+                              control);
+  }
 
-  effect =
-      graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect, merge);
-
-  return ValueEffectControl(value32, effect, merge);
+  return ValueEffectControl(value32, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
@@ -943,12 +1578,26 @@
                                                     Node* frame_state,
                                                     Node* effect,
                                                     Node* control) {
+  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
   Node* value = node->InputAt(0);
 
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  return BuildCheckedFloat64ToInt32(mode, value, frame_state, effect, control);
+}
 
-  return BuildCheckedFloat64ToInt32(value, frame_state, effect, control);
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(Node* node,
+                                                         Node* frame_state,
+                                                         Node* effect,
+                                                         Node* control) {
+  Node* value = node->InputAt(0);
+
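+  // Only the Smi fast path is handled here; a non-Smi input deoptimizes
+  // rather than loading a HeapNumber (contrast LowerCheckedTaggedToInt32).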
+  Node* check = ObjectIsSmi(value);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
+                       check, frame_state, effect, control);
+  value = ChangeSmiToInt32(value);
+
+  return ValueEffectControl(value, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
@@ -956,6 +1605,7 @@
                                                    Node* frame_state,
                                                    Node* effect,
                                                    Node* control) {
+  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
@@ -978,13 +1628,14 @@
                          value, efalse, if_false);
     Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
                                    jsgraph()->HeapNumberMapConstant());
-    if_false = efalse = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                         frame_state, efalse, if_false);
+    if_false = efalse = graph()->NewNode(
+        common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check,
+        frame_state, efalse, if_false);
     vfalse = efalse = graph()->NewNode(
         simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
         efalse, if_false);
     ValueEffectControl state =
-        BuildCheckedFloat64ToInt32(vfalse, frame_state, efalse, if_false);
+        BuildCheckedFloat64ToInt32(mode, vfalse, frame_state, efalse, if_false);
     if_false = state.control;
     efalse = state.effect;
     vfalse = state.value;
@@ -995,48 +1646,59 @@
   value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
                            vtrue, vfalse, control);
 
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
-
   return ValueEffectControl(value, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
-    Node* value, Node* frame_state, Node* effect, Node* control) {
+    CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
+    Node* control) {
   Node* value_map = effect = graph()->NewNode(
       simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+
   Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
                                         jsgraph()->HeapNumberMapConstant());
 
-  Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                  check_number, control);
+  switch (mode) {
+    case CheckTaggedInputMode::kNumber: {
+      control = effect = graph()->NewNode(
+          common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber),
+          check_number, frame_state, effect, control);
+      break;
+    }
+    case CheckTaggedInputMode::kNumberOrOddball: {
+      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                      check_number, control);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
+      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+      Node* etrue = effect;
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  // For oddballs also contain the numeric value, let us just check that
-  // we have an oddball here.
-  Node* efalse = effect;
-  Node* instance_type = efalse = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-      efalse, if_false);
-  Node* check_oddball =
-      graph()->NewNode(machine()->Word32Equal(), instance_type,
-                       jsgraph()->Int32Constant(ODDBALL_TYPE));
-  if_false = efalse =
-      graph()->NewNode(common()->DeoptimizeUnless(), check_oddball, frame_state,
-                       efalse, if_false);
-  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      // Oddballs also contain the numeric value, so we just need to check
+      // that we have an oddball here.
+      Node* efalse = effect;
+      Node* instance_type = efalse = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+          value_map, efalse, if_false);
+      Node* check_oddball =
+          graph()->NewNode(machine()->Word32Equal(), instance_type,
+                           jsgraph()->Int32Constant(ODDBALL_TYPE));
+      if_false = efalse = graph()->NewNode(
+          common()->DeoptimizeUnless(
+              DeoptimizeReason::kNotAHeapNumberUndefinedBoolean),
+          check_oddball, frame_state, efalse, if_false);
+      STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
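+      // The assert above is what allows the single ForHeapNumberValue() load
+      // below to also read the cached numeric value of an Oddball.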
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+      control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+      effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+      break;
+    }
+  }
 
-  Node* result = effect = graph()->NewNode(
+  value = effect = graph()->NewNode(
       simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
       effect, control);
-  return ValueEffectControl(result, effect, control);
+  return ValueEffectControl(value, effect, control);
 }
 
 EffectControlLinearizer::ValueEffectControl
@@ -1044,6 +1706,7 @@
                                                      Node* frame_state,
                                                      Node* effect,
                                                      Node* control) {
+  CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
@@ -1059,7 +1722,7 @@
   // Otherwise, check heap numberness and load the number.
   Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
   ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
-      value, frame_state, effect, if_false);
+      mode, value, frame_state, effect, if_false);
 
   Node* merge =
       graph()->NewNode(common()->Merge(2), if_true, number_state.control);
@@ -1069,9 +1732,6 @@
       graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
                        number_state.value, merge);
 
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
-
   return ValueEffectControl(result, effect_phi, merge);
 }
 
@@ -1108,6 +1768,42 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(Node* node,
+                                                            Node* frame_state,
+                                                            Node* effect,
+                                                            Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  // In the Smi case, just convert to int32.
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = ChangeSmiToInt32(value);
+
+  // Otherwise, check that it's a heap number or oddball and truncate the value
+  // to int32.
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  ValueEffectControl false_state = BuildCheckedHeapNumberOrOddballToFloat64(
+      CheckTaggedInputMode::kNumberOrOddball, value, frame_state, effect,
+      if_false);
+  false_state.value =
+      graph()->NewNode(machine()->TruncateFloat64ToWord32(), false_state.value);
+
+  Node* merge =
+      graph()->NewNode(common()->Merge(2), if_true, false_state.control);
+  Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
+                                      false_state.effect, merge);
+  Node* result =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue,
+                       false_state.value, merge);
+
+  return ValueEffectControl(result, effect_phi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
                                                Node* control) {
   Node* value = node->InputAt(0);
@@ -1300,6 +1996,273 @@
 }
 
 EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
+                                               Node* control) {
+  Node* subject = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  // We may need to loop several times for ConsString/SlicedString {subject}s.
+  Node* loop =
+      graph()->NewNode(common()->Loop(4), control, control, control, control);
+  Node* lsubject =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 4),
+                       subject, subject, subject, subject, loop);
+  Node* lindex =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 4), index,
+                       index, index, index, loop);
+  Node* leffect = graph()->NewNode(common()->EffectPhi(4), effect, effect,
+                                   effect, effect, loop);
+
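+  // The four loop inputs are: the initial entry (0), the two retry paths for
+  // ConsString {subject}s (1 and 2, wired up via ReplaceInput below), and
+  // the retry path for SlicedString {subject}s (3).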
+  control = loop;
+  effect = leffect;
+
+  // Determine the instance type of {lsubject}.
+  Node* lsubject_map = effect =
+      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                       lsubject, effect, control);
+  Node* lsubject_instance_type = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+      lsubject_map, effect, control);
+
+  // Check if {lsubject} is a SeqString.
+  Node* check0 = graph()->NewNode(
+      machine()->Word32Equal(),
+      graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+                       jsgraph()->Int32Constant(kStringRepresentationMask)),
+      jsgraph()->Int32Constant(kSeqStringTag));
+  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    // Check if the {lsubject} is a TwoByteSeqString or a OneByteSeqString.
+    Node* check1 = graph()->NewNode(
+        machine()->Word32Equal(),
+        graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+                         jsgraph()->Int32Constant(kStringEncodingMask)),
+        jsgraph()->Int32Constant(kTwoByteStringTag));
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = etrue0;
+    Node* vtrue1 = etrue1 =
+        graph()->NewNode(simplified()->LoadElement(
+                             AccessBuilder::ForSeqTwoByteStringCharacter()),
+                         lsubject, lindex, etrue1, if_true1);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = etrue0;
+    Node* vfalse1 = efalse1 =
+        graph()->NewNode(simplified()->LoadElement(
+                             AccessBuilder::ForSeqOneByteStringCharacter()),
+                         lsubject, lindex, efalse1, if_false1);
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    etrue0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    // Check if the {lsubject} is a ConsString.
+    Node* check1 = graph()->NewNode(
+        machine()->Word32Equal(),
+        graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+                         jsgraph()->Int32Constant(kStringRepresentationMask)),
+        jsgraph()->Int32Constant(kConsStringTag));
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    {
+      // Load the right hand side of the {lsubject} ConsString.
+      Node* lsubject_second = etrue1 = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForConsStringSecond()),
+          lsubject, etrue1, if_true1);
+
+      // Check whether the right hand side is the empty string (i.e. if
+      // this is really a flat string in a cons string). If that is not
+      // the case we flatten the string first.
+      Node* check2 = graph()->NewNode(machine()->WordEqual(), lsubject_second,
+                                      jsgraph()->EmptyStringConstant());
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                       check2, if_true1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* etrue2 = etrue1;
+      Node* vtrue2 = etrue2 = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForConsStringFirst()),
+          lsubject, etrue2, if_true2);
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* efalse2 = etrue1;
+      Node* vfalse2;
+      {
+        // Flatten the {lsubject} ConsString first.
+        Operator::Properties properties =
+            Operator::kNoDeopt | Operator::kNoThrow;
+        Runtime::FunctionId id = Runtime::kFlattenString;
+        CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+            graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+        vfalse2 = efalse2 = graph()->NewNode(
+            common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
+            jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+            jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(),
+            efalse2, if_false2);
+      }
+
+      // Retry the {loop} with the new subject.
+      loop->ReplaceInput(1, if_true2);
+      lindex->ReplaceInput(1, lindex);
+      leffect->ReplaceInput(1, etrue2);
+      lsubject->ReplaceInput(1, vtrue2);
+      loop->ReplaceInput(2, if_false2);
+      lindex->ReplaceInput(2, lindex);
+      leffect->ReplaceInput(2, efalse2);
+      lsubject->ReplaceInput(2, vfalse2);
+    }
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+    Node* vfalse1;
+    {
+      // Check if the {lsubject} is an ExternalString.
+      Node* check2 = graph()->NewNode(
+          machine()->Word32Equal(),
+          graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+                           jsgraph()->Int32Constant(kStringRepresentationMask)),
+          jsgraph()->Int32Constant(kExternalStringTag));
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* etrue2 = efalse1;
+      Node* vtrue2;
+      {
+        // Check that the {lsubject} is not a short external string, i.e.
+        // that its resource data can be loaded directly.
+        Node* check3 = graph()->NewNode(
+            machine()->Word32Equal(),
+            graph()->NewNode(
+                machine()->Word32And(), lsubject_instance_type,
+                jsgraph()->Int32Constant(kShortExternalStringMask)),
+            jsgraph()->Int32Constant(0));
+        Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                         check3, if_true2);
+
+        Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+        Node* etrue3 = etrue2;
+        Node* vtrue3;
+        {
+          // Load the actual resource data from the {lsubject}.
+          Node* lsubject_resource_data = etrue3 = graph()->NewNode(
+              simplified()->LoadField(
+                  AccessBuilder::ForExternalStringResourceData()),
+              lsubject, etrue3, if_true3);
+
+          // Check if the {lsubject} is a TwoByteExternalString or a
+          // OneByteExternalString.
+          Node* check4 = graph()->NewNode(
+              machine()->Word32Equal(),
+              graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+                               jsgraph()->Int32Constant(kStringEncodingMask)),
+              jsgraph()->Int32Constant(kTwoByteStringTag));
+          Node* branch4 =
+              graph()->NewNode(common()->Branch(), check4, if_true3);
+
+          Node* if_true4 = graph()->NewNode(common()->IfTrue(), branch4);
+          Node* etrue4 = etrue3;
+          Node* vtrue4 = etrue4 = graph()->NewNode(
+              simplified()->LoadElement(
+                  AccessBuilder::ForExternalTwoByteStringCharacter()),
+              lsubject_resource_data, lindex, etrue4, if_true4);
+
+          Node* if_false4 = graph()->NewNode(common()->IfFalse(), branch4);
+          Node* efalse4 = etrue3;
+          Node* vfalse4 = efalse4 = graph()->NewNode(
+              simplified()->LoadElement(
+                  AccessBuilder::ForExternalOneByteStringCharacter()),
+              lsubject_resource_data, lindex, efalse4, if_false4);
+
+          if_true3 = graph()->NewNode(common()->Merge(2), if_true4, if_false4);
+          etrue3 = graph()->NewNode(common()->EffectPhi(2), etrue4, efalse4,
+                                    if_true3);
+          vtrue3 =
+              graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                               vtrue4, vfalse4, if_true3);
+        }
+
+        Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
+        Node* efalse3 = etrue2;
+        Node* vfalse3;
+        {
+          // The {lsubject} might be compressed, so call the runtime.
+          Operator::Properties properties =
+              Operator::kNoDeopt | Operator::kNoThrow;
+          Runtime::FunctionId id = Runtime::kExternalStringGetChar;
+          CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+              graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+          vfalse3 = efalse3 = graph()->NewNode(
+              common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
+              ChangeInt32ToSmi(lindex),
+              jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+              jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(),
+              efalse3, if_false3);
+          vfalse3 = ChangeSmiToInt32(vfalse3);
+        }
+
+        if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
+        etrue2 =
+            graph()->NewNode(common()->EffectPhi(2), etrue3, efalse3, if_true2);
+        vtrue2 =
+            graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                             vtrue3, vfalse3, if_true2);
+      }
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* efalse2 = efalse1;
+      {
+        // The {lsubject} is a SlicedString; continue with its parent.
+        Node* lsubject_parent = efalse2 = graph()->NewNode(
+            simplified()->LoadField(AccessBuilder::ForSlicedStringParent()),
+            lsubject, efalse2, if_false2);
+        Node* lsubject_offset = efalse2 = graph()->NewNode(
+            simplified()->LoadField(AccessBuilder::ForSlicedStringOffset()),
+            lsubject, efalse2, if_false2);
+        Node* lsubject_index = graph()->NewNode(
+            machine()->Int32Add(), lindex, ChangeSmiToInt32(lsubject_offset));
+
+        // Retry the {loop} with the parent subject.
+        loop->ReplaceInput(3, if_false2);
+        leffect->ReplaceInput(3, efalse2);
+        lindex->ReplaceInput(3, lsubject_index);
+        lsubject->ReplaceInput(3, lsubject_parent);
+      }
+
+      if_false1 = if_true2;
+      efalse1 = etrue2;
+      vfalse1 = vtrue2;
+    }
+
+    if_false0 = if_false1;
+    efalse0 = efalse1;
+    vfalse0 = vfalse1;
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
+                       vfalse0, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
                                                  Node* control) {
   Node* value = node->InputAt(0);
@@ -1429,11 +2392,9 @@
       machine()->Word32Equal(),
       graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
       jsgraph()->Int32Constant(kHoleNanUpper32));
-  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
-                                      frame_state, effect, control);
-
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
+                       frame_state, effect, control);
 
   return ValueEffectControl(value, effect, control);
 }
@@ -1441,24 +2402,35 @@
 EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
                                               Node* effect, Node* control) {
-  CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
   Node* value = node->InputAt(0);
   Node* check = graph()->NewNode(machine()->WordEqual(), value,
                                  jsgraph()->TheHoleConstant());
-  switch (mode) {
-    case CheckTaggedHoleMode::kConvertHoleToUndefined:
-      value = graph()->NewNode(
-          common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
-          check, jsgraph()->UndefinedConstant(), value);
-      break;
-    case CheckTaggedHoleMode::kNeverReturnHole:
-      control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
-                                          frame_state, effect, control);
-      break;
-  }
+  control = effect =
+      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
+                       frame_state, effect, control);
 
-  // Make sure the lowered node does not appear in any use lists.
-  node->TrimInputCount(0);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node,
+                                                           Node* effect,
+                                                           Node* control) {
+  Node* value = node->InputAt(0);
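+  // Replace the hole by undefined; any other {value} passes through
+  // unchanged (a simple diamond with a tagged Phi, no deoptimization).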
+  Node* check = graph()->NewNode(machine()->WordEqual(), value,
+                                 jsgraph()->TheHoleConstant());
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->UndefinedConstant();
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = value;
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vtrue, vfalse, control);
 
   return ValueEffectControl(value, effect, control);
 }
@@ -1529,7 +2501,7 @@
   Node* value = node->InputAt(0);
   Node* result = effect =
       graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
-                       value, jsgraph()->NoContextConstant(), effect, control);
+                       value, jsgraph()->NoContextConstant(), effect);
   return ValueEffectControl(result, effect, control);
 }
 
@@ -1552,7 +2524,7 @@
   {
     vfalse0 = efalse0 = graph()->NewNode(
         ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
-        jsgraph()->NoContextConstant(), efalse0, if_false0);
+        jsgraph()->NoContextConstant(), efalse0);
 
     Node* check1 = ObjectIsSmi(vfalse0);
     Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
@@ -1608,7 +2580,7 @@
   {
     vfalse0 = efalse0 = graph()->NewNode(
         ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
-        jsgraph()->NoContextConstant(), efalse0, if_false0);
+        jsgraph()->NoContextConstant(), efalse0);
 
     Node* check1 = ObjectIsSmi(vfalse0);
     Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
@@ -1645,6 +2617,659 @@
   return ValueEffectControl(value, effect, control);
 }
 
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node,
+                                                         Node* effect,
+                                                         Node* control) {
+  Node* object = node->InputAt(0);
+  Node* elements = node->InputAt(1);
+
+  // Load the current map of {elements}.
+  Node* elements_map = effect =
+      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                       elements, effect, control);
+
+  // Check if {elements} is not a copy-on-write FixedArray.
+  Node* check = graph()->NewNode(machine()->WordEqual(), elements_map,
+                                 jsgraph()->FixedArrayMapConstant());
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  // Nothing to do if the {elements} are not copy-on-write.
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = elements;
+
+  // We need to take a copy of the {elements} and set them up for {object}.
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    // We need to create a copy of the {elements} for {object}.
+    Operator::Properties properties = Operator::kEliminatable;
+    Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+        properties);
+    vfalse = efalse = graph()->NewNode(
+        common()->Call(desc), jsgraph()->HeapConstant(callable.code()), object,
+        jsgraph()->NoContextConstant(), efalse);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  Node* value = graph()->NewNode(
+      common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
+                                                    Node* frame_state,
+                                                    Node* effect,
+                                                    Node* control) {
+  GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
+  Node* object = node->InputAt(0);
+  Node* elements = node->InputAt(1);
+  Node* index = node->InputAt(2);
+  Node* length = node->InputAt(3);
+
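+  // For holey {elements} a store at any {index} at or beyond {length} may
+  // grow the backing store; for packed {elements} only the append case
+  // {index} == {length} can.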
+  Node* check0 = graph()->NewNode((flags & GrowFastElementsFlag::kHoleyElements)
+                                      ? machine()->Uint32LessThanOrEqual()
+                                      : machine()->Word32Equal(),
+                                  length, index);
+  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0 = elements;
+  {
+    // Load the length of the {elements} backing store.
+    Node* elements_length = etrue0 = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
+        etrue0, if_true0);
+    elements_length = ChangeSmiToInt32(elements_length);
+
+    // Check if we need to grow the {elements} backing store.
+    Node* check1 =
+        graph()->NewNode(machine()->Uint32LessThan(), index, elements_length);
+    Node* branch1 =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = etrue0;
+    Node* vtrue1 = vtrue0;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = etrue0;
+    Node* vfalse1 = vtrue0;
+    {
+      // We need to grow the {elements} for {object}.
+      Operator::Properties properties = Operator::kEliminatable;
+      Callable callable =
+          (flags & GrowFastElementsFlag::kDoubleElements)
+              ? CodeFactory::GrowFastDoubleElements(isolate())
+              : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+      CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+          properties);
+      vfalse1 = efalse1 = graph()->NewNode(
+          common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+          object, ChangeInt32ToSmi(index), jsgraph()->NoContextConstant(),
+          efalse1);
+
+      // Ensure that we were able to grow the {elements}.
+      // TODO(turbofan): We use kSmi as the reason here, similar to
+      // Crankshaft, but maybe we should introduce a reason that makes sense.
+      efalse1 = if_false1 = graph()->NewNode(
+          common()->DeoptimizeIf(DeoptimizeReason::kSmi), ObjectIsSmi(vfalse1),
+          frame_state, efalse1, if_false1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    etrue0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                              vtrue1, vfalse1, if_true0);
+
+    // For JSArray {object}s we also need to update the "length".
+    if (flags & GrowFastElementsFlag::kArrayObject) {
+      // Compute the new {length}.
+      Node* object_length = ChangeInt32ToSmi(graph()->NewNode(
+          machine()->Int32Add(), index, jsgraph()->Int32Constant(1)));
+
+      // Update the "length" property of the {object}.
+      etrue0 =
+          graph()->NewNode(simplified()->StoreField(
+                               AccessBuilder::ForJSArrayLength(FAST_ELEMENTS)),
+                           object, object_length, etrue0, if_true0);
+    }
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0 = elements;
+  {
+    // For non-holey {elements} we still need to verify that the {index} is
+    // in-bounds; for holey {elements} the check above already guards the
+    // index (and the operator forces {index} to be unsigned).
+    if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
+      Node* check1 =
+          graph()->NewNode(machine()->Uint32LessThan(), index, length);
+      efalse0 = if_false0 = graph()->NewNode(
+          common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check1,
+          frame_state, efalse0, if_false0);
+    }
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
+                       vfalse0, control);
+
+  return ValueEffectControl(value, effect, control);
+}
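+
+// Illustrative JS: a keyed store just past the current length of a fast
+// JSArray takes the {index} == {length} path above ({index} >= {length}
+// for holey arrays) and may have to grow the {elements} backing store.
+//
+//   var a = [1, 2, 3];
+//   a[3] = 4;  // grows {elements} if needed and bumps "length" to index + 1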
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTransitionElementsKind(Node* node, Node* effect,
+                                                     Node* control) {
+  ElementsTransition const transition = ElementsTransitionOf(node->op());
+  Node* object = node->InputAt(0);
+  Node* source_map = node->InputAt(1);
+  Node* target_map = node->InputAt(2);
+
+  // Load the current map of {object}.
+  Node* object_map = effect =
+      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
+                       effect, control);
+
+  // Check if {object_map} is the same as {source_map}.
+  Node* check =
+      graph()->NewNode(machine()->WordEqual(), object_map, source_map);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+  // Migrate the {object} from {source_map} to {target_map}.
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  {
+    switch (transition) {
+      case ElementsTransition::kFastTransition: {
+        // In-place migration of {object}: just store the {target_map}.
+        etrue =
+            graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+                             object, target_map, etrue, if_true);
+        break;
+      }
+      case ElementsTransition::kSlowTransition: {
+        // Instance migration: call out to the runtime for {object}.
+        Operator::Properties properties =
+            Operator::kNoDeopt | Operator::kNoThrow;
+        Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+        CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+            graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+        etrue = graph()->NewNode(
+            common()->Call(desc), jsgraph()->CEntryStubConstant(1), object,
+            target_map,
+            jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+            jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(), etrue,
+            if_true);
+        break;
+      }
+    }
+  }
+
+  // Nothing to do if the {object} doesn't have the {source_map}.
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+  return ValueEffectControl(nullptr, effect, control);
+}
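+
+// The two transition kinds by example (illustrative): a transition that only
+// generalizes the elements kind without changing the element layout (e.g.
+// FAST_SMI_ELEMENTS -> FAST_ELEMENTS) is a kFastTransition and just stores
+// the {target_map} in place, while one that changes the element
+// representation (e.g. FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS) is a
+// kSlowTransition and must rewrite the backing store through
+// Runtime::kTransitionElementsKind.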
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
+                                               Node* control) {
+  ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
+  Node* buffer = node->InputAt(0);
+  Node* base = node->InputAt(1);
+  Node* external = node->InputAt(2);
+  Node* index = node->InputAt(3);
+
+  // We need to keep the {buffer} alive so that the GC will not release the
+  // ArrayBuffer (if there is one) while we are still operating on it.
+  effect = graph()->NewNode(common()->Retain(), buffer, effect);
+
+  // Compute the effective storage pointer.
+  Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
+                                            external, effect, control);
+
+  // Perform the actual typed element access.
+  Node* value = effect = graph()->NewNode(
+      simplified()->LoadElement(
+          AccessBuilder::ForTypedArrayElement(array_type, true)),
+      storage, index, effect, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
+                                                Node* control) {
+  ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
+  Node* buffer = node->InputAt(0);
+  Node* base = node->InputAt(1);
+  Node* external = node->InputAt(2);
+  Node* index = node->InputAt(3);
+  Node* value = node->InputAt(4);
+
+  // We need to keep the {buffer} alive so that the GC will not release the
+  // ArrayBuffer (if there is one) while we are still operating on it.
+  effect = graph()->NewNode(common()->Retain(), buffer, effect);
+
+  // Compute the effective storage pointer.
+  Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
+                                            external, effect, control);
+
+  // Perform the actual typed element access.
+  effect = graph()->NewNode(
+      simplified()->StoreElement(
+          AccessBuilder::ForTypedArrayElement(array_type, true)),
+      storage, index, value, effect, control);
+
+  return ValueEffectControl(nullptr, effect, control);
+}
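+
+// Note on the two typed-element lowerings above: for on-heap typed arrays,
+// {base} is the tagged backing store and {external} the offset of its first
+// element, while for off-heap arrays {base} is zero and {external} is the
+// raw backing store pointer; UnsafePointerAdd folds both cases into a single
+// effective {storage} pointer, and the Retain node keeps the {buffer} alive
+// while raw pointers derived from it are still in use.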
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
+                                             Node* control) {
+  // Nothing to be done if a fast hardware instruction is available.
+  if (machine()->Float64RoundUp().IsSupported()) {
+    return ValueEffectControl(node, effect, control);
+  }
+
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
+
+  // General case for ceil.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if temp1 < input then
+  //         temp1 + 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+  //         -0 - temp3
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
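+  //
+  // Worked example, assuming the default round-to-nearest rounding mode
+  // (doubles in [2^52, 2^53) have a spacing of exactly 1.0): for input =
+  // 3.2, temp1 = (2^52 + 3.2) - 2^52 = 3.0 and temp1 < input, so the result
+  // is temp1 + 1 = 4.0; for input = 3.7, temp1 = 4.0 and temp1 < input
+  // fails, so the result is temp1 = 4.0.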
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), temp1, input),
+          graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        Node* temp3 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                       vtrue0, vfalse0, merge0);
+  return ValueEffectControl(value, effect, merge0);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
+                                               Node* control) {
+  // Nothing to be done if a fast hardware instruction is available.
+  if (machine()->Float64RoundDown().IsSupported()) {
+    return ValueEffectControl(node, effect, control);
+  }
+
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_one = jsgraph()->Float64Constant(-1.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
+
+  // General case for floor.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         if temp2 < temp1 then
+  //           -1 - temp2
+  //         else
+  //           -0 - temp2
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
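+  //
+  // Worked example, assuming the default round-to-nearest rounding mode: for
+  // input = 3.7, temp1 = (2^52 + 3.7) - 2^52 = 4.0 and input < temp1, so the
+  // result is temp1 - 1 = 3.0; for input = -3.2, temp1 = 3.2, temp2 = 3.0,
+  // and temp2 < temp1, so the result is -1 - temp2 = -4.0.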
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        vfalse2 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
+            graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                       vtrue0, vfalse0, merge0);
+  return ValueEffectControl(value, effect, merge0);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
+                                                   Node* control) {
+  // Nothing to be done if a fast hardware instruction is available.
+  if (machine()->Float64RoundTruncate().IsSupported()) {
+    return ValueEffectControl(node, effect, control);
+  }
+
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
+
+  // General case for trunc.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+  //         -0 - temp3
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
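+  //
+  // Worked example, assuming the default round-to-nearest rounding mode: for
+  // input = -3.7, temp1 = 3.7, temp2 = (2^52 + 3.7) - 2^52 = 4.0, and
+  // temp1 < temp2, so temp3 = temp2 - 1 = 3.0 and the result is
+  // -0 - temp3 = -3.0, i.e. the value is truncated toward zero.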
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        Node* temp3 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                       vtrue0, vfalse0, merge0);
+  return ValueEffectControl(value, effect, merge0);
+}
+
 Factory* EffectControlLinearizer::factory() const {
   return isolate()->factory();
 }
@@ -1659,7 +3284,7 @@
     CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
     CallDescriptor* desc = Linkage::GetStubCallDescriptor(
         isolate(), graph()->zone(), callable.descriptor(), 0, flags,
-        Operator::kNoThrow);
+        Operator::kEliminatable);
     to_number_operator_.set(common()->Call(desc));
   }
   return to_number_operator_.get();
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
index 280b4b7..98f08c7 100644
--- a/src/compiler/effect-control-linearizer.h
+++ b/src/compiler/effect-control-linearizer.h
@@ -43,7 +43,6 @@
 
   bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
                             Node** control);
-  ValueEffectControl LowerTypeGuard(Node* node, Node* effect, Node* control);
   ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
                                             Node* control);
   ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
@@ -64,6 +63,14 @@
                                                Node* control);
   ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
                                       Node* effect, Node* control);
+  ValueEffectControl LowerCheckMaps(Node* node, Node* frame_state, Node* effect,
+                                    Node* control);
+  ValueEffectControl LowerCheckNumber(Node* node, Node* frame_state,
+                                      Node* effect, Node* control);
+  ValueEffectControl LowerCheckString(Node* node, Node* frame_state,
+                                      Node* effect, Node* control);
+  ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
+                                  Node* control);
   ValueEffectControl LowerCheckTaggedPointer(Node* node, Node* frame_state,
                                              Node* effect, Node* control);
   ValueEffectControl LowerCheckTaggedSigned(Node* node, Node* frame_state,
@@ -72,10 +79,24 @@
                                           Node* effect, Node* control);
   ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
                                           Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32Div(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32Mod(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
+  ValueEffectControl LowerCheckedUint32Div(Node* node, Node* frame_state,
+                                           Node* effect, Node* control);
+  ValueEffectControl LowerCheckedUint32Mod(Node* node, Node* frame_state,
+                                           Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
   ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
                                                Node* effect, Node* control);
   ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
                                                 Node* effect, Node* control);
+  ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
+                                                     Node* frame_state,
+                                                     Node* effect,
+                                                     Node* control);
   ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
                                                Node* effect, Node* control);
   ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
@@ -86,6 +107,10 @@
                                                   Node* control);
   ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
                                                  Node* control);
+  ValueEffectControl LowerCheckedTruncateTaggedToWord32(Node* node,
+                                                        Node* frame_state,
+                                                        Node* effect,
+                                                        Node* control);
   ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
                                            Node* control);
   ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
@@ -97,27 +122,49 @@
                                          Node* control);
   ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
                                                Node* control);
+  ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
+                                           Node* control);
   ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
                                              Node* control);
   ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
                                            Node* effect, Node* control);
   ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
                                           Node* effect, Node* control);
+  ValueEffectControl LowerConvertTaggedHoleToUndefined(Node* node, Node* effect,
+                                                       Node* control);
   ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
                                                  Node* control);
   ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
                                                  Node* control);
   ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
                                                   Node* control);
+  ValueEffectControl LowerEnsureWritableFastElements(Node* node, Node* effect,
+                                                     Node* control);
+  ValueEffectControl LowerMaybeGrowFastElements(Node* node, Node* frame_state,
+                                                Node* effect, Node* control);
+  ValueEffectControl LowerTransitionElementsKind(Node* node, Node* effect,
+                                                 Node* control);
+  ValueEffectControl LowerLoadTypedElement(Node* node, Node* effect,
+                                           Node* control);
+  ValueEffectControl LowerStoreTypedElement(Node* node, Node* effect,
+                                            Node* control);
+
+  // Lowering of optional operators.
+  ValueEffectControl LowerFloat64RoundUp(Node* node, Node* effect,
+                                         Node* control);
+  ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
+                                           Node* control);
+  ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
+                                               Node* control);
 
   ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
                                                  Node* control);
-  ValueEffectControl BuildCheckedFloat64ToInt32(Node* value, Node* frame_state,
+  ValueEffectControl BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+                                                Node* value, Node* frame_state,
                                                 Node* effect, Node* control);
-  ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(Node* value,
-                                                              Node* frame_state,
-                                                              Node* effect,
-                                                              Node* control);
+  ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
+      CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
+      Node* control);
 
   Node* ChangeInt32ToSmi(Node* value);
   Node* ChangeUint32ToSmi(Node* value);
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index 8402366..c69b86c 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -268,7 +268,7 @@
     }
   }
   if (node->opcode() == IrOpcode::kFrameState) {
-    Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+    Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
     if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
       if (Node* ret =
               ReduceDeoptState(outer_frame_state, effect, multiple_users_rec)) {
@@ -277,7 +277,7 @@
           node = clone = jsgraph()->graph()->CloneNode(node);
           TRACE(" to #%d\n", node->id());
         }
-        NodeProperties::ReplaceFrameStateInput(node, 0, ret);
+        NodeProperties::ReplaceFrameStateInput(node, ret);
       }
     }
   }
@@ -331,7 +331,7 @@
 void EscapeAnalysisReducer::VerifyReplacement() const {
 #ifdef DEBUG
   AllNodes all(zone(), jsgraph()->graph());
-  for (Node* node : all.live) {
+  for (Node* node : all.reachable) {
     if (node->opcode() == IrOpcode::kAllocate) {
       CHECK(!escape_analysis_->IsVirtual(node));
     }
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index 9409a27..437c01f 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -793,7 +793,6 @@
         }
         break;
       case IrOpcode::kSelect:
-      case IrOpcode::kTypeGuard:
       // TODO(mstarzinger): The following list of operators will eventually be
       // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
       case IrOpcode::kObjectIsCallable:
@@ -1155,7 +1154,7 @@
           effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
           node->id());
     if (status_analysis_->IsEffectBranchPoint(effect) ||
-        OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+        OperatorProperties::HasFrameStateInput(node->op())) {
       virtual_states_[node->id()]->SetCopyRequired();
       TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
             effect->id());
diff --git a/src/compiler/frame-states.cc b/src/compiler/frame-states.cc
index 91827d0..a02fb01 100644
--- a/src/compiler/frame-states.cc
+++ b/src/compiler/frame-states.cc
@@ -64,6 +64,12 @@
     case FrameStateType::kConstructStub:
       os << "CONSTRUCT_STUB";
       break;
+    case FrameStateType::kGetterStub:
+      os << "GETTER_STUB";
+      break;
+    case FrameStateType::kSetterStub:
+      os << "SETTER_STUB";
+      break;
   }
   return os;
 }
diff --git a/src/compiler/frame-states.h b/src/compiler/frame-states.h
index 2552bcb..0d0ec47 100644
--- a/src/compiler/frame-states.h
+++ b/src/compiler/frame-states.h
@@ -80,7 +80,9 @@
   kInterpretedFunction,  // Represents an InterpretedFrame.
   kArgumentsAdaptor,     // Represents an ArgumentsAdaptorFrame.
   kTailCallerFunction,   // Represents a frame removed by tail call elimination.
-  kConstructStub         // Represents a ConstructStubFrame.
+  kConstructStub,        // Represents a ConstructStubFrame.
+  kGetterStub,           // Represents a GetterStubFrame.
+  kSetterStub            // Represents a SetterStubFrame.
 };
 
 class FrameStateFunctionInfo {
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index de2ae1a..8d463df 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -132,12 +132,20 @@
 
  private:
   int AllocateAlignedFrameSlot(int width) {
-    DCHECK(width == 4 || width == 8);
-    // Skip one slot if necessary.
-    if (width > kPointerSize) {
-      DCHECK(width == kPointerSize * 2);
-      frame_slot_count_++;
-      frame_slot_count_ |= 1;
+    DCHECK(width == 4 || width == 8 || width == 16);
+    if (kPointerSize == 4) {
+      // Skip one slot if necessary.
+      if (width > kPointerSize) {
+        frame_slot_count_++;
+        frame_slot_count_ |= 1;
+        // 2 extra slots if width == 16.
+        frame_slot_count_ += (width & 16) / 8;
+      }
+    } else {
+      // No alignment when slots are 8 bytes.
+      DCHECK_EQ(8, kPointerSize);
+      // 1 extra slot if width == 16.
+      frame_slot_count_ += (width & 16) / 16;
     }
     return frame_slot_count_++;
   }
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 7c39700..7b04198 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -34,7 +34,6 @@
   }
 }
 
-
 void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
   // Each call to this function performs a move and deletes it from the move
   // graph.  We first recursively perform any move blocking this one.  We mark a
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
index 2ef1ba1..b13b954 100644
--- a/src/compiler/graph-reducer.cc
+++ b/src/compiler/graph-reducer.cc
@@ -168,6 +168,10 @@
 
 
 void GraphReducer::Replace(Node* node, Node* replacement, NodeId max_id) {
+  if (FLAG_trace_turbo_reduction) {
+    OFStream os(stdout);
+    os << "- Replacing " << *node << " with " << *replacement << std::endl;
+  }
   if (node == graph()->start()) graph()->SetStart(replacement);
   if (node == graph()->end()) graph()->SetEnd(replacement);
   if (replacement->id() <= max_id) {
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
index cb775e9..352b171 100644
--- a/src/compiler/graph-replay.cc
+++ b/src/compiler/graph-replay.cc
@@ -24,7 +24,7 @@
   AllNodes nodes(&zone, graph);
 
   // Allocate the nodes first.
-  for (Node* node : nodes.live) {
+  for (Node* node : nodes.reachable) {
     PrintReplayOpCreator(node->op());
     PrintF("  Node* n%d = graph()->NewNode(op", node->id());
     for (int i = 0; i < node->InputCount(); ++i) {
@@ -34,7 +34,7 @@
   }
 
   // Connect the nodes to their inputs.
-  for (Node* node : nodes.live) {
+  for (Node* node : nodes.reachable) {
     for (int i = 0; i < node->InputCount(); i++) {
       PrintF("  n%d->ReplaceInput(%d, n%d);\n", node->id(), i,
              node->InputAt(i)->id());
diff --git a/src/compiler/graph-trimmer.cc b/src/compiler/graph-trimmer.cc
index 75071c6..74626fe 100644
--- a/src/compiler/graph-trimmer.cc
+++ b/src/compiler/graph-trimmer.cc
@@ -33,7 +33,7 @@
     for (Edge edge : live->use_edges()) {
       Node* const user = edge.from();
       if (!IsLive(user)) {
-        if (FLAG_trace_turbo_reduction) {
+        if (FLAG_trace_turbo_trimming) {
           OFStream os(stdout);
           os << "DeadLink: " << *user << "(" << edge.index() << ") -> " << *live
              << std::endl;
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 2e39764..9fd80ea 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -4,17 +4,19 @@
 
 #include "src/compiler/graph-visualizer.h"
 
+#include <memory>
 #include <sstream>
 #include <string>
 
 #include "src/code-stubs.h"
+#include "src/compiler.h"
 #include "src/compiler/all-nodes.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/operator.h"
 #include "src/compiler/register-allocator.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
@@ -25,10 +27,11 @@
 namespace internal {
 namespace compiler {
 
-base::SmartArrayPointer<const char> GetVisualizerLogFileName(
-    CompilationInfo* info, const char* phase, const char* suffix) {
+std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
+                                                 const char* phase,
+                                                 const char* suffix) {
   EmbeddedVector<char, 256> filename(0);
-  base::SmartArrayPointer<char> debug_name = info->GetDebugName();
+  std::unique_ptr<char[]> debug_name = info->GetDebugName();
   if (strlen(debug_name.get()) > 0) {
     SNPrintF(filename, "turbo-%s", debug_name.get());
   } else if (info->has_shared_info()) {
@@ -69,7 +72,7 @@
   char* buffer = new char[full_filename.length() + 1];
   memcpy(buffer, full_filename.start(), full_filename.length());
   buffer[full_filename.length()] = '\0';
-  return base::SmartArrayPointer<const char>(buffer);
+  return std::unique_ptr<char[]>(buffer);
 }
 
 
@@ -83,7 +86,7 @@
 class Escaped {
  public:
   explicit Escaped(const std::ostringstream& os,
-                   const char* escaped_chars = "<>|{}")
+                   const char* escaped_chars = "<>|{}\\")
       : str_(os.str()), escaped_chars_(escaped_chars) {}
 
   friend std::ostream& operator<<(std::ostream& os, const Escaped& e) {
@@ -111,10 +114,14 @@
  public:
   JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph,
                       const SourcePositionTable* positions)
-      : os_(os), all_(zone, graph), positions_(positions), first_node_(true) {}
+      : os_(os),
+        all_(zone, graph, false),
+        live_(zone, graph, true),
+        positions_(positions),
+        first_node_(true) {}
 
   void Print() {
-    for (Node* const node : all_.live) PrintNode(node);
+    for (Node* const node : all_.reachable) PrintNode(node);
     os_ << "\n";
   }
 
@@ -124,10 +131,15 @@
     } else {
       os_ << ",\n";
     }
-    std::ostringstream label;
-    label << *node->op();
-    os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << Escaped(label, "\"")
-        << "\"";
+    std::ostringstream label, title, properties;
+    node->op()->PrintTo(label, Operator::PrintVerbosity::kSilent);
+    node->op()->PrintTo(title, Operator::PrintVerbosity::kVerbose);
+    node->op()->PrintPropsTo(properties);
+    os_ << "{\"id\":" << SafeId(node) << ",\"label\":\""
+        << Escaped(label, "\"\\") << "\""
+        << ",\"title\":\"" << Escaped(title, "\"\\") << "\""
+        << ",\"live\": " << (live_.IsLive(node) ? "true" : "false")
+        << ",\"properties\":\"" << Escaped(properties, "\"\\") << "\"";
     IrOpcode::Value opcode = node->opcode();
     if (IrOpcode::IsPhiOpcode(opcode)) {
       os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
@@ -149,11 +161,17 @@
     os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
     os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
                                                                : "false");
+    os_ << ",\"opinfo\":\"" << node->op()->ValueInputCount() << " v "
+        << node->op()->EffectInputCount() << " eff "
+        << node->op()->ControlInputCount() << " ctrl in, "
+        << node->op()->ValueOutputCount() << " v "
+        << node->op()->EffectOutputCount() << " eff "
+        << node->op()->ControlOutputCount() << " ctrl out\"";
     if (NodeProperties::IsTyped(node)) {
       Type* type = NodeProperties::GetType(node);
       std::ostringstream type_out;
       type->PrintTo(type_out);
-      os_ << ",\"type\":\"" << Escaped(type_out, "\"") << "\"";
+      os_ << ",\"type\":\"" << Escaped(type_out, "\"\\") << "\"";
     }
     os_ << "}";
   }
@@ -161,6 +179,7 @@
  private:
   std::ostream& os_;
   AllNodes all_;
+  AllNodes live_;
   const SourcePositionTable* positions_;
   bool first_node_;
 
@@ -171,10 +190,10 @@
 class JSONGraphEdgeWriter {
  public:
   JSONGraphEdgeWriter(std::ostream& os, Zone* zone, const Graph* graph)
-      : os_(os), all_(zone, graph), first_edge_(true) {}
+      : os_(os), all_(zone, graph, false), first_edge_(true) {}
 
   void Print() {
-    for (Node* const node : all_.live) PrintEdges(node);
+    for (Node* const node : all_.reachable) PrintEdges(node);
     os_ << "\n";
   }
 
@@ -326,7 +345,7 @@
 
 void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
   Tag tag(this, "compilation");
-  base::SmartArrayPointer<char> name = info->GetDebugName();
+  std::unique_ptr<char[]> name = info->GetDebugName();
   if (info->IsOptimizing()) {
     PrintStringProperty("name", name.get());
     PrintIndent();
@@ -581,9 +600,9 @@
             << "\"";
       } else {
         index = AllocatedOperand::cast(top->GetSpillOperand())->index();
-        if (top->kind() == FP_REGISTERS) {
-          os_ << " \"double_stack:" << index << "\"";
-        } else if (top->kind() == GENERAL_REGISTERS) {
+        if (IsFloatingPoint(top->representation())) {
+          os_ << " \"fp_stack:" << index << "\"";
+        } else {
           os_ << " \"stack:" << index << "\"";
         }
       }
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
index 85b0cf7..700d7a7 100644
--- a/src/compiler/graph-visualizer.h
+++ b/src/compiler/graph-visualizer.h
@@ -7,8 +7,7 @@
 
 #include <stdio.h>
 #include <iosfwd>
-
-#include "src/base/smart-pointers.h"
+#include <memory>
 
 namespace v8 {
 namespace internal {
@@ -23,8 +22,9 @@
 class Schedule;
 class SourcePositionTable;
 
-base::SmartArrayPointer<const char> GetVisualizerLogFileName(
-    CompilationInfo* info, const char* phase, const char* suffix);
+std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
+                                                 const char* phase,
+                                                 const char* suffix);
 
 struct AsJSON {
   AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 6df22f6..ad1a992 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -197,18 +197,33 @@
   Register const result_;
 };
 
-
-class OutOfLineLoadFloat final : public OutOfLineCode {
+class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
  public:
-  OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
+  OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
       : OutOfLineCode(gen), result_(result) {}
 
-  void Generate() final { __ pcmpeqd(result_, result_); }
+  void Generate() final {
+    __ xorps(result_, result_);
+    __ divss(result_, result_);
+  }
 
  private:
   XMMRegister const result_;
 };
 
+class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    __ xorpd(result_, result_);
+    __ divsd(result_, result_);
+  }
+
+ private:
+  XMMRegister const result_;
+};
 
 class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
  public:
@@ -271,23 +286,21 @@
 
 }  // namespace
 
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
-  do {                                                                  \
-    auto result = i.OutputDoubleRegister();                             \
-    auto offset = i.InputRegister(0);                                   \
-    if (instr->InputAt(1)->IsRegister()) {                              \
-      __ cmp(offset, i.InputRegister(1));                               \
-    } else {                                                            \
-      __ cmp(offset, i.InputImmediate(1));                              \
-    }                                                                   \
-    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
-    __ j(above_equal, ool->entry());                                    \
-    __ asm_instr(result, i.MemoryOperand(2));                           \
-    __ bind(ool->exit());                                               \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN)      \
+  do {                                                                \
+    auto result = i.OutputDoubleRegister();                           \
+    auto offset = i.InputRegister(0);                                 \
+    if (instr->InputAt(1)->IsRegister()) {                            \
+      __ cmp(offset, i.InputRegister(1));                             \
+    } else {                                                          \
+      __ cmp(offset, i.InputImmediate(1));                            \
+    }                                                                 \
+    OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
+    __ j(above_equal, ool->entry());                                  \
+    __ asm_instr(result, i.MemoryOperand(2));                         \
+    __ bind(ool->exit());                                             \
   } while (false)
 
-
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
   do {                                                                    \
     auto result = i.OutputRegister();                                     \
@@ -400,21 +413,7 @@
   __ pop(ebp);
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ add(esp, Immediate(sp_slot_delta * kPointerSize));
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ mov(ebp, MemOperand(ebp, 0));
   }
@@ -459,6 +458,68 @@
   __ bind(&done);
 }
 
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+                                   FrameAccessState* state,
+                                   int new_slot_above_sp,
+                                   bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
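+
+// Example of the arithmetic above: with current_sp_offset == 5 slots between
+// sp and the fixed part of the frame, a tail call that wants
+// new_slot_above_sp == 7 gives stack_slot_delta == +2 and lowers esp by two
+// slots; a negative delta raises esp instead, unless shrinkage is disallowed.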
+
+}  // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+  ZoneVector<MoveOperands*> pushes(zone());
+  GetPushCompatibleMoves(instr, flags, &pushes);
+
+  if (!pushes.empty() &&
+      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+       first_unused_stack_slot)) {
+    IA32OperandConverter g(this, instr);
+    for (auto move : pushes) {
+      LocationOperand destination_location(
+          LocationOperand::cast(move->destination()));
+      InstructionOperand source(move->source());
+      AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                    destination_location.index());
+      if (source.IsStackSlot()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ push(g.SlotToOperand(source_location.index()));
+      } else if (source.IsRegister()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ push(source_location.GetRegister());
+      } else if (source.IsImmediate()) {
+        __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
+      } else {
+        // Pushes of non-scalar data types are not supported.
+        UNIMPLEMENTED();
+      }
+      frame_access_state()->IncreaseSPDelta(1);
+      move->Eliminate();
+    }
+  }
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, false);
+}
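+
+// The loop above converts push-compatible gap moves (immediates, registers
+// and stack slots whose destinations form the run of slots directly below
+// first_unused_stack_slot) into actual pushes; the trailing call then snaps
+// the stack pointer to its final position without shrinking past it.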
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -482,8 +543,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          no_reg, no_reg, no_reg);
@@ -497,15 +556,15 @@
         __ jmp(reg);
       }
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       __ jmp(reg);
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -529,14 +588,13 @@
         __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
         __ Assert(equal, kWrongFunctionContext);
       }
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          no_reg, no_reg, no_reg);
       }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction: {
@@ -547,7 +605,7 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
@@ -579,6 +637,9 @@
     case kArchDebugBreak:
       __ int3();
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -649,9 +710,24 @@
       __ lea(i.OutputRegister(), Operand(base, offset.offset()));
       break;
     }
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
     case kIeee754Float64Atan2:
       ASSEMBLE_IEEE754_BINOP(atan2);
       break;
@@ -661,15 +737,15 @@
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Expm1:
       ASSEMBLE_IEEE754_UNOP(expm1);
       break;
     case kIeee754Float64Exp:
       ASSEMBLE_IEEE754_UNOP(exp);
       break;
-    case kIeee754Float64Atanh:
-      ASSEMBLE_IEEE754_UNOP(atanh);
-      break;
     case kIeee754Float64Log:
       ASSEMBLE_IEEE754_UNOP(log);
       break;
@@ -682,12 +758,33 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
+    case kIeee754Float64Pow: {
+      // TODO(bmeurer): Improve integration of the stub.
+      if (!i.InputDoubleRegister(1).is(xmm2)) {
+        __ movaps(xmm2, i.InputDoubleRegister(0));
+        __ movaps(xmm1, i.InputDoubleRegister(1));
+      } else {
+        __ movaps(xmm0, i.InputDoubleRegister(0));
+        __ movaps(xmm1, xmm2);
+        __ movaps(xmm2, xmm0);
+      }
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      __ movaps(i.OutputDoubleRegister(), xmm3);
+      break;
+    }
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kIA32Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -909,12 +1006,6 @@
       // when there is a (v)mulss depending on the result.
       __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       break;
-    case kSSEFloat32Max:
-      __ maxss(i.InputDoubleRegister(0), i.InputOperand(1));
-      break;
-    case kSSEFloat32Min:
-      __ minss(i.InputDoubleRegister(0), i.InputOperand(1));
-      break;
     case kSSEFloat32Sqrt:
       __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
@@ -957,12 +1048,117 @@
       // when there is a (v)mulsd depending on the result.
       __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       break;
-    case kSSEFloat64Max:
-      __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
+    case kSSEFloat32Max: {
+      Label compare_nan, compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(above, &done_compare, Label::kNear);
+      __ j(below, &compare_swap, Label::kNear);
+      __ movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
+      __ test(i.TempRegister(0), Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
       break;
-    case kSSEFloat64Min:
-      __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
+    }
+    case kSSEFloat64Max: {
+      Label compare_nan, compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(above, &done_compare, Label::kNear);
+      __ j(below, &compare_swap, Label::kNear);
+      __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
+      __ test(i.TempRegister(0), Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
       break;
+    }
+    case kSSEFloat32Min: {
+      Label compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(below, &done_compare, Label::kNear);
+      __ j(above, &compare_swap, Label::kNear);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ movss(kScratchDoubleReg, i.InputOperand(1));
+        __ movmskps(i.TempRegister(0), kScratchDoubleReg);
+      }
+      __ test(i.TempRegister(0), Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
+      break;
+    }
+    case kSSEFloat64Min: {
+      Label compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(below, &done_compare, Label::kNear);
+      __ j(above, &compare_swap, Label::kNear);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ movsd(kScratchDoubleReg, i.InputOperand(1));
+        __ movmskpd(i.TempRegister(0), kScratchDoubleReg);
+      }
+      __ test(i.TempRegister(0), Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
+      break;
+    }
     case kSSEFloat64Mod: {
       // TODO(dcarney): alignment is wrong.
       __ sub(esp, Immediate(kDoubleSize));
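Editorial note on the kSSEFloat32Max/Min and kSSEFloat64Max/Min sequences above: they implement NaN-propagating min/max in which -0 orders below +0 (JS Math.max/Math.min semantics). A rough, illustrative C++ model of the Float64Max path follows; this is not part of the patch, and Float64Min mirrors it with the branch directions reversed and the sign-bit test applied to the right operand.

#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Illustrative model only. ucomisd sets the parity flag on an unordered
// compare, which the sequence above routes to OutOfLineLoadFloat64NaN.
double Float64Max(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs))
    return std::numeric_limits<double>::quiet_NaN();
  if (lhs > rhs) return lhs;  // j(above, &done_compare)
  if (lhs < rhs) return rhs;  // j(below, &compare_swap)
  // Equal operands: movmskpd + test inspects the sign bit of lhs, so
  // Float64Max(-0.0, +0.0) correctly yields +0.0.
  uint64_t bits;
  std::memcpy(&bits, &lhs, sizeof bits);
  return (bits >> 63) ? rhs : lhs;
}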
@@ -1109,18 +1305,6 @@
       __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       break;
     }
-    case kAVXFloat32Max: {
-      CpuFeatureScope avx_scope(masm(), AVX);
-      __ vmaxss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputOperand(1));
-      break;
-    }
-    case kAVXFloat32Min: {
-      CpuFeatureScope avx_scope(masm(), AVX);
-      __ vminss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputOperand(1));
-      break;
-    }
     case kAVXFloat64Add: {
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1148,18 +1332,6 @@
       __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       break;
     }
-    case kAVXFloat64Max: {
-      CpuFeatureScope avx_scope(masm(), AVX);
-      __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputOperand(1));
-      break;
-    }
-    case kAVXFloat64Min: {
-      CpuFeatureScope avx_scope(masm(), AVX);
-      __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputOperand(1));
-      break;
-    }
     case kAVXFloat32Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
@@ -1398,10 +1570,10 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN);
       break;
     case kCheckedLoadFloat64:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN);
       break;
     case kCheckedStoreWord8:
       ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -1618,6 +1790,9 @@
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -1865,18 +2040,7 @@
     Constant src_constant = g.ToConstant(source);
     if (src_constant.type() == Constant::kHeapObject) {
       Handle<HeapObject> src = src_constant.ToHeapObject();
-      int slot;
-      if (IsMaterializableFromFrame(src, &slot)) {
-        if (destination->IsRegister()) {
-          Register dst = g.ToRegister(destination);
-          __ mov(dst, g.SlotToOperand(slot));
-        } else {
-          DCHECK(destination->IsStackSlot());
-          Operand dst = g.ToOperand(destination);
-          __ push(g.SlotToOperand(slot));
-          __ pop(dst);
-        }
-      } else if (destination->IsRegister()) {
+      if (destination->IsRegister()) {
         Register dst = g.ToRegister(destination);
         __ LoadHeapObject(dst, src);
       } else {
@@ -1931,18 +2095,44 @@
     } else {
       DCHECK(destination->IsFPStackSlot());
       Operand dst = g.ToOperand(destination);
-      __ movsd(dst, src);
+      MachineRepresentation rep =
+          LocationOperand::cast(source)->representation();
+      if (rep == MachineRepresentation::kFloat64) {
+        __ movsd(dst, src);
+      } else if (rep == MachineRepresentation::kFloat32) {
+        __ movss(dst, src);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        __ movups(dst, src);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     Operand src = g.ToOperand(source);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
     if (destination->IsFPRegister()) {
       XMMRegister dst = g.ToDoubleRegister(destination);
-      __ movsd(dst, src);
+      if (rep == MachineRepresentation::kFloat64) {
+        __ movsd(dst, src);
+      } else if (rep == MachineRepresentation::kFloat32) {
+        __ movss(dst, src);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        __ movups(dst, src);
+      }
     } else {
       Operand dst = g.ToOperand(destination);
-      __ movsd(kScratchDoubleReg, src);
-      __ movsd(dst, kScratchDoubleReg);
+      if (rep == MachineRepresentation::kFloat64) {
+        __ movsd(kScratchDoubleReg, src);
+        __ movsd(dst, kScratchDoubleReg);
+      } else if (rep == MachineRepresentation::kFloat32) {
+        __ movss(kScratchDoubleReg, src);
+        __ movss(dst, kScratchDoubleReg);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        __ movups(kScratchDoubleReg, src);
+        __ movups(dst, kScratchDoubleReg);
+      }
     }
   } else {
     UNREACHABLE();
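Editorial note: the move paths above dispatch on the source's MachineRepresentation to pick the instruction width. A tiny illustrative table of that mapping (names are local to this sketch, not V8's):

#include <cstddef>

// Sketch of the movss/movsd/movups selection: kFloat32 copies 4 bytes,
// kFloat64 copies 8, kSimd128 copies 16 (movups tolerates unaligned slots).
enum class Rep { kFloat32, kFloat64, kSimd128 };

constexpr size_t FPMoveBytes(Rep rep) {
  return rep == Rep::kFloat32 ? 4    // movss
       : rep == Rep::kFloat64 ? 8    // movsd
                              : 16;  // movups
}

static_assert(FPMoveBytes(Rep::kSimd128) == 16, "simd128 fills one XMM register");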
@@ -1995,21 +2185,51 @@
     // XMM register-memory swap.
     XMMRegister reg = g.ToDoubleRegister(source);
     Operand other = g.ToOperand(destination);
-    __ movsd(kScratchDoubleReg, other);
-    __ movsd(other, reg);
-    __ movaps(reg, kScratchDoubleReg);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      __ movsd(kScratchDoubleReg, other);
+      __ movsd(other, reg);
+      __ movaps(reg, kScratchDoubleReg);
+    } else if (rep == MachineRepresentation::kFloat32) {
+      __ movss(kScratchDoubleReg, other);
+      __ movss(other, reg);
+      __ movaps(reg, kScratchDoubleReg);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      __ movups(kScratchDoubleReg, other);
+      __ movups(other, reg);
+      __ movups(reg, kScratchDoubleReg);
+    }
   } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
     // FP memory-to-memory swap.
     Operand src0 = g.ToOperand(source);
-    Operand src1 = g.HighOperand(source);
     Operand dst0 = g.ToOperand(destination);
-    Operand dst1 = g.HighOperand(destination);
-    __ movsd(kScratchDoubleReg, dst0);  // Save destination in scratch register.
-    __ push(src0);  // Then use stack to copy source to destination.
-    __ pop(dst0);
-    __ push(src1);
-    __ pop(dst1);
-    __ movsd(src0, kScratchDoubleReg);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      Operand src1 = g.HighOperand(source);
+      Operand dst1 = g.HighOperand(destination);
+      __ movsd(kScratchDoubleReg, dst0);  // Save dst in scratch register.
+      __ push(src0);  // Then use stack to copy src to destination.
+      __ pop(dst0);
+      __ push(src1);
+      __ pop(dst1);
+      __ movsd(src0, kScratchDoubleReg);
+    } else if (rep == MachineRepresentation::kFloat32) {
+      __ movss(kScratchDoubleReg, dst0);  // Save dst in scratch register.
+      __ push(src0);  // Then use stack to copy src to destination.
+      __ pop(dst0);
+      __ movss(src0, kScratchDoubleReg);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      // Use the XOR trick to swap without a temporary.
+      __ movups(kScratchDoubleReg, src0);
+      __ xorps(kScratchDoubleReg, dst0);  // scratch contains src ^ dst.
+      __ movups(src0, kScratchDoubleReg);
+      __ xorps(kScratchDoubleReg, dst0);  // scratch contains src.
+      __ movups(dst0, kScratchDoubleReg);
+      __ xorps(kScratchDoubleReg, src0);  // scratch contains dst.
+      __ movups(src0, kScratchDoubleReg);
+    }
   } else {
     // No other combinations are possible.
     UNREACHABLE();
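Editorial note: the kSimd128 branch of the stack-slot swap above uses the XOR exchange so that only the single scratch XMM register is needed. The same trick on plain integers, as an illustrative sketch (not part of the patch):

#include <cassert>

// XOR exchange: swap two values through one scratch slot, mirroring the
// movups/xorps sequence above step for step.
void XorSwap(unsigned& src, unsigned& dst) {
  unsigned scratch = src;  // movups scratch, src
  scratch ^= dst;          // scratch = src ^ dst
  src = scratch;           // src now holds src ^ dst
  scratch ^= dst;          // scratch back to the original src
  dst = scratch;           // dst = original src
  scratch ^= src;          // scratch = src ^ (src ^ dst) = original dst
  src = scratch;           // src = original dst
}

int main() {
  unsigned x = 1, y = 2;
  XorSwap(x, y);
  assert(x == 2 && y == 1);
  return 0;
}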
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 09d4615..7cf0a11 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -48,8 +48,6 @@
   V(SSEFloat32Sub)                 \
   V(SSEFloat32Mul)                 \
   V(SSEFloat32Div)                 \
-  V(SSEFloat32Max)                 \
-  V(SSEFloat32Min)                 \
   V(SSEFloat32Abs)                 \
   V(SSEFloat32Neg)                 \
   V(SSEFloat32Sqrt)                \
@@ -60,7 +58,9 @@
   V(SSEFloat64Mul)                 \
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
+  V(SSEFloat32Max)                 \
   V(SSEFloat64Max)                 \
+  V(SSEFloat32Min)                 \
   V(SSEFloat64Min)                 \
   V(SSEFloat64Abs)                 \
   V(SSEFloat64Neg)                 \
@@ -86,14 +86,10 @@
   V(AVXFloat32Sub)                 \
   V(AVXFloat32Mul)                 \
   V(AVXFloat32Div)                 \
-  V(AVXFloat32Max)                 \
-  V(AVXFloat32Min)                 \
   V(AVXFloat64Add)                 \
   V(AVXFloat64Sub)                 \
   V(AVXFloat64Mul)                 \
   V(AVXFloat64Div)                 \
-  V(AVXFloat64Max)                 \
-  V(AVXFloat64Min)                 \
   V(AVXFloat64Abs)                 \
   V(AVXFloat64Neg)                 \
   V(AVXFloat32Abs)                 \
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index f19c328..1c62de5 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -51,8 +51,6 @@
     case kSSEFloat32Sub:
     case kSSEFloat32Mul:
     case kSSEFloat32Div:
-    case kSSEFloat32Max:
-    case kSSEFloat32Min:
     case kSSEFloat32Abs:
     case kSSEFloat32Neg:
     case kSSEFloat32Sqrt:
@@ -63,7 +61,9 @@
     case kSSEFloat64Mul:
     case kSSEFloat64Div:
     case kSSEFloat64Mod:
+    case kSSEFloat32Max:
     case kSSEFloat64Max:
+    case kSSEFloat32Min:
     case kSSEFloat64Min:
     case kSSEFloat64Abs:
     case kSSEFloat64Neg:
@@ -89,14 +89,10 @@
     case kAVXFloat32Sub:
     case kAVXFloat32Mul:
     case kAVXFloat32Div:
-    case kAVXFloat32Max:
-    case kAVXFloat32Min:
     case kAVXFloat64Add:
     case kAVXFloat64Sub:
     case kAVXFloat64Mul:
     case kAVXFloat64Div:
-    case kAVXFloat64Max:
-    case kAVXFloat64Min:
     case kAVXFloat64Abs:
     case kAVXFloat64Neg:
     case kAVXFloat32Abs:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 3ffdd30..4a1e19b 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -82,12 +82,16 @@
 
   AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
                                              Node* displacement_node,
+                                             DisplacementMode displacement_mode,
                                              InstructionOperand inputs[],
                                              size_t* input_count) {
     AddressingMode mode = kMode_MRI;
     int32_t displacement = (displacement_node == nullptr)
                                ? 0
                                : OpParameter<int32_t>(displacement_node);
+    if (displacement_mode == kNegativeDisplacement) {
+      displacement = -displacement;
+    }
     if (base != nullptr) {
       if (base->opcode() == IrOpcode::kInt32Constant) {
         displacement += OpParameter<int32_t>(base);
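Editorial note: the new DisplacementMode parameter lets the matcher fold subtraction into the addressing mode, so [base + index*scale - disp] is encoded by negating the immediate up front. A toy model of the folding (illustrative; assumes, as in the matcher, that scale is the power-of-two exponent):

#include <cstdint>

enum DisplacementMode { kPositiveDisplacement, kNegativeDisplacement };

// Collapse the sign into the immediate before choosing the mode.
int32_t FoldDisplacement(int32_t displacement, DisplacementMode mode) {
  return mode == kNegativeDisplacement ? -displacement : displacement;
}

intptr_t EffectiveAddress(intptr_t base, intptr_t index, int scale,
                          int32_t displacement, DisplacementMode mode) {
  return base + (index << scale) + FoldDisplacement(displacement, mode);
}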
@@ -142,11 +146,12 @@
   AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement32Matcher m(node, true);
+    BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
-      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
-                                         m.displacement(), inputs, input_count);
+      return GenerateMemoryOperandInputs(
+          m.index(), m.scale(), m.base(), m.displacement(),
+          m.displacement_mode(), inputs, input_count);
     } else {
       inputs[(*input_count)++] = UseRegister(node->InputAt(0));
       inputs[(*input_count)++] = UseRegister(node->InputAt(1));
@@ -221,7 +226,9 @@
     case MachineRepresentation::kWord16:
       opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
       break;
-    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord32:
       opcode = kIA32Movl;
       break;
@@ -305,7 +312,9 @@
       case MachineRepresentation::kWord16:
         opcode = kIA32Movw;
         break;
-      case MachineRepresentation::kTagged:  // Fall through.
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
+      case MachineRepresentation::kTagged:         // Fall through.
       case MachineRepresentation::kWord32:
         opcode = kIA32Movl;
         break;
@@ -338,6 +347,11 @@
   }
 }
 
+// Architecture supports unaligned access, therefore VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// Architecture supports unaligned access, therefore VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -362,10 +376,12 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:      // Fall through.
-    case MachineRepresentation::kTagged:   // Fall through.
-    case MachineRepresentation::kWord64:   // Fall through.
-    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kBit:            // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:         // Fall through.
+    case MachineRepresentation::kSimd128:        // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -409,10 +425,12 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:      // Fall through.
-    case MachineRepresentation::kTagged:   // Fall through.
-    case MachineRepresentation::kWord64:   // Fall through.
-    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kBit:            // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:         // Fall through.
+    case MachineRepresentation::kSimd128:        // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -493,7 +511,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -577,12 +595,14 @@
 }
 
 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
-             int scale, Node* base, Node* displacement) {
+             int scale, Node* base, Node* displacement,
+             DisplacementMode displacement_mode) {
   IA32OperandGenerator g(selector);
   InstructionOperand inputs[4];
   size_t input_count = 0;
-  AddressingMode mode = g.GenerateMemoryOperandInputs(
-      index, scale, base, displacement, inputs, &input_count);
+  AddressingMode mode =
+      g.GenerateMemoryOperandInputs(index, scale, base, displacement,
+                                    displacement_mode, inputs, &input_count);
 
   DCHECK_NE(0u, input_count);
   DCHECK_GE(arraysize(inputs), input_count);
@@ -603,7 +623,7 @@
   if (m.matches()) {
     Node* index = node->InputAt(0);
     Node* base = m.power_of_two_plus_one() ? index : nullptr;
-    EmitLea(this, node, index, m.scale(), base, nullptr);
+    EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
     return;
   }
   VisitShift(this, node, kIA32Shl);
@@ -726,6 +746,9 @@
 
 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   IA32OperandGenerator g(this);
@@ -743,7 +766,8 @@
     InstructionOperand inputs[4];
     size_t input_count = 0;
     AddressingMode mode = g.GenerateMemoryOperandInputs(
-        m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
+        m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
+        inputs, &input_count);
 
     DCHECK_NE(0u, input_count);
     DCHECK_GE(arraysize(inputs), input_count);
@@ -777,7 +801,7 @@
   if (m.matches()) {
     Node* index = node->InputAt(0);
     Node* base = m.power_of_two_plus_one() ? index : nullptr;
-    EmitLea(this, node, index, m.scale(), base, nullptr);
+    EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
     return;
   }
   IA32OperandGenerator g(this);
@@ -913,44 +937,10 @@
 
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
-  IA32OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
-                   kSSEFloat32Neg);
-    return;
-  }
-  VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
-}
-
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
   VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
-  IA32OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
-      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
-        Float64BinopMatcher mright0(m.right().InputAt(0));
-        if (mright0.left().IsMinusZero()) {
-          Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
-               g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
-          return;
-        }
-      }
-    }
-    VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
-                   kSSEFloat64Neg);
-    return;
-  }
-  VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
-}
-
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
   VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
 }
 
@@ -982,24 +972,36 @@
        temps);
 }
 
-
 void InstructionSelector::VisitFloat32Max(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Max, kSSEFloat32Max);
+  IA32OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempRegister()};
+  Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+       arraysize(temps), temps);
 }
 
-
 void InstructionSelector::VisitFloat64Max(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Max, kSSEFloat64Max);
+  IA32OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempRegister()};
+  Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+       arraysize(temps), temps);
 }
 
-
 void InstructionSelector::VisitFloat32Min(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Min, kSSEFloat32Min);
+  IA32OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempRegister()};
+  Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+       arraysize(temps), temps);
 }
 
-
 void InstructionSelector::VisitFloat64Min(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Min, kSSEFloat64Min);
+  IA32OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempRegister()};
+  Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+       arraysize(temps), temps);
 }
 
 
@@ -1068,9 +1070,13 @@
   VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
 }
 
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+}
 
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+}
 
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
@@ -1097,7 +1103,7 @@
     InstructionOperand temps[] = {g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr, temp_count, temps);
 
     // Poke any stack arguments.
@@ -1161,7 +1167,7 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1179,7 +1185,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1202,10 +1208,7 @@
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
-                                    Node* right) {
-  if (opcode != kIA32Cmp && opcode != kIA32Test) {
-    return opcode;
-  }
+                                    Node* right, FlagsContinuation* cont) {
   // Currently, if one of the two operands is not a Load, we don't know what its
   // machine representation is, so we bail out.
   // TODO(epertoso): we can probably get some size information out of immediates
@@ -1215,19 +1218,39 @@
   }
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
-  if (left_representation != LoadRepresentationOf(right->op())) {
-    return opcode;
+  MachineType left_type = LoadRepresentationOf(left->op());
+  MachineType right_type = LoadRepresentationOf(right->op());
+  if (left_type == right_type) {
+    switch (left_type.representation()) {
+      case MachineRepresentation::kBit:
+      case MachineRepresentation::kWord8: {
+        if (opcode == kIA32Test) return kIA32Test8;
+        if (opcode == kIA32Cmp) {
+          if (left_type.semantic() == MachineSemantic::kUint32) {
+            cont->OverwriteUnsignedIfSigned();
+          } else {
+            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+          }
+          return kIA32Cmp8;
+        }
+        break;
+      }
+      case MachineRepresentation::kWord16:
+        if (opcode == kIA32Test) return kIA32Test16;
+        if (opcode == kIA32Cmp) {
+          if (left_type.semantic() == MachineSemantic::kUint32) {
+            cont->OverwriteUnsignedIfSigned();
+          } else {
+            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+          }
+          return kIA32Cmp16;
+        }
+        break;
+      default:
+        break;
+    }
   }
-  switch (left_representation.representation()) {
-    case MachineRepresentation::kBit:
-    case MachineRepresentation::kWord8:
-      return opcode == kIA32Cmp ? kIA32Cmp8 : kIA32Test8;
-    case MachineRepresentation::kWord16:
-      return opcode == kIA32Cmp ? kIA32Cmp16 : kIA32Test16;
-    default:
-      return opcode;
-  }
+  return opcode;
 }
 
 // Shared routine for multiple float32 compare operations (inputs commuted).
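Editorial note on TryNarrowOpcodeSize above: narrowing a 32-bit cmp to kIA32Cmp8/kIA32Cmp16 is only sound for kUint32 operands if signed flag conditions are rewritten to their unsigned counterparts (OverwriteUnsignedIfSigned), because values with the top bit set compare as negative at the narrow width even though their zero-extended 32-bit forms are ordinary unsigned integers. A small illustration in plain C++ (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  uint8_t a = 0x01, b = 0xFF;          // as uint32: 1 < 255
  int8_t sa = static_cast<int8_t>(a);  // signed 8-bit view: 1
  int8_t sb = static_cast<int8_t>(b);  // signed 8-bit view: -1
  assert(a < b);                       // unsigned narrow compare: correct
  assert(!(sa < sb));                  // signed narrow compare: wrong answer
  return 0;
}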
@@ -1254,7 +1277,8 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+  InstructionCode narrowed_opcode =
+      TryNarrowOpcodeSize(opcode, left, right, cont);
 
   int effect_level = selector->GetEffectLevel(node);
   if (cont->IsBranch()) {
@@ -1318,7 +1342,7 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
                                  cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
@@ -1399,6 +1423,9 @@
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kIA32Sub, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kIA32Imul, cont);
               default:
                 break;
             }
@@ -1429,14 +1456,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1523,6 +1550,14 @@
   VisitBinop(this, node, kIA32Sub, &cont);
 }
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kIA32Imul, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kIA32Imul, &cont);
+}
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
   FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
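Editorial note on VisitInt32MulWithOverflow above: the node computes a 32-bit signed product plus an overflow bit; when projection 1 has a use, the selector fuses the check into kIA32Imul's kOverflow flags, otherwise a plain binop is emitted. A behavioral sketch (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

// 32-bit signed multiply whose second result reports whether the product
// wrapped, i.e. the kOverflow condition the imul continuation checks.
bool Int32MulWithOverflow(int32_t lhs, int32_t rhs, int32_t* result) {
  int64_t wide = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
  *result = static_cast<int32_t>(wide);
  return wide != static_cast<int64_t>(*result);
}

int main() {
  int32_t r;
  assert(!Int32MulWithOverflow(1 << 15, 1 << 15, &r));  // 2^30 fits
  assert(Int32MulWithOverflow(1 << 16, 1 << 16, &r));   // 2^32 overflows
  return 0;
}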
@@ -1657,10 +1692,6 @@
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
-      MachineOperatorBuilder::kFloat32Max |
-      MachineOperatorBuilder::kFloat32Min |
-      MachineOperatorBuilder::kFloat64Max |
-      MachineOperatorBuilder::kFloat64Min |
       MachineOperatorBuilder::kWord32ShiftIsSafe |
       MachineOperatorBuilder::kWord32Ctz;
   if (CpuFeatures::IsSupported(POPCNT)) {
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 0b3132f..c6689d8 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -57,6 +57,7 @@
   V(ArchTableSwitch)                      \
   V(ArchNop)                              \
   V(ArchDebugBreak)                       \
+  V(ArchImpossible)                       \
   V(ArchComment)                          \
   V(ArchThrowTerminator)                  \
   V(ArchDeoptimize)                       \
@@ -89,19 +90,27 @@
   V(AtomicStoreWord8)                     \
   V(AtomicStoreWord16)                    \
   V(AtomicStoreWord32)                    \
+  V(Ieee754Float64Acos)                   \
+  V(Ieee754Float64Acosh)                  \
+  V(Ieee754Float64Asin)                   \
+  V(Ieee754Float64Asinh)                  \
   V(Ieee754Float64Atan)                   \
-  V(Ieee754Float64Atan2)                  \
   V(Ieee754Float64Atanh)                  \
+  V(Ieee754Float64Atan2)                  \
   V(Ieee754Float64Cbrt)                   \
   V(Ieee754Float64Cos)                    \
+  V(Ieee754Float64Cosh)                   \
   V(Ieee754Float64Exp)                    \
   V(Ieee754Float64Expm1)                  \
   V(Ieee754Float64Log)                    \
   V(Ieee754Float64Log1p)                  \
   V(Ieee754Float64Log10)                  \
   V(Ieee754Float64Log2)                   \
+  V(Ieee754Float64Pow)                    \
   V(Ieee754Float64Sin)                    \
-  V(Ieee754Float64Tan)
+  V(Ieee754Float64Sinh)                   \
+  V(Ieee754Float64Tan)                    \
+  V(Ieee754Float64Tanh)
 
 #define ARCH_OPCODE_LIST(V)  \
   COMMON_ARCH_OPCODE_LIST(V) \
@@ -170,7 +179,9 @@
   kUnorderedEqual,
   kUnorderedNotEqual,
   kOverflow,
-  kNotOverflow
+  kNotOverflow,
+  kPositiveOrZero,
+  kNegative
 };
 
 inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index 3ef7c08..2e10794 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -83,8 +83,8 @@
       last_side_effect_instr_(nullptr),
       pending_loads_(zone),
       last_live_in_reg_marker_(nullptr),
-      last_deopt_(nullptr) {
-}
+      last_deopt_(nullptr),
+      operands_map_(zone) {}
 
 
 void InstructionScheduler::StartBlock(RpoNumber rpo) {
@@ -93,6 +93,7 @@
   DCHECK(pending_loads_.empty());
   DCHECK(last_live_in_reg_marker_ == nullptr);
   DCHECK(last_deopt_ == nullptr);
+  DCHECK(operands_map_.empty());
   sequence()->StartBlock(rpo);
 }
 
@@ -109,6 +110,7 @@
   pending_loads_.clear();
   last_live_in_reg_marker_ = nullptr;
   last_deopt_ = nullptr;
+  operands_map_.clear();
 }
 
 
@@ -165,9 +167,26 @@
     }
 
     // Look for operand dependencies.
-    for (ScheduleGraphNode* node : graph_) {
-      if (HasOperandDependency(node->instruction(), instr)) {
-        node->AddSuccessor(new_node);
+    for (size_t i = 0; i < instr->InputCount(); ++i) {
+      const InstructionOperand* input = instr->InputAt(i);
+      if (input->IsUnallocated()) {
+        int32_t vreg = UnallocatedOperand::cast(input)->virtual_register();
+        auto it = operands_map_.find(vreg);
+        if (it != operands_map_.end()) {
+          it->second->AddSuccessor(new_node);
+        }
+      }
+    }
+
+    // Record the virtual registers defined by this instruction.
+    for (size_t i = 0; i < instr->OutputCount(); ++i) {
+      const InstructionOperand* output = instr->OutputAt(i);
+      if (output->IsUnallocated()) {
+        operands_map_[UnallocatedOperand::cast(output)->virtual_register()] =
+            new_node;
+      } else if (output->IsConstant()) {
+        operands_map_[ConstantOperand::cast(output)->virtual_register()] =
+            new_node;
       }
     }
   }
@@ -223,20 +242,29 @@
     case kArchTruncateDoubleToI:
     case kArchStackSlot:
     case kArchDebugBreak:
+    case kArchImpossible:
     case kArchComment:
+    case kIeee754Float64Acos:
+    case kIeee754Float64Acosh:
+    case kIeee754Float64Asin:
+    case kIeee754Float64Asinh:
     case kIeee754Float64Atan:
-    case kIeee754Float64Atan2:
     case kIeee754Float64Atanh:
+    case kIeee754Float64Atan2:
     case kIeee754Float64Cbrt:
     case kIeee754Float64Cos:
+    case kIeee754Float64Cosh:
     case kIeee754Float64Exp:
     case kIeee754Float64Expm1:
     case kIeee754Float64Log:
     case kIeee754Float64Log1p:
     case kIeee754Float64Log10:
     case kIeee754Float64Log2:
+    case kIeee754Float64Pow:
     case kIeee754Float64Sin:
+    case kIeee754Float64Sinh:
     case kIeee754Float64Tan:
+    case kIeee754Float64Tanh:
       return kNoOpcodeFlags;
 
     case kArchStackPointer:
@@ -308,33 +336,6 @@
 }
 
 
-bool InstructionScheduler::HasOperandDependency(
-    const Instruction* instr1, const Instruction* instr2) const {
-  for (size_t i = 0; i < instr1->OutputCount(); ++i) {
-    for (size_t j = 0; j < instr2->InputCount(); ++j) {
-      const InstructionOperand* output = instr1->OutputAt(i);
-      const InstructionOperand* input = instr2->InputAt(j);
-
-      if (output->IsUnallocated() && input->IsUnallocated() &&
-          (UnallocatedOperand::cast(output)->virtual_register() ==
-           UnallocatedOperand::cast(input)->virtual_register())) {
-        return true;
-      }
-
-      if (output->IsConstant() && input->IsUnallocated() &&
-          (ConstantOperand::cast(output)->virtual_register() ==
-           UnallocatedOperand::cast(input)->virtual_register())) {
-        return true;
-      }
-    }
-  }
-
-  // TODO(bafsa): Do we need to look for anti-dependencies/output-dependencies?
-
-  return false;
-}
-
-
 bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
   return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
           (instr->flags_mode() == kFlags_branch));
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index 4f5b0f7..271aa0d 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -156,10 +156,6 @@
   int GetInstructionFlags(const Instruction* instr) const;
   int GetTargetInstructionFlags(const Instruction* instr) const;
 
-  // Return true if instr2 uses any value defined by instr1.
-  bool HasOperandDependency(const Instruction* instr1,
-                            const Instruction* instr2) const;
-
   // Return true if the instruction is a basic block terminator.
   bool IsBlockTerminator(const Instruction* instr) const;
 
@@ -214,6 +210,10 @@
 
   // Last deoptimization instruction encountered while building the graph.
   ScheduleGraphNode* last_deopt_;
+
+  // Keep track of definition points for virtual registers. This is used to
+  // record operand dependencies in the scheduling graph.
+  ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;
 };
 
 }  // namespace compiler
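Editorial note: the operands_map_ member above replaces the quadratic HasOperandDependency scan. Each virtual register maps to the scheduling node that defines it, so use-to-def edges are added in a single pass over each instruction's operands. A condensed illustration (types and names are local to this sketch, not V8's):

#include <cstdint>
#include <map>
#include <vector>

struct Node { std::vector<Node*> successors; };

// For one new instruction: wire its inputs to their definition points,
// then record the registers it defines for later instructions.
void AddOperandDeps(Node* new_node,
                    const std::vector<int32_t>& input_vregs,
                    const std::vector<int32_t>& output_vregs,
                    std::map<int32_t, Node*>& operands_map) {
  for (int32_t vreg : input_vregs) {   // use -> def edges
    auto it = operands_map.find(vreg);
    if (it != operands_map.end()) it->second->successors.push_back(new_node);
  }
  for (int32_t vreg : output_vregs) {  // record definition points
    operands_map[vreg] = new_node;
  }
}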
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index be24e2d..25d8a99 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -62,15 +62,18 @@
   }
 
   InstructionOperand DefineAsConstant(Node* node) {
+    return DefineAsConstant(node, ToConstant(node));
+  }
+
+  InstructionOperand DefineAsConstant(Node* node, Constant constant) {
     selector()->MarkAsDefined(node);
     int virtual_register = GetVReg(node);
-    sequence()->AddConstant(virtual_register, ToConstant(node));
+    sequence()->AddConstant(virtual_register, constant);
     return ConstantOperand(virtual_register);
   }
 
-  InstructionOperand DefineAsLocation(Node* node, LinkageLocation location,
-                                      MachineRepresentation rep) {
-    return Define(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+  InstructionOperand DefineAsLocation(Node* node, LinkageLocation location) {
+    return Define(node, ToUnallocatedOperand(location, GetVReg(node)));
   }
 
   InstructionOperand DefineAsDualLocation(Node* node,
@@ -140,24 +143,30 @@
     }
   }
 
+  InstructionOperand UseImmediate(int immediate) {
+    return sequence()->AddImmediate(Constant(immediate));
+  }
+
   InstructionOperand UseImmediate(Node* node) {
     return sequence()->AddImmediate(ToConstant(node));
   }
 
-  InstructionOperand UseLocation(Node* node, LinkageLocation location,
-                                 MachineRepresentation rep) {
-    return Use(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+  InstructionOperand UseNegatedImmediate(Node* node) {
+    return sequence()->AddImmediate(ToNegatedConstant(node));
+  }
+
+  InstructionOperand UseLocation(Node* node, LinkageLocation location) {
+    return Use(node, ToUnallocatedOperand(location, GetVReg(node)));
   }
 
   // Used to force gap moves from the from_location to the to_location
   // immediately before an instruction.
   InstructionOperand UsePointerLocation(LinkageLocation to_location,
                                         LinkageLocation from_location) {
-    MachineRepresentation rep = MachineType::PointerRepresentation();
     UnallocatedOperand casted_from_operand =
-        UnallocatedOperand::cast(TempLocation(from_location, rep));
+        UnallocatedOperand::cast(TempLocation(from_location));
     selector_->Emit(kArchNop, casted_from_operand);
-    return ToUnallocatedOperand(to_location, rep,
+    return ToUnallocatedOperand(to_location,
                                 casted_from_operand.virtual_register());
   }
 
@@ -185,10 +194,8 @@
     return sequence()->AddImmediate(Constant(imm));
   }
 
-  InstructionOperand TempLocation(LinkageLocation location,
-                                  MachineRepresentation rep) {
-    return ToUnallocatedOperand(location, rep,
-                                sequence()->NextVirtualRegister());
+  InstructionOperand TempLocation(LinkageLocation location) {
+    return ToUnallocatedOperand(location, sequence()->NextVirtualRegister());
   }
 
   InstructionOperand Label(BasicBlock* block) {
@@ -230,6 +237,19 @@
     return Constant(static_cast<int32_t>(0));
   }
 
+  static Constant ToNegatedConstant(const Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return Constant(-OpParameter<int32_t>(node));
+      case IrOpcode::kInt64Constant:
+        return Constant(-OpParameter<int64_t>(node));
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return Constant(static_cast<int32_t>(0));
+  }
+
   UnallocatedOperand Define(Node* node, UnallocatedOperand operand) {
     DCHECK_NOT_NULL(node);
     DCHECK_EQ(operand.virtual_register(), GetVReg(node));
@@ -257,7 +277,6 @@
   }
 
   UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
-                                          MachineRepresentation rep,
                                           int virtual_register) {
     if (location.IsAnyRegister()) {
       // any machine register.
@@ -275,7 +294,7 @@
                                 location.AsCalleeFrameSlot(), virtual_register);
     }
     // a fixed register.
-    if (IsFloatingPoint(rep)) {
+    if (IsFloatingPoint(location.GetType().representation())) {
       return UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
                                 location.AsRegister(), virtual_register);
     }
@@ -310,13 +329,14 @@
 
   // Creates a new flags continuation for an eager deoptimization exit.
   static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+                                         DeoptimizeReason reason,
                                          Node* frame_state) {
-    return FlagsContinuation(kFlags_deoptimize, condition, frame_state);
+    return FlagsContinuation(condition, reason, frame_state);
   }
 
   // Creates a new flags continuation for a boolean value.
   static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
-    return FlagsContinuation(kFlags_set, condition, result);
+    return FlagsContinuation(condition, result);
   }
 
   bool IsNone() const { return mode_ == kFlags_none; }
@@ -327,6 +347,10 @@
     DCHECK(!IsNone());
     return condition_;
   }
+  DeoptimizeReason reason() const {
+    DCHECK(IsDeoptimize());
+    return reason_;
+  }
   Node* frame_state() const {
     DCHECK(IsDeoptimize());
     return frame_state_or_result_;
@@ -354,12 +378,33 @@
     condition_ = CommuteFlagsCondition(condition_);
   }
 
+  void Overwrite(FlagsCondition condition) { condition_ = condition; }
+
   void OverwriteAndNegateIfEqual(FlagsCondition condition) {
     bool negate = condition_ == kEqual;
     condition_ = condition;
     if (negate) Negate();
   }
 
+  void OverwriteUnsignedIfSigned() {
+    switch (condition_) {
+      case kSignedLessThan:
+        condition_ = kUnsignedLessThan;
+        break;
+      case kSignedLessThanOrEqual:
+        condition_ = kUnsignedLessThanOrEqual;
+        break;
+      case kSignedGreaterThan:
+        condition_ = kUnsignedGreaterThan;
+        break;
+      case kSignedGreaterThanOrEqual:
+        condition_ = kUnsignedGreaterThanOrEqual;
+        break;
+      default:
+        break;
+    }
+  }
+
   // Encodes this flags continuation into the given opcode.
   InstructionCode Encode(InstructionCode opcode) {
     opcode |= FlagsModeField::encode(mode_);
@@ -370,16 +415,24 @@
   }
 
  private:
-  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
-                    Node* frame_state_or_result)
-      : mode_(mode),
+  FlagsContinuation(FlagsCondition condition, DeoptimizeReason reason,
+                    Node* frame_state)
+      : mode_(kFlags_deoptimize),
         condition_(condition),
-        frame_state_or_result_(frame_state_or_result) {
-    DCHECK_NOT_NULL(frame_state_or_result);
+        reason_(reason),
+        frame_state_or_result_(frame_state) {
+    DCHECK_NOT_NULL(frame_state);
+  }
+  FlagsContinuation(FlagsCondition condition, Node* result)
+      : mode_(kFlags_set),
+        condition_(condition),
+        frame_state_or_result_(result) {
+    DCHECK_NOT_NULL(result);
   }
 
   FlagsMode const mode_;
   FlagsCondition condition_;
+  DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize
   Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize
                                  // or mode_ == kFlags_set.
   BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch.
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 558aff3..ac8e64a 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -241,6 +241,20 @@
   return true;
 }
 
+bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
+                                                      Node* node) const {
+  BasicBlock* bb_user = schedule()->block(user);
+  BasicBlock* bb_node = schedule()->block(node);
+  if (bb_user != bb_node) return false;
+  for (Edge const edge : node->use_edges()) {
+    Node* from = edge.from();
+    if ((from != user) && (schedule()->block(from) == bb_user)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 int InstructionSelector::GetVirtualRegister(const Node* node) {
   DCHECK_NOT_NULL(node);
   size_t const id = node->id();
@@ -285,6 +299,9 @@
 
 bool InstructionSelector::IsUsed(Node* node) const {
   DCHECK_NOT_NULL(node);
+  // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
+  // that the Retain is actually emitted, otherwise the GC will mess up.
+  if (node->opcode() == IrOpcode::kRetain) return true;
   if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
   size_t const id = node->id();
   DCHECK_LT(id, used_.size());
@@ -330,11 +347,12 @@
 
 enum class FrameStateInputKind { kAny, kStackSlot };
 
-
 InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
-                                   FrameStateInputKind kind) {
+                                   FrameStateInputKind kind,
+                                   MachineRepresentation rep) {
   switch (input->opcode()) {
     case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
     case IrOpcode::kNumberConstant:
     case IrOpcode::kFloat32Constant:
     case IrOpcode::kFloat64Constant:
@@ -344,11 +362,15 @@
       UNREACHABLE();
       break;
     default:
-      switch (kind) {
-        case FrameStateInputKind::kStackSlot:
-          return g->UseUniqueSlot(input);
-        case FrameStateInputKind::kAny:
-          return g->UseAny(input);
+      if (rep == MachineRepresentation::kNone) {
+        return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
+      } else {
+        switch (kind) {
+          case FrameStateInputKind::kStackSlot:
+            return g->UseUniqueSlot(input);
+          case FrameStateInputKind::kAny:
+            return g->UseAny(input);
+        }
       }
   }
   UNREACHABLE();
@@ -414,7 +436,7 @@
       break;
     }
     default: {
-      inputs->push_back(OperandForDeopt(g, input, kind));
+      inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
       descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
       return 1;
     }
@@ -560,17 +582,17 @@
       bool output_is_live = buffer->output_nodes[i] != nullptr ||
                             i < outputs_needed_by_framestate;
       if (output_is_live) {
-        MachineType type =
-            buffer->descriptor->GetReturnType(static_cast<int>(i));
+        MachineRepresentation rep =
+            buffer->descriptor->GetReturnType(static_cast<int>(i))
+                .representation();
         LinkageLocation location =
             buffer->descriptor->GetReturnLocation(static_cast<int>(i));
 
         Node* output = buffer->output_nodes[i];
-        InstructionOperand op =
-            output == nullptr
-                ? g.TempLocation(location, type.representation())
-                : g.DefineAsLocation(output, location, type.representation());
-        MarkAsRepresentation(type.representation(), op);
+        InstructionOperand op = output == nullptr
+                                    ? g.TempLocation(location)
+                                    : g.DefineAsLocation(output, location);
+        MarkAsRepresentation(rep, op);
 
         buffer->outputs.push_back(op);
       }
@@ -597,8 +619,7 @@
       break;
     case CallDescriptor::kCallJSFunction:
       buffer->instruction_args.push_back(
-          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
-                        buffer->descriptor->GetInputType(0).representation()));
+          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
       break;
   }
   DCHECK_EQ(1u, buffer->instruction_args.size());
@@ -617,7 +638,7 @@
     // all the frames on top of it that are either an arguments adaptor frame
     // or a tail caller frame.
     if (buffer->descriptor->SupportsTailCalls()) {
-      frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+      frame_state = NodeProperties::GetFrameStateInput(frame_state);
       buffer->frame_state_descriptor =
           buffer->frame_state_descriptor->outer_state();
       while (buffer->frame_state_descriptor != nullptr &&
@@ -625,15 +646,15 @@
                   FrameStateType::kArgumentsAdaptor ||
               buffer->frame_state_descriptor->type() ==
                   FrameStateType::kTailCallerFunction)) {
-        frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+        frame_state = NodeProperties::GetFrameStateInput(frame_state);
         buffer->frame_state_descriptor =
             buffer->frame_state_descriptor->outer_state();
       }
     }
 
-    InstructionSequence::StateId state_id =
-        sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
-    buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
+    int const state_id = sequence()->AddDeoptimizationEntry(
+        buffer->frame_state_descriptor, DeoptimizeReason::kNoReason);
+    buffer->instruction_args.push_back(g.TempImmediate(state_id));
 
     StateObjectDeduplicator deduplicator(instruction_zone());
 
@@ -665,9 +686,7 @@
       location = LinkageLocation::ConvertToTailCallerLocation(
           location, stack_param_delta);
     }
-    InstructionOperand op =
-        g.UseLocation(*iter, location,
-                      buffer->descriptor->GetInputType(index).representation());
+    InstructionOperand op = g.UseLocation(*iter, location);
     if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
       int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
       if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
@@ -706,6 +725,7 @@
   int effect_level = 0;
   for (Node* const node : *block) {
     if (node->opcode() == IrOpcode::kStore ||
+        node->opcode() == IrOpcode::kUnalignedStore ||
         node->opcode() == IrOpcode::kCheckedStore ||
         node->opcode() == IrOpcode::kCall) {
       ++effect_level;
@@ -822,9 +842,9 @@
       return VisitReturn(input);
     }
     case BasicBlock::kDeoptimize: {
-      DeoptimizeKind kind = DeoptimizeKindOf(input->op());
+      DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
       Node* value = input->InputAt(0);
-      return VisitDeoptimize(kind, value);
+      return VisitDeoptimize(p.kind(), p.reason(), value);
     }
     case BasicBlock::kThrow:
       DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -874,6 +894,7 @@
       return MarkAsReference(node), VisitOsrValue(node);
     case IrOpcode::kPhi: {
       MachineRepresentation rep = PhiRepresentationOf(node->op());
+      if (rep == MachineRepresentation::kNone) return;
       MarkAsRepresentation(rep, node);
       return VisitPhi(node);
     }
@@ -912,6 +933,9 @@
     case IrOpcode::kComment:
       VisitComment(node);
       return;
+    case IrOpcode::kRetain:
+      VisitRetain(node);
+      return;
     case IrOpcode::kLoad: {
       LoadRepresentation type = LoadRepresentationOf(node->op());
       MarkAsRepresentation(type.representation(), node);
@@ -941,6 +965,8 @@
       return MarkAsWord32(node), VisitWord32Ctz(node);
     case IrOpcode::kWord32ReverseBits:
       return MarkAsWord32(node), VisitWord32ReverseBits(node);
+    case IrOpcode::kWord32ReverseBytes:
+      return MarkAsWord32(node), VisitWord32ReverseBytes(node);
     case IrOpcode::kWord32Popcnt:
       return MarkAsWord32(node), VisitWord32Popcnt(node);
     case IrOpcode::kWord64Popcnt:
@@ -965,6 +991,8 @@
       return MarkAsWord64(node), VisitWord64Ctz(node);
     case IrOpcode::kWord64ReverseBits:
       return MarkAsWord64(node), VisitWord64ReverseBits(node);
+    case IrOpcode::kWord64ReverseBytes:
+      return MarkAsWord64(node), VisitWord64ReverseBytes(node);
     case IrOpcode::kWord64Equal:
       return VisitWord64Equal(node);
     case IrOpcode::kInt32Add:
@@ -977,6 +1005,8 @@
       return VisitInt32SubWithOverflow(node);
     case IrOpcode::kInt32Mul:
       return MarkAsWord32(node), VisitInt32Mul(node);
+    case IrOpcode::kInt32MulWithOverflow:
+      return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
     case IrOpcode::kInt32MulHigh:
       return VisitInt32MulHigh(node);
     case IrOpcode::kInt32Div:
@@ -1035,6 +1065,19 @@
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kImpossibleToWord32:
+      return MarkAsWord32(node), VisitImpossibleToWord32(node);
+    case IrOpcode::kImpossibleToWord64:
+      return MarkAsWord64(node), VisitImpossibleToWord64(node);
+    case IrOpcode::kImpossibleToFloat32:
+      return MarkAsFloat32(node), VisitImpossibleToFloat32(node);
+    case IrOpcode::kImpossibleToFloat64:
+      return MarkAsFloat64(node), VisitImpossibleToFloat64(node);
+    case IrOpcode::kImpossibleToTagged:
+      MarkAsRepresentation(MachineType::PointerRepresentation(), node);
+      return VisitImpossibleToTagged(node);
+    case IrOpcode::kImpossibleToBit:
+      return MarkAsWord32(node), VisitImpossibleToBit(node);
     case IrOpcode::kFloat64SilenceNaN:
       MarkAsFloat64(node);
       if (CanProduceSignalingNaN(node->InputAt(0))) {
@@ -1092,18 +1135,12 @@
       return MarkAsFloat32(node), VisitFloat32Add(node);
     case IrOpcode::kFloat32Sub:
       return MarkAsFloat32(node), VisitFloat32Sub(node);
-    case IrOpcode::kFloat32SubPreserveNan:
-      return MarkAsFloat32(node), VisitFloat32SubPreserveNan(node);
     case IrOpcode::kFloat32Neg:
       return MarkAsFloat32(node), VisitFloat32Neg(node);
     case IrOpcode::kFloat32Mul:
       return MarkAsFloat32(node), VisitFloat32Mul(node);
     case IrOpcode::kFloat32Div:
       return MarkAsFloat32(node), VisitFloat32Div(node);
-    case IrOpcode::kFloat32Min:
-      return MarkAsFloat32(node), VisitFloat32Min(node);
-    case IrOpcode::kFloat32Max:
-      return MarkAsFloat32(node), VisitFloat32Max(node);
     case IrOpcode::kFloat32Abs:
       return MarkAsFloat32(node), VisitFloat32Abs(node);
     case IrOpcode::kFloat32Sqrt:
@@ -1114,12 +1151,14 @@
       return VisitFloat32LessThan(node);
     case IrOpcode::kFloat32LessThanOrEqual:
       return VisitFloat32LessThanOrEqual(node);
+    case IrOpcode::kFloat32Max:
+      return MarkAsFloat32(node), VisitFloat32Max(node);
+    case IrOpcode::kFloat32Min:
+      return MarkAsFloat32(node), VisitFloat32Min(node);
     case IrOpcode::kFloat64Add:
       return MarkAsFloat64(node), VisitFloat64Add(node);
     case IrOpcode::kFloat64Sub:
       return MarkAsFloat64(node), VisitFloat64Sub(node);
-    case IrOpcode::kFloat64SubPreserveNan:
-      return MarkAsFloat64(node), VisitFloat64SubPreserveNan(node);
     case IrOpcode::kFloat64Neg:
       return MarkAsFloat64(node), VisitFloat64Neg(node);
     case IrOpcode::kFloat64Mul:
@@ -1134,16 +1173,26 @@
       return MarkAsFloat64(node), VisitFloat64Max(node);
     case IrOpcode::kFloat64Abs:
       return MarkAsFloat64(node), VisitFloat64Abs(node);
+    case IrOpcode::kFloat64Acos:
+      return MarkAsFloat64(node), VisitFloat64Acos(node);
+    case IrOpcode::kFloat64Acosh:
+      return MarkAsFloat64(node), VisitFloat64Acosh(node);
+    case IrOpcode::kFloat64Asin:
+      return MarkAsFloat64(node), VisitFloat64Asin(node);
+    case IrOpcode::kFloat64Asinh:
+      return MarkAsFloat64(node), VisitFloat64Asinh(node);
     case IrOpcode::kFloat64Atan:
       return MarkAsFloat64(node), VisitFloat64Atan(node);
-    case IrOpcode::kFloat64Atan2:
-      return MarkAsFloat64(node), VisitFloat64Atan2(node);
     case IrOpcode::kFloat64Atanh:
       return MarkAsFloat64(node), VisitFloat64Atanh(node);
+    case IrOpcode::kFloat64Atan2:
+      return MarkAsFloat64(node), VisitFloat64Atan2(node);
     case IrOpcode::kFloat64Cbrt:
       return MarkAsFloat64(node), VisitFloat64Cbrt(node);
     case IrOpcode::kFloat64Cos:
       return MarkAsFloat64(node), VisitFloat64Cos(node);
+    case IrOpcode::kFloat64Cosh:
+      return MarkAsFloat64(node), VisitFloat64Cosh(node);
     case IrOpcode::kFloat64Exp:
       return MarkAsFloat64(node), VisitFloat64Exp(node);
     case IrOpcode::kFloat64Expm1:
@@ -1156,12 +1205,18 @@
       return MarkAsFloat64(node), VisitFloat64Log10(node);
     case IrOpcode::kFloat64Log2:
       return MarkAsFloat64(node), VisitFloat64Log2(node);
+    case IrOpcode::kFloat64Pow:
+      return MarkAsFloat64(node), VisitFloat64Pow(node);
     case IrOpcode::kFloat64Sin:
       return MarkAsFloat64(node), VisitFloat64Sin(node);
+    case IrOpcode::kFloat64Sinh:
+      return MarkAsFloat64(node), VisitFloat64Sinh(node);
     case IrOpcode::kFloat64Sqrt:
       return MarkAsFloat64(node), VisitFloat64Sqrt(node);
     case IrOpcode::kFloat64Tan:
       return MarkAsFloat64(node), VisitFloat64Tan(node);
+    case IrOpcode::kFloat64Tanh:
+      return MarkAsFloat64(node), VisitFloat64Tanh(node);
     case IrOpcode::kFloat64Equal:
       return VisitFloat64Equal(node);
     case IrOpcode::kFloat64LessThan:
@@ -1202,6 +1257,14 @@
       return VisitLoadFramePointer(node);
     case IrOpcode::kLoadParentFramePointer:
       return VisitLoadParentFramePointer(node);
+    case IrOpcode::kUnalignedLoad: {
+      UnalignedLoadRepresentation type =
+          UnalignedLoadRepresentationOf(node->op());
+      MarkAsRepresentation(type.representation(), node);
+      return VisitUnalignedLoad(node);
+    }
+    case IrOpcode::kUnalignedStore:
+      return VisitUnalignedStore(node);
     case IrOpcode::kCheckedLoad: {
       MachineRepresentation rep =
           CheckedLoadRepresentationOf(node->op()).representation();
@@ -1241,6 +1304,9 @@
     }
     case IrOpcode::kAtomicStore:
       return VisitAtomicStore(node);
+    case IrOpcode::kUnsafePointerAdd:
+      MarkAsRepresentation(MachineType::PointerRepresentation(), node);
+      return VisitUnsafePointerAdd(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -1248,13 +1314,47 @@
   }
 }
 
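+// The VisitImpossibleTo* visitors below handle conversions of values whose
+// type is empty, i.e. values that can never actually be produced. Each one
+// emits a kArchImpossible instruction and defines the result as a zero
+// constant of the requested representation.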
+void InstructionSelector::VisitImpossibleToWord32(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
+}
+
+void InstructionSelector::VisitImpossibleToWord64(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchImpossible,
+       g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
+}
+
+void InstructionSelector::VisitImpossibleToFloat32(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0f)));
+}
+
+void InstructionSelector::VisitImpossibleToFloat64(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0)));
+}
+
+void InstructionSelector::VisitImpossibleToBit(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
+}
+
+void InstructionSelector::VisitImpossibleToTagged(Node* node) {
+  OperandGenerator g(this);
+#if V8_TARGET_ARCH_64_BIT
+  Emit(kArchImpossible,
+       g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
+#else   // V8_TARGET_ARCH_64_BIT
+  Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
+#endif  // V8_TARGET_ARCH_64_BIT
+}
 
 void InstructionSelector::VisitLoadStackPointer(Node* node) {
   OperandGenerator g(this);
   Emit(kArchStackPointer, g.DefineAsRegister(node));
 }
 
-
 void InstructionSelector::VisitLoadFramePointer(Node* node) {
   OperandGenerator g(this);
   Emit(kArchFramePointer, g.DefineAsRegister(node));
@@ -1265,18 +1365,34 @@
   Emit(kArchParentFramePointer, g.DefineAsRegister(node));
 }
 
+void InstructionSelector::VisitFloat64Acos(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
+}
+
+void InstructionSelector::VisitFloat64Acosh(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
+}
+
+void InstructionSelector::VisitFloat64Asin(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
+}
+
+void InstructionSelector::VisitFloat64Asinh(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
+}
+
 void InstructionSelector::VisitFloat64Atan(Node* node) {
   VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
 }
 
-void InstructionSelector::VisitFloat64Atan2(Node* node) {
-  VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
-}
-
 void InstructionSelector::VisitFloat64Atanh(Node* node) {
   VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
 }
 
+void InstructionSelector::VisitFloat64Atan2(Node* node) {
+  VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
+}
+
 void InstructionSelector::VisitFloat64Cbrt(Node* node) {
   VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
 }
@@ -1285,6 +1401,10 @@
   VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
 }
 
+void InstructionSelector::VisitFloat64Cosh(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
+}
+
 void InstructionSelector::VisitFloat64Exp(Node* node) {
   VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
 }
@@ -1309,14 +1429,26 @@
   VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
 }
 
+void InstructionSelector::VisitFloat64Pow(Node* node) {
+  VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
+}
+
 void InstructionSelector::VisitFloat64Sin(Node* node) {
   VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
 }
 
+void InstructionSelector::VisitFloat64Sinh(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
+}
+
 void InstructionSelector::VisitFloat64Tan(Node* node) {
   VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
 }
 
+void InstructionSelector::VisitFloat64Tanh(Node* node) {
+  VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
+}
+
 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
                                           InstructionOperand& index_operand) {
   OperandGenerator g(this);
@@ -1421,7 +1553,6 @@
   UNIMPLEMENTED();
 }
 
-
 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
 
 
@@ -1544,9 +1675,7 @@
           ? g.DefineAsDualLocation(
                 node, linkage()->GetParameterLocation(index),
                 linkage()->GetParameterSecondaryLocation(index))
-          : g.DefineAsLocation(
-                node, linkage()->GetParameterLocation(index),
-                linkage()->GetParameterType(index).representation());
+          : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
 
   Emit(kArchNop, op);
 }
@@ -1557,17 +1686,15 @@
   Node* call = node->InputAt(1);
   DCHECK_EQ(IrOpcode::kCall, call->opcode());
   const CallDescriptor* descriptor = CallDescriptorOf(call->op());
-  Emit(kArchNop,
-       g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
-                          descriptor->GetReturnType(0).representation()));
+  Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0)));
 }
 
 
 void InstructionSelector::VisitOsrValue(Node* node) {
   OperandGenerator g(this);
   int index = OpParameter<int>(node);
-  Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
-                                    MachineRepresentation::kTagged));
+  Emit(kArchNop,
+       g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
 }
 
 
@@ -1593,6 +1720,7 @@
   switch (value->opcode()) {
     case IrOpcode::kInt32AddWithOverflow:
     case IrOpcode::kInt32SubWithOverflow:
+    case IrOpcode::kInt32MulWithOverflow:
     case IrOpcode::kInt64AddWithOverflow:
     case IrOpcode::kInt64SubWithOverflow:
     case IrOpcode::kTryTruncateFloat32ToInt64:
@@ -1652,10 +1780,6 @@
   CallDescriptor::Flags flags = descriptor->flags();
   if (handler) {
     DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
-    IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
-    if (hint == IfExceptionHint::kLocallyCaught) {
-      flags |= CallDescriptor::kHasLocalCatchHandler;
-    }
     flags |= CallDescriptor::kHasExceptionHandler;
     buffer.instruction_args.push_back(g.Label(handler));
   }
@@ -1675,7 +1799,7 @@
     case CallDescriptor::kCallAddress:
       opcode =
           kArchCallCFunction |
-          MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+          MiscField::encode(static_cast<int>(descriptor->ParameterCount()));
       break;
     case CallDescriptor::kCallCodeObject:
       opcode = kArchCallCodeObject | MiscField::encode(flags);
@@ -1699,11 +1823,10 @@
   CallDescriptor const* descriptor = CallDescriptorOf(node->op());
   DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
 
-  // TODO(turbofan): Relax restriction for stack parameters.
-
-  int stack_param_delta = 0;
-  if (linkage()->GetIncomingDescriptor()->CanTailCall(node,
-                                                      &stack_param_delta)) {
+  CallDescriptor* caller = linkage()->GetIncomingDescriptor();
+  if (caller->CanTailCall(node)) {
+    const CallDescriptor* callee = CallDescriptorOf(node->op());
+    int stack_param_delta = callee->GetStackParameterDelta(caller);
     CallBuffer buffer(zone(), descriptor, nullptr);
 
     // Compute InstructionOperands for inputs and outputs.
@@ -1750,10 +1873,12 @@
     }
     opcode |= MiscField::encode(descriptor->flags());
 
-    buffer.instruction_args.push_back(g.TempImmediate(stack_param_delta));
+    Emit(kArchPrepareTailCall, g.NoOutput());
 
-    Emit(kArchPrepareTailCall, g.NoOutput(),
-         g.TempImmediate(stack_param_delta));
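+    // The first unused stack slot is the stack parameter delta between the
+    // callee and the caller, plus one extra slot on architectures that store
+    // the return address on the stack.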
+    int first_unused_stack_slot =
+        (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
+        stack_param_delta;
+    buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
 
     // Emit the tailcall instruction.
     Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
@@ -1819,28 +1944,26 @@
     auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
     for (int i = 0; i < ret_count; ++i) {
       value_locations[i] =
-          g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
-                        linkage()->GetReturnType(i).representation());
+          g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i));
     }
     Emit(kArchRet, 0, nullptr, ret_count, value_locations);
   }
 }
 
-Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
-                                                 InstructionOperand output,
-                                                 InstructionOperand a,
-                                                 InstructionOperand b,
-                                                 Node* frame_state) {
+Instruction* InstructionSelector::EmitDeoptimize(
+    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+    InstructionOperand b, DeoptimizeReason reason, Node* frame_state) {
   size_t output_count = output.IsInvalid() ? 0 : 1;
   InstructionOperand inputs[] = {a, b};
   size_t input_count = arraysize(inputs);
   return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
-                        frame_state);
+                        reason, frame_state);
 }
 
 Instruction* InstructionSelector::EmitDeoptimize(
     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
-    size_t input_count, InstructionOperand* inputs, Node* frame_state) {
+    size_t input_count, InstructionOperand* inputs, DeoptimizeReason reason,
+    Node* frame_state) {
   OperandGenerator g(this);
   FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
   InstructionOperandVector args(instruction_zone());
@@ -1849,9 +1972,8 @@
     args.push_back(inputs[i]);
   }
   opcode |= MiscField::encode(static_cast<int>(input_count));
-  InstructionSequence::StateId const state_id =
-      sequence()->AddFrameStateDescriptor(descriptor);
-  args.push_back(g.TempImmediate(state_id.ToInt()));
+  int const state_id = sequence()->AddDeoptimizationEntry(descriptor, reason);
+  args.push_back(g.TempImmediate(state_id));
   StateObjectDeduplicator deduplicator(instruction_zone());
   AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
                                   &args, FrameStateInputKind::kAny,
@@ -1866,7 +1988,9 @@
   Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
 }
 
-void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
+                                          DeoptimizeReason reason,
+                                          Node* value) {
   InstructionCode opcode = kArchDeoptimize;
   switch (kind) {
     case DeoptimizeKind::kEager:
@@ -1876,7 +2000,7 @@
       opcode |= MiscField::encode(Deoptimizer::SOFT);
       break;
   }
-  EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, value);
+  EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, reason, value);
 }
 
 
@@ -1896,6 +2020,19 @@
   Emit(kArchComment, 0, nullptr, 1, &operand);
 }
 
+void InstructionSelector::VisitUnsafePointerAdd(Node* node) {
+#if V8_TARGET_ARCH_64_BIT
+  VisitInt64Add(node);
+#else   // V8_TARGET_ARCH_64_BIT
+  VisitInt32Add(node);
+#endif  // V8_TARGET_ARCH_64_BIT
+}
+
+void InstructionSelector::VisitRetain(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0)));
+}
+
 bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
   // TODO(jarin) Improve the heuristic here.
   if (node->opcode() == IrOpcode::kFloat64Add ||
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 8ac8e7b..f9f43e9 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -105,10 +105,11 @@
 
   Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                               InstructionOperand a, InstructionOperand b,
-                              Node* frame_state);
+                              DeoptimizeReason reason, Node* frame_state);
   Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                               InstructionOperand* outputs, size_t input_count,
-                              InstructionOperand* inputs, Node* frame_state);
+                              InstructionOperand* inputs,
+                              DeoptimizeReason reason, Node* frame_state);
 
   // ===========================================================================
   // ============== Architecture-independent CPU feature methods. ==============
@@ -151,6 +152,31 @@
   // edge and the two are in the same basic block.
   bool CanCover(Node* user, Node* node) const;
 
+  // Used in pattern matching during code generation.
+  // This function checks that {node} and {user} are in the same basic block,
+  // and that {user} is the only user of {node} in this basic block.  This
+  // check guarantees that there are no users of {node} scheduled between
+  // {node} and {user}, and thus we can select a single instruction for both
+  // nodes, if such an instruction exists. This check can be used for example
+  // when selecting instructions for:
+  //   n = Int32Add(a, b)
+  //   c = Word32Compare(n, 0, cond)
+  //   Branch(c, true_label, false_label)
+  // Here we can generate a flag-setting add instruction, even if the add has
+  // uses in other basic blocks, since the flag-setting add instruction will
+  // still generate the result of the addition and not just set the flags.
+  // However, if we had uses of the add in the same basic block, we could have:
+  //   n = Int32Add(a, b)
+  //   o = OtherOp(n, ...)
+  //   c = Word32Compare(n, 0, cond)
+  //   Branch(c, true_label, false_label)
+  // where we cannot select the add and the compare together.  If we were to
+  // select a flag-setting add instruction for Word32Compare and Int32Add while
+  // visiting Word32Compare, we would then have to select an instruction for
+  // OtherOp *afterwards*, which means we would attempt to use the result of
+  // the add before we have defined it.
+  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;
+
   // Checks if {node} was already defined, and therefore code was already
   // generated for it.
   bool IsDefined(Node* node) const;
@@ -224,7 +250,7 @@
   // {call_code_immediate} to generate immediate operands to calls of code.
   // {call_address_immediate} to generate immediate operands to address calls.
   void InitializeCallBuffer(Node* call, CallBuffer* buffer,
-                            CallBufferFlags flags, int stack_param_delta = 0);
+                            CallBufferFlags flags, int stack_slot_delta = 0);
   bool IsTailCallAddressImmediate();
   int GetTempsCountForTailCallFromJSFunction();
 
@@ -266,9 +292,11 @@
   void VisitGoto(BasicBlock* target);
   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
   void VisitSwitch(Node* node, const SwitchInfo& sw);
-  void VisitDeoptimize(DeoptimizeKind kind, Node* value);
+  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
+                       Node* value);
   void VisitReturn(Node* ret);
   void VisitThrow(Node* value);
+  void VisitRetain(Node* node);
 
   void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                             const CallDescriptor* descriptor, Node* node);
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 1ef42d6..615b644 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -48,6 +48,10 @@
       return kFloatGreaterThanOrEqualOrUnordered;
     case kFloatGreaterThan:
       return kFloatLessThan;
+    case kPositiveOrZero:
+    case kNegative:
+      UNREACHABLE();
+      break;
     case kEqual:
     case kNotEqual:
     case kOverflow:
@@ -61,14 +65,7 @@
 }
 
 bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
-  if (!IsFPRegister() || !that.IsFPRegister() || kSimpleFPAliasing)
-    return EqualsCanonicalized(that);
-  // Both operands are fp registers and aliasing is non-simple.
-  const LocationOperand& loc1 = *LocationOperand::cast(this);
-  const LocationOperand& loc2 = LocationOperand::cast(that);
-  return GetRegConfig()->AreAliases(loc1.representation(), loc1.register_code(),
-                                    loc2.representation(),
-                                    loc2.register_code());
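+  // All allocated FP register operands are canonicalized to the same
+  // representation (see InstructionOperand::GetCanonicalizedValue), so
+  // interference reduces to equality of the canonicalized operands.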
+  return EqualsCanonicalized(that);
 }
 
 void InstructionOperand::Print(const RegisterConfiguration* config) const {
@@ -142,11 +139,15 @@
         os << "["
            << GetRegConfig()->GetDoubleRegisterName(allocated.register_code())
            << "|R";
-      } else {
-        DCHECK(op.IsFloatRegister());
+      } else if (op.IsFloatRegister()) {
         os << "["
            << GetRegConfig()->GetFloatRegisterName(allocated.register_code())
            << "|R";
+      } else {
+        DCHECK(op.IsSimd128Register());
+        os << "["
+           << GetRegConfig()->GetSimd128RegisterName(allocated.register_code())
+           << "|R";
       }
       if (allocated.IsExplicit()) {
         os << "|E";
@@ -179,6 +180,12 @@
         case MachineRepresentation::kSimd128:
           os << "|s128";
           break;
+        case MachineRepresentation::kTaggedSigned:
+          os << "|ts";
+          break;
+        case MachineRepresentation::kTaggedPointer:
+          os << "|tp";
+          break;
         case MachineRepresentation::kTagged:
           os << "|t";
           break;
@@ -448,6 +455,10 @@
       return os << "overflow";
     case kNotOverflow:
       return os << "not overflow";
+    case kPositiveOrZero:
+      return os << "positive or zero";
+    case kNegative:
+      return os << "negative";
   }
   UNREACHABLE();
   return os;
@@ -519,9 +530,6 @@
   DCHECK_EQ(kHeapObject, type());
   Handle<HeapObject> value(
       bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
-  if (value->IsConsString()) {
-    value = String::Flatten(Handle<String>::cast(value), TENURED);
-  }
   return value;
 }
 
@@ -793,6 +801,8 @@
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kFloat64:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kTaggedSigned:
+    case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
       return rep;
     case MachineRepresentation::kNone:
@@ -827,22 +837,16 @@
   representations_[virtual_register] = rep;
 }
 
-
-InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
-    FrameStateDescriptor* descriptor) {
+int InstructionSequence::AddDeoptimizationEntry(
+    FrameStateDescriptor* descriptor, DeoptimizeReason reason) {
   int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
-  deoptimization_entries_.push_back(descriptor);
-  return StateId::FromInt(deoptimization_id);
+  deoptimization_entries_.push_back(DeoptimizationEntry(descriptor, reason));
+  return deoptimization_id;
 }
 
-FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
-    InstructionSequence::StateId state_id) {
-  return deoptimization_entries_[state_id.ToInt()];
-}
-
-
-int InstructionSequence::GetFrameStateDescriptorCount() {
-  return static_cast<int>(deoptimization_entries_.size());
+DeoptimizationEntry const& InstructionSequence::GetDeoptimizationEntry(
+    int state_id) {
+  return deoptimization_entries_[state_id];
 }
 
 
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 7130c3d..b5aea70 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -466,6 +466,8 @@
       case MachineRepresentation::kFloat32:
       case MachineRepresentation::kFloat64:
       case MachineRepresentation::kSimd128:
+      case MachineRepresentation::kTaggedSigned:
+      case MachineRepresentation::kTaggedPointer:
       case MachineRepresentation::kTagged:
         return true;
       case MachineRepresentation::kBit:
@@ -605,17 +607,10 @@
 
 uint64_t InstructionOperand::GetCanonicalizedValue() const {
   if (IsAllocated() || IsExplicit()) {
-    MachineRepresentation rep = LocationOperand::cast(this)->representation();
     MachineRepresentation canonical = MachineRepresentation::kNone;
-    if (IsFloatingPoint(rep)) {
-      if (kSimpleFPAliasing) {
-        // Archs with simple aliasing can treat all FP operands the same.
-        canonical = MachineRepresentation::kFloat64;
-      } else {
-        // We need to distinguish FP operands of different reps when FP
-        // aliasing is not simple (e.g. ARM).
-        canonical = rep;
-      }
+    if (IsFPRegister()) {
+      // We treat all FP register operands the same for simple aliasing.
+      canonical = MachineRepresentation::kFloat64;
     }
     return InstructionOperand::KindField::update(
         LocationOperand::RepresentationField::update(this->value_, canonical),
@@ -1150,6 +1145,8 @@
   }
   StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
 
+  static const int kImpossibleValue = 0xdead;
+
  private:
   FrameStateType type_;
   BailoutId bailout_id_;
@@ -1162,9 +1159,23 @@
   FrameStateDescriptor* outer_state_;
 };
 
+// A deoptimization entry pairs the reason why we deoptimize with the frame
+// state descriptor that we have to go back to.
+class DeoptimizationEntry final {
+ public:
+  DeoptimizationEntry() {}
+  DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeReason reason)
+      : descriptor_(descriptor), reason_(reason) {}
 
-typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
+  FrameStateDescriptor* descriptor() const { return descriptor_; }
+  DeoptimizeReason reason() const { return reason_; }
 
+ private:
+  FrameStateDescriptor* descriptor_ = nullptr;
+  DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
+};
+
+typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
 
 class PhiInstruction final : public ZoneObject {
  public:
@@ -1415,21 +1426,11 @@
     return Constant(static_cast<int32_t>(0));
   }
 
-  class StateId {
-   public:
-    static StateId FromInt(int id) { return StateId(id); }
-    int ToInt() const { return id_; }
-
-   private:
-    explicit StateId(int id) : id_(id) {}
-    int id_;
-  };
-
-  StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
-  FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
-  int GetFrameStateDescriptorCount();
-  DeoptimizationVector const& frame_state_descriptors() const {
-    return deoptimization_entries_;
+  int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
+                             DeoptimizeReason reason);
+  DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
+  int GetDeoptimizationEntryCount() const {
+    return static_cast<int>(deoptimization_entries_.size());
   }
 
   RpoNumber InputRpo(Instruction* instr, size_t index);
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 68d3772..737947a 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -132,16 +132,31 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    case IrOpcode::kLoad: {
-      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+    case IrOpcode::kLoad:
+    case IrOpcode::kUnalignedLoad: {
+      MachineRepresentation rep;
+      if (node->opcode() == IrOpcode::kLoad) {
+        rep = LoadRepresentationOf(node->op()).representation();
+      } else {
+        DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
+        rep = UnalignedLoadRepresentationOf(node->op()).representation();
+      }
 
-      if (load_rep.representation() == MachineRepresentation::kWord64) {
+      if (rep == MachineRepresentation::kWord64) {
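+        // A 64-bit (possibly unaligned) load is split into two 32-bit loads,
+        // one for the low word and one for the high word of the value.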
         Node* base = node->InputAt(0);
         Node* index = node->InputAt(1);
         Node* index_low;
         Node* index_high;
         GetIndexNodes(index, index_low, index_high);
-        const Operator* load_op = machine()->Load(MachineType::Int32());
+        const Operator* load_op;
+
+        if (node->opcode() == IrOpcode::kLoad) {
+          load_op = machine()->Load(MachineType::Int32());
+        } else {
+          DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
+          load_op = machine()->UnalignedLoad(MachineType::Int32());
+        }
+
         Node* high_node;
         if (node->InputCount() > 2) {
           Node* effect_high = node->InputAt(2);
@@ -162,15 +177,21 @@
       }
       break;
     }
-    case IrOpcode::kStore: {
-      StoreRepresentation store_rep = StoreRepresentationOf(node->op());
-      if (store_rep.representation() == MachineRepresentation::kWord64) {
+    case IrOpcode::kStore:
+    case IrOpcode::kUnalignedStore: {
+      MachineRepresentation rep;
+      if (node->opcode() == IrOpcode::kStore) {
+        rep = StoreRepresentationOf(node->op()).representation();
+      } else {
+        DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
+        rep = UnalignedStoreRepresentationOf(node->op());
+      }
+
+      if (rep == MachineRepresentation::kWord64) {
         // We change the original store node to store the low word, and create
         // a new store node to store the high word. The effect and control edges
        // are copied from the original store to the new store node, and the
        // effect edge of the original store is redirected to the new store.
-        WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
-
         Node* base = node->InputAt(0);
         Node* index = node->InputAt(1);
         Node* index_low;
@@ -180,8 +201,16 @@
         DCHECK(HasReplacementLow(value));
         DCHECK(HasReplacementHigh(value));
 
-        const Operator* store_op = machine()->Store(StoreRepresentation(
-            MachineRepresentation::kWord32, write_barrier_kind));
+        const Operator* store_op;
+        if (node->opcode() == IrOpcode::kStore) {
+          WriteBarrierKind write_barrier_kind =
+              StoreRepresentationOf(node->op()).write_barrier_kind();
+          store_op = machine()->Store(StoreRepresentation(
+              MachineRepresentation::kWord32, write_barrier_kind));
+        } else {
+          DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
+          store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
+        }
 
         Node* high_node;
         if (node->InputCount() > 3) {
@@ -749,6 +778,14 @@
       }
       break;
     }
+    case IrOpcode::kWord64ReverseBytes: {
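+      // Byte-reversing a 64-bit value swaps its two 32-bit halves and
+      // byte-reverses each half (e.g. 0x0011223344556677 becomes
+      // 0x7766554433221100), so the low word of the result is the reversed
+      // high word of the input, and vice versa.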
+      Node* input = node->InputAt(0);
+      ReplaceNode(node, graph()->NewNode(machine()->Word32ReverseBytes().op(),
+                                         GetReplacementHigh(input)),
+                  graph()->NewNode(machine()->Word32ReverseBytes().op(),
+                                   GetReplacementLow(input)));
+      break;
+    }
 
     default: { DefaultLowering(node); }
   }
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 81d6392..926bd3f 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -3,6 +3,9 @@
 // found in the LICENSE file.
 
 #include "src/compiler/js-builtin-reducer.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
@@ -40,6 +43,10 @@
     return function->shared()->builtin_function_id();
   }
 
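+  // Determines whether the receiver of the call is known to have a type
+  // that is a subtype of {type}.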
+  bool ReceiverMatches(Type* type) {
+    return NodeProperties::GetType(receiver())->Is(type);
+  }
+
   // Determines whether the call takes zero inputs.
   bool InputsMatchZero() { return GetJSCallArity() == 0; }
 
@@ -66,6 +73,7 @@
     return true;
   }
 
+  Node* receiver() { return NodeProperties::GetValueInput(node_, 1); }
   Node* left() { return GetJSCallInput(0); }
   Node* right() { return GetJSCallInput(1); }
 
@@ -86,11 +94,235 @@
   Node* node_;
 };
 
-JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
+JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
+                                   Flags flags,
+                                   CompilationDependencies* dependencies)
     : AdvancedReducer(editor),
+      dependencies_(dependencies),
+      flags_(flags),
       jsgraph_(jsgraph),
       type_cache_(TypeCache::Get()) {}
 
+namespace {
+
+MaybeHandle<Map> GetMapWitness(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  // Check if the {node} is dominated by a CheckMaps with a single map
+  // for the {receiver}, and if so use that map for the lowering below.
+  for (Node* dominator = effect;;) {
+    if (dominator->opcode() == IrOpcode::kCheckMaps &&
+        dominator->InputAt(0) == receiver) {
+      if (dominator->op()->ValueInputCount() == 2) {
+        HeapObjectMatcher m(dominator->InputAt(1));
+        if (m.HasValue()) return Handle<Map>::cast(m.Value());
+      }
+      return MaybeHandle<Map>();
+    }
+    if (dominator->op()->EffectInputCount() != 1) {
+      // Didn't find any appropriate CheckMaps node.
+      return MaybeHandle<Map>();
+    }
+    dominator = NodeProperties::GetEffectInput(dominator);
+  }
+}
+
+// TODO(turbofan): This was copied from Crankshaft; it might be too restrictive.
+bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
+  DCHECK(!jsarray_map->is_dictionary_map());
+  Isolate* isolate = jsarray_map->GetIsolate();
+  Handle<Name> length_string = isolate->factory()->length_string();
+  DescriptorArray* descriptors = jsarray_map->instance_descriptors();
+  int number =
+      descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
+  DCHECK_NE(DescriptorArray::kNotFound, number);
+  return descriptors->GetDetails(number).IsReadOnly();
+}
+
+// TODO(turbofan): This was copied from Crankshaft; it might be too restrictive.
+bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
+  Isolate* const isolate = receiver_map->GetIsolate();
+  if (!receiver_map->prototype()->IsJSArray()) return false;
+  Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+                                     isolate);
+  // Ensure that all prototypes of the {receiver} are stable.
+  for (PrototypeIterator it(isolate, receiver_prototype, kStartAtReceiver);
+       !it.IsAtEnd(); it.Advance()) {
+    Handle<JSReceiver> current = PrototypeIterator::GetCurrent<JSReceiver>(it);
+    if (!current->map()->is_stable()) return false;
+  }
+  return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+         IsFastElementsKind(receiver_map->elements_kind()) &&
+         !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
+         (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
+         isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+         isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
+         !IsReadOnlyLengthDescriptor(receiver_map);
+}
+
+}  // namespace
+
+// ES6 section 22.1.3.17 Array.prototype.pop ( )
+Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
+  Handle<Map> receiver_map;
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  // TODO(turbofan): Extend this to also handle fast (holey) double elements
+  // once we get the hole NaN mess sorted out in TurboFan/V8.
+  if (GetMapWitness(node).ToHandle(&receiver_map) &&
+      CanInlineArrayResizeOperation(receiver_map) &&
+      IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+    // Install code dependencies on the {receiver} prototype maps and the
+    // global array protector cell.
+    dependencies()->AssumePropertyCell(factory()->array_protector());
+    dependencies()->AssumePrototypeMapsStable(receiver_map);
+
+    // Load the "length" property of the {receiver}.
+    Node* length = effect = graph()->NewNode(
+        simplified()->LoadField(
+            AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+        receiver, effect, control);
+
+    // Check if the {receiver} has any elements.
+    Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+                                   jsgraph()->ZeroConstant());
+    Node* branch =
+        graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* etrue = effect;
+    Node* vtrue = jsgraph()->UndefinedConstant();
+
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+    Node* efalse = effect;
+    Node* vfalse;
+    {
+      // Load the elements backing store from the {receiver}.
+      Node* elements = efalse = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+          receiver, efalse, if_false);
+
+      // Ensure that we aren't popping from a copy-on-write backing store.
+      elements = efalse =
+          graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
+                           elements, efalse, if_false);
+
+      // Compute the new {length}.
+      length = graph()->NewNode(simplified()->NumberSubtract(), length,
+                                jsgraph()->OneConstant());
+
+      // Store the new {length} to the {receiver}.
+      efalse = graph()->NewNode(
+          simplified()->StoreField(
+              AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+          receiver, length, efalse, if_false);
+
+      // Load the last entry from the {elements}.
+      vfalse = efalse = graph()->NewNode(
+          simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
+              receiver_map->elements_kind())),
+          elements, length, efalse, if_false);
+
+      // Store a hole to the element we just removed from the {receiver}.
+      efalse = graph()->NewNode(
+          simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
+              GetHoleyElementsKind(receiver_map->elements_kind()))),
+          elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+    }
+
+    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+    Node* value =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                         vtrue, vfalse, control);
+
+    // Convert the hole to undefined. Do this last, so that the conversion
+    // operator can often be optimized via strength reduction.
+    if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+      value =
+          graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+    }
+
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 22.1.3.18 Array.prototype.push ( )
+Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
+  Handle<Map> receiver_map;
+  // We need exactly target, receiver and value parameters.
+  if (node->op()->ValueInputCount() != 3) return NoChange();
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* value = NodeProperties::GetValueInput(node, 2);
+  if (GetMapWitness(node).ToHandle(&receiver_map) &&
+      CanInlineArrayResizeOperation(receiver_map)) {
+    // Install code dependencies on the {receiver} prototype maps and the
+    // global array protector cell.
+    dependencies()->AssumePropertyCell(factory()->array_protector());
+    dependencies()->AssumePrototypeMapsStable(receiver_map);
+
+    // TODO(turbofan): Perform type checks on the {value}. We are not guaranteed
+    // to learn from these checks in case they fail, as the witness (i.e. the
+    // map check from the LoadIC for a.push) might not be executed in baseline
+    // code (after we stored the value in the builtin and thereby changed the
+    // elements kind of a) before we decide to optimize this function again. We
+    // currently don't have a proper way to deal with this; the proper solution
+    // here is to learn on deopt, i.e. disable Array.prototype.push inlining
+    // for this function.
+    if (IsFastSmiElementsKind(receiver_map->elements_kind())) {
+      value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+                                        value, effect, control);
+    } else if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+      value = effect =
+          graph()->NewNode(simplified()->CheckNumber(), value, effect, control);
+      // Make sure we do not store signaling NaNs into double arrays.
+      value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+    }
+
+    // Load the "length" property of the {receiver}.
+    Node* length = effect = graph()->NewNode(
+        simplified()->LoadField(
+            AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+        receiver, effect, control);
+
+    // Load the elements backing store of the {receiver}.
+    Node* elements = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+        effect, control);
+
+    // TODO(turbofan): Check if we need to grow the {elements} backing store.
+    // This will deopt if we cannot grow the array further, and we currently
+    // don't necessarily learn from it. See the comment on the value type check
+    // above.
+    GrowFastElementsFlags flags = GrowFastElementsFlag::kArrayObject;
+    if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+      flags |= GrowFastElementsFlag::kDoubleElements;
+    }
+    elements = effect =
+        graph()->NewNode(simplified()->MaybeGrowFastElements(flags), receiver,
+                         elements, length, length, effect, control);
+
+    // Append the value to the {elements}.
+    effect = graph()->NewNode(
+        simplified()->StoreElement(
+            AccessBuilder::ForFixedArrayElement(receiver_map->elements_kind())),
+        elements, length, value, effect, control);
+
+    // Return the new length of the {receiver}.
+    value = graph()->NewNode(simplified()->NumberAdd(), length,
+                             jsgraph()->OneConstant());
+
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.1 Math.abs ( x )
 Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
   JSCallReduction r(node);
@@ -103,6 +335,54 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.2 Math.acos ( x )
+Reduction JSBuiltinReducer::ReduceMathAcos(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.acos(a:plain-primitive) -> NumberAcos(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberAcos(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.3 Math.acosh ( x )
+Reduction JSBuiltinReducer::ReduceMathAcosh(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.acosh(a:plain-primitive) -> NumberAcosh(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberAcosh(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+Reduction JSBuiltinReducer::ReduceMathAsin(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.asin(a:plain-primitive) -> NumberAsin(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberAsin(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.5 Math.asinh ( x )
+Reduction JSBuiltinReducer::ReduceMathAsinh(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.asinh(a:plain-primitive) -> NumberAsinh(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberAsinh(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.6 Math.atan ( x )
 Reduction JSBuiltinReducer::ReduceMathAtan(Node* node) {
   JSCallReduction r(node);
@@ -115,6 +395,18 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.7 Math.atanh ( x )
+Reduction JSBuiltinReducer::ReduceMathAtanh(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.atanh(a:plain-primitive) -> NumberAtanh(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberAtanh(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.8 Math.atan2 ( y, x )
 Reduction JSBuiltinReducer::ReduceMathAtan2(Node* node) {
   JSCallReduction r(node);
@@ -130,17 +422,6 @@
   return NoChange();
 }
 
-// ES6 section 20.2.2.7 Math.atanh ( x )
-Reduction JSBuiltinReducer::ReduceMathAtanh(Node* node) {
-  JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::Number())) {
-    // Math.atanh(a:number) -> NumberAtanh(a)
-    Node* value = graph()->NewNode(simplified()->NumberAtanh(), r.left());
-    return Replace(value);
-  }
-  return NoChange();
-}
-
 // ES6 section 20.2.2.10 Math.ceil ( x )
 Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
   JSCallReduction r(node);
@@ -177,6 +458,18 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.13 Math.cosh ( x )
+Reduction JSBuiltinReducer::ReduceMathCosh(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.cosh(a:plain-primitive) -> NumberCosh(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberCosh(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.14 Math.exp ( x )
 Reduction JSBuiltinReducer::ReduceMathExp(Node* node) {
   JSCallReduction r(node);
@@ -292,20 +585,12 @@
     // Math.max() -> -Infinity
     return Replace(jsgraph()->Constant(-V8_INFINITY));
   }
-  if (r.InputsMatchOne(Type::PlainPrimitive())) {
-    // Math.max(a:plain-primitive) -> ToNumber(a)
+  if (r.InputsMatchAll(Type::PlainPrimitive())) {
+    // Math.max(a:plain-primitive, b:plain-primitive, ...)
     Node* value = ToNumber(r.GetJSCallInput(0));
-    return Replace(value);
-  }
-  if (r.InputsMatchAll(Type::Integral32())) {
-    // Math.max(a:int32, b:int32, ...)
-    Node* value = r.GetJSCallInput(0);
     for (int i = 1; i < r.GetJSCallArity(); i++) {
-      Node* const input = r.GetJSCallInput(i);
-      value = graph()->NewNode(
-          common()->Select(MachineRepresentation::kNone),
-          graph()->NewNode(simplified()->NumberLessThan(), input, value), value,
-          input);
+      Node* input = ToNumber(r.GetJSCallInput(i));
+      value = graph()->NewNode(simplified()->NumberMax(), value, input);
     }
     return Replace(value);
   }
@@ -319,21 +604,27 @@
     // Math.min() -> Infinity
     return Replace(jsgraph()->Constant(V8_INFINITY));
   }
-  if (r.InputsMatchOne(Type::PlainPrimitive())) {
-    // Math.min(a:plain-primitive) -> ToNumber(a)
+  if (r.InputsMatchAll(Type::PlainPrimitive())) {
+    // Math.min(a:plain-primitive, b:plain-primitive, ...)
     Node* value = ToNumber(r.GetJSCallInput(0));
+    for (int i = 1; i < r.GetJSCallArity(); i++) {
+      Node* input = ToNumber(r.GetJSCallInput(i));
+      value = graph()->NewNode(simplified()->NumberMin(), value, input);
+    }
     return Replace(value);
   }
-  if (r.InputsMatchAll(Type::Integral32())) {
-    // Math.min(a:int32, b:int32, ...)
-    Node* value = r.GetJSCallInput(0);
-    for (int i = 1; i < r.GetJSCallArity(); i++) {
-      Node* const input = r.GetJSCallInput(i);
-      value = graph()->NewNode(
-          common()->Select(MachineRepresentation::kNone),
-          graph()->NewNode(simplified()->NumberLessThan(), input, value), input,
-          value);
-    }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.26 Math.pow ( x, y )
+Reduction JSBuiltinReducer::ReduceMathPow(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+    // Math.pow(a:plain-primitive,
+    //          b:plain-primitive) -> NumberPow(ToNumber(a), ToNumber(b))
+    Node* left = ToNumber(r.left());
+    Node* right = ToNumber(r.right());
+    Node* value = graph()->NewNode(simplified()->NumberPow(), left, right);
     return Replace(value);
   }
   return NoChange();
@@ -362,6 +653,18 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.29 Math.sign ( x )
+Reduction JSBuiltinReducer::ReduceMathSign(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.sign(a:plain-primitive) -> NumberSign(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberSign(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.30 Math.sin ( x )
 Reduction JSBuiltinReducer::ReduceMathSin(Node* node) {
   JSCallReduction r(node);
@@ -374,6 +677,18 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.31 Math.sinh ( x )
+Reduction JSBuiltinReducer::ReduceMathSinh(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.sinh(a:plain-primitive) -> NumberSinh(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberSinh(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.32 Math.sqrt ( x )
 Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
   JSCallReduction r(node);
@@ -398,6 +713,18 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.34 Math.tanh ( x )
+Reduction JSBuiltinReducer::ReduceMathTanh(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::PlainPrimitive())) {
+    // Math.tanh(a:plain-primitive) -> NumberTanh(ToNumber(a))
+    Node* input = ToNumber(r.GetJSCallInput(0));
+    Node* value = graph()->NewNode(simplified()->NumberTanh(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 20.2.2.35 Math.trunc ( x )
 Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
   JSCallReduction r(node);
@@ -410,6 +737,23 @@
   return NoChange();
 }
 
+// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
+Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(type_cache_.kSafeInteger) ||
+      r.InputsMatchTwo(type_cache_.kSafeInteger,
+                       type_cache_.kZeroOrUndefined) ||
+      r.InputsMatchTwo(type_cache_.kSafeInteger, type_cache_.kTenOrUndefined)) {
+    // Number.parseInt(a:safe-integer) -> NumberToInt32(a)
+    // Number.parseInt(a:safe-integer,b:#0\/undefined) -> NumberToInt32(a)
+    // Number.parseInt(a:safe-integer,b:#10\/undefined) -> NumberToInt32(a)
+    Node* input = r.GetJSCallInput(0);
+    Node* value = graph()->NewNode(simplified()->NumberToInt32(), input);
+    return Replace(value);
+  }
+  return NoChange();
+}
+
 // ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
 Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
   JSCallReduction r(node);
@@ -422,6 +766,207 @@
   return NoChange();
 }
 
+namespace {
+
+Node* GetStringWitness(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Type* receiver_type = NodeProperties::GetType(receiver);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  if (receiver_type->Is(Type::String())) return receiver;
+  // Check if the {node} is dominated by a CheckString renaming for
+// its {receiver}, and if so use that renaming as {receiver} for
+  // the lowering below.
+  for (Node* dominator = effect;;) {
+    if (dominator->opcode() == IrOpcode::kCheckString &&
+        dominator->InputAt(0) == receiver) {
+      return dominator;
+    }
+    if (dominator->op()->EffectInputCount() != 1) {
+      // Didn't find any appropriate CheckString node.
+      return nullptr;
+    }
+    dominator = NodeProperties::GetEffectInput(dominator);
+  }
+}
+
+}  // namespace
+
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
+  // We need at least target, receiver and index parameters.
+  if (node->op()->ValueInputCount() >= 3) {
+    Node* index = NodeProperties::GetValueInput(node, 2);
+    Type* index_type = NodeProperties::GetType(index);
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+
+    if (index_type->Is(Type::Unsigned32())) {
+      if (Node* receiver = GetStringWitness(node)) {
+        // Determine the {receiver} length.
+        Node* receiver_length = effect = graph()->NewNode(
+            simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+            effect, control);
+
+        // Check if {index} is less than {receiver} length.
+        Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
+                                       receiver_length);
+        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                        check, control);
+
+        Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+        Node* vtrue;
+        {
+          // Load the character from the {receiver}.
+          vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+                                   index, if_true);
+
+          // Return it as single character string.
+          vtrue = graph()->NewNode(simplified()->StringFromCharCode(), vtrue);
+        }
+
+        // Return the empty string otherwise.
+        Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+        Node* vfalse = jsgraph()->EmptyStringConstant();
+
+        control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+        Node* value =
+            graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                             vtrue, vfalse, control);
+
+        ReplaceWithValue(node, value, effect, control);
+        return Replace(value);
+      }
+    }
+  }
+
+  return NoChange();
+}
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
+  // We need at least target, receiver and index parameters.
+  if (node->op()->ValueInputCount() >= 3) {
+    Node* index = NodeProperties::GetValueInput(node, 2);
+    Type* index_type = NodeProperties::GetType(index);
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+
+    if (index_type->Is(Type::Unsigned32())) {
+      if (Node* receiver = GetStringWitness(node)) {
+        // Determine the {receiver} length.
+        Node* receiver_length = effect = graph()->NewNode(
+            simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+            effect, control);
+
+        // Check if {index} is less than {receiver} length.
+        Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
+                                       receiver_length);
+        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                        check, control);
+
+        // Load the character from the {receiver}.
+        Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+        Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(),
+                                       receiver, index, if_true);
+
+        // Return NaN otherwise.
+        Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+        Node* vfalse = jsgraph()->NaNConstant();
+
+        control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+        Node* value =
+            graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                             vtrue, vfalse, control);
+
+        ReplaceWithValue(node, value, effect, control);
+        return Replace(value);
+      }
+    }
+  }
+
+  return NoChange();
+}
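
Both reductions above hard-code the ES6 out-of-range behavior into the graph: charAt materializes the empty string and charCodeAt materializes NaN when the index is not below the receiver's length. A plain C++ restatement of that contract (illustrative only, not V8 code):

#include <cmath>
#include <cstdio>
#include <string>

// charAt: an in-range index yields a one-character string, else "".
std::string CharAt(const std::string& s, size_t index) {
  return index < s.size() ? std::string(1, s[index]) : std::string();
}

// charCodeAt: an in-range index yields the character code, else NaN.
double CharCodeAt(const std::string& s, size_t index) {
  if (index >= s.size()) return NAN;
  return static_cast<double>(static_cast<unsigned char>(s[index]));
}

int main() {
  std::printf("\"%s\"\n", CharAt("abc", 5).c_str());  // ""
  std::printf("%f\n", CharCodeAt("abc", 5));          // nan
}
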
+
+namespace {
+
+bool HasInstanceTypeWitness(Node* receiver, Node* effect,
+                            InstanceType instance_type) {
+  for (Node* dominator = effect;;) {
+    if (dominator->opcode() == IrOpcode::kCheckMaps &&
+        dominator->InputAt(0) == receiver) {
+      // Check if all maps have the given {instance_type}.
+      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
+        Node* const map = NodeProperties::GetValueInput(dominator, i);
+        Type* const map_type = NodeProperties::GetType(map);
+        if (!map_type->IsConstant()) return false;
+        Handle<Map> const map_value =
+            Handle<Map>::cast(map_type->AsConstant()->Value());
+        if (map_value->instance_type() != instance_type) return false;
+      }
+      return true;
+    }
+    switch (dominator->opcode()) {
+      case IrOpcode::kStoreField: {
+        FieldAccess const& access = FieldAccessOf(dominator->op());
+        if (access.base_is_tagged == kTaggedBase &&
+            access.offset == HeapObject::kMapOffset) {
+          return false;
+        }
+        break;
+      }
+      case IrOpcode::kStoreElement:
+        break;
+      default: {
+        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+        if (dominator->op()->EffectInputCount() != 1 ||
+            !dominator->op()->HasProperty(Operator::kNoWrite)) {
+          // Didn't find any appropriate CheckMaps node.
+          return false;
+        }
+        break;
+      }
+    }
+    dominator = NodeProperties::GetEffectInput(dominator);
+  }
+}
+
+}  // namespace
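
HasInstanceTypeWitness is a stricter variant of the same backwards walk: it succeeds only at a CheckMaps whose map constants all carry the requested instance type, and it may step over an intermediate effect only when that effect provably cannot change the receiver's map. A StoreField to the map offset aborts the walk, while other field stores, element stores, and kNoWrite effects are transparent. A sketch of just that filtering rule, with hypothetical effect names:

#include <cstdio>

enum class Effect { kStoreMapField, kStoreOtherField, kStoreElement, kPureCheck };

// Whether the backwards walk may step over {e} without invalidating a
// CheckMaps witness found further up the effect chain.
bool MayStepOver(Effect e) {
  switch (e) {
    case Effect::kStoreMapField:
      return false;  // a write to the map slot may retype the receiver
    case Effect::kStoreOtherField:
    case Effect::kStoreElement:
      return true;   // cannot change the receiver's map
    case Effect::kPureCheck:
      return true;   // kNoWrite effects are transparent to the walk
  }
  return false;
}

int main() {
  std::printf("%d\n", MayStepOver(Effect::kStoreMapField));  // 0
  std::printf("%d\n", MayStepOver(Effect::kStoreElement));   // 1
}
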
+
+Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
+    Node* node, InstanceType instance_type, FieldAccess const& access) {
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
+    // Load the {receiver}s field.
+    Node* receiver_length = effect = graph()->NewNode(
+        simplified()->LoadField(access), receiver, effect, control);
+
+    // Check if the {receiver}s buffer was neutered.
+    Node* receiver_buffer = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+        receiver, effect, control);
+    Node* receiver_buffer_bitfield = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+        receiver_buffer, effect, control);
+    Node* check = graph()->NewNode(
+        simplified()->NumberEqual(),
+        graph()->NewNode(
+            simplified()->NumberBitwiseAnd(), receiver_buffer_bitfield,
+            jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
+        jsgraph()->ZeroConstant());
+
+    // Default to zero if the {receiver}s buffer was neutered.
+    Node* value = graph()->NewNode(
+        common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+        check, receiver_length, jsgraph()->ZeroConstant());
+
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+  return NoChange();
+}
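
The guard built here encodes the behavior for detached ("neutered") buffers: the loaded field value collapses to zero whenever the WasNeutered bit is set in the buffer's bit field. A standalone restatement of the Select node's logic; the mask below is illustrative, not V8's actual bit layout:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kWasNeuteredMask = 1u << 3;  // hypothetical bit position

// Mirrors the Select above: "not neutered" is the likely (kTrue-hinted) case.
uint32_t ViewAccessorValue(uint32_t field_value, uint32_t buffer_bitfield) {
  return (buffer_bitfield & kWasNeuteredMask) == 0 ? field_value : 0;
}

int main() {
  std::printf("%u\n", ViewAccessorValue(64, 0));                 // 64
  std::printf("%u\n", ViewAccessorValue(64, kWasNeuteredMask));  // 0
}
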
+
 Reduction JSBuiltinReducer::Reduce(Node* node) {
   Reduction reduction = NoChange();
   JSCallReduction r(node);
@@ -429,27 +974,49 @@
   // Dispatch according to the BuiltinFunctionId if present.
   if (!r.HasBuiltinFunctionId()) return NoChange();
   switch (r.GetBuiltinFunctionId()) {
+    case kArrayPop:
+      return ReduceArrayPop(node);
+    case kArrayPush:
+      return ReduceArrayPush(node);
     case kMathAbs:
       reduction = ReduceMathAbs(node);
       break;
+    case kMathAcos:
+      reduction = ReduceMathAcos(node);
+      break;
+    case kMathAcosh:
+      reduction = ReduceMathAcosh(node);
+      break;
+    case kMathAsin:
+      reduction = ReduceMathAsin(node);
+      break;
+    case kMathAsinh:
+      reduction = ReduceMathAsinh(node);
+      break;
     case kMathAtan:
       reduction = ReduceMathAtan(node);
       break;
-    case kMathAtan2:
-      reduction = ReduceMathAtan2(node);
-      break;
     case kMathAtanh:
       reduction = ReduceMathAtanh(node);
       break;
-    case kMathClz32:
-      reduction = ReduceMathClz32(node);
+    case kMathAtan2:
+      reduction = ReduceMathAtan2(node);
+      break;
+    case kMathCbrt:
+      reduction = ReduceMathCbrt(node);
       break;
     case kMathCeil:
       reduction = ReduceMathCeil(node);
       break;
+    case kMathClz32:
+      reduction = ReduceMathClz32(node);
+      break;
     case kMathCos:
       reduction = ReduceMathCos(node);
       break;
+    case kMathCosh:
+      reduction = ReduceMathCosh(node);
+      break;
     case kMathExp:
       reduction = ReduceMathExp(node);
       break;
@@ -483,27 +1050,62 @@
     case kMathMin:
       reduction = ReduceMathMin(node);
       break;
-    case kMathCbrt:
-      reduction = ReduceMathCbrt(node);
+    case kMathPow:
+      reduction = ReduceMathPow(node);
       break;
     case kMathRound:
       reduction = ReduceMathRound(node);
       break;
+    case kMathSign:
+      reduction = ReduceMathSign(node);
+      break;
     case kMathSin:
       reduction = ReduceMathSin(node);
       break;
+    case kMathSinh:
+      reduction = ReduceMathSinh(node);
+      break;
     case kMathSqrt:
       reduction = ReduceMathSqrt(node);
       break;
     case kMathTan:
       reduction = ReduceMathTan(node);
       break;
+    case kMathTanh:
+      reduction = ReduceMathTanh(node);
+      break;
     case kMathTrunc:
       reduction = ReduceMathTrunc(node);
       break;
+    case kNumberParseInt:
+      reduction = ReduceNumberParseInt(node);
+      break;
     case kStringFromCharCode:
       reduction = ReduceStringFromCharCode(node);
       break;
+    case kStringCharAt:
+      return ReduceStringCharAt(node);
+    case kStringCharCodeAt:
+      return ReduceStringCharCodeAt(node);
+    case kDataViewByteLength:
+      return ReduceArrayBufferViewAccessor(
+          node, JS_DATA_VIEW_TYPE,
+          AccessBuilder::ForJSArrayBufferViewByteLength());
+    case kDataViewByteOffset:
+      return ReduceArrayBufferViewAccessor(
+          node, JS_DATA_VIEW_TYPE,
+          AccessBuilder::ForJSArrayBufferViewByteOffset());
+    case kTypedArrayByteLength:
+      return ReduceArrayBufferViewAccessor(
+          node, JS_TYPED_ARRAY_TYPE,
+          AccessBuilder::ForJSArrayBufferViewByteLength());
+    case kTypedArrayByteOffset:
+      return ReduceArrayBufferViewAccessor(
+          node, JS_TYPED_ARRAY_TYPE,
+          AccessBuilder::ForJSArrayBufferViewByteOffset());
+    case kTypedArrayLength:
+      return ReduceArrayBufferViewAccessor(
+          node, JS_TYPED_ARRAY_TYPE, AccessBuilder::ForJSTypedArrayLength());
     default:
       break;
   }
@@ -530,6 +1132,7 @@
 
 Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
 
+Factory* JSBuiltinReducer::factory() const { return isolate()->factory(); }
 
 Isolate* JSBuiltinReducer::isolate() const { return jsgraph()->isolate(); }
 
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index c915792..2da8347 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -5,38 +5,59 @@
 #ifndef V8_COMPILER_JS_BUILTIN_REDUCER_H_
 #define V8_COMPILER_JS_BUILTIN_REDUCER_H_
 
+#include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
+class CompilationDependencies;
+class Factory;
 class TypeCache;
 
 namespace compiler {
 
 // Forward declarations.
 class CommonOperatorBuilder;
+struct FieldAccess;
 class JSGraph;
 class SimplifiedOperatorBuilder;
 
 
 class JSBuiltinReducer final : public AdvancedReducer {
  public:
-  explicit JSBuiltinReducer(Editor* editor, JSGraph* jsgraph);
+  // Flags that control the mode of operation.
+  enum Flag {
+    kNoFlags = 0u,
+    kDeoptimizationEnabled = 1u << 0,
+  };
+  typedef base::Flags<Flag> Flags;
+
+  JSBuiltinReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
+                   CompilationDependencies* dependencies);
   ~JSBuiltinReducer() final {}
 
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceArrayPop(Node* node);
+  Reduction ReduceArrayPush(Node* node);
   Reduction ReduceMathAbs(Node* node);
+  Reduction ReduceMathAcos(Node* node);
+  Reduction ReduceMathAcosh(Node* node);
+  Reduction ReduceMathAsin(Node* node);
+  Reduction ReduceMathAsinh(Node* node);
   Reduction ReduceMathAtan(Node* node);
-  Reduction ReduceMathAtan2(Node* node);
   Reduction ReduceMathAtanh(Node* node);
+  Reduction ReduceMathAtan2(Node* node);
+  Reduction ReduceMathCbrt(Node* node);
   Reduction ReduceMathCeil(Node* node);
   Reduction ReduceMathClz32(Node* node);
   Reduction ReduceMathCos(Node* node);
+  Reduction ReduceMathCosh(Node* node);
   Reduction ReduceMathExp(Node* node);
+  Reduction ReduceMathExpm1(Node* node);
   Reduction ReduceMathFloor(Node* node);
   Reduction ReduceMathFround(Node* node);
   Reduction ReduceMathImul(Node* node);
@@ -46,28 +67,43 @@
   Reduction ReduceMathLog2(Node* node);
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathMin(Node* node);
-  Reduction ReduceMathCbrt(Node* node);
-  Reduction ReduceMathExpm1(Node* node);
+  Reduction ReduceMathPow(Node* node);
   Reduction ReduceMathRound(Node* node);
+  Reduction ReduceMathSign(Node* node);
   Reduction ReduceMathSin(Node* node);
+  Reduction ReduceMathSinh(Node* node);
   Reduction ReduceMathSqrt(Node* node);
   Reduction ReduceMathTan(Node* node);
+  Reduction ReduceMathTanh(Node* node);
   Reduction ReduceMathTrunc(Node* node);
+  Reduction ReduceNumberParseInt(Node* node);
+  Reduction ReduceStringCharAt(Node* node);
+  Reduction ReduceStringCharCodeAt(Node* node);
   Reduction ReduceStringFromCharCode(Node* node);
+  Reduction ReduceArrayBufferViewAccessor(Node* node,
+                                          InstanceType instance_type,
+                                          FieldAccess const& access);
 
   Node* ToNumber(Node* value);
   Node* ToUint32(Node* value);
 
+  Flags flags() const { return flags_; }
   Graph* graph() const;
+  Factory* factory() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
   CommonOperatorBuilder* common() const;
   SimplifiedOperatorBuilder* simplified() const;
+  CompilationDependencies* dependencies() const { return dependencies_; }
 
+  CompilationDependencies* const dependencies_;
+  Flags const flags_;
   JSGraph* const jsgraph_;
   TypeCache const& type_cache_;
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(JSBuiltinReducer::Flags)
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index f4b0d7b..e390214 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -6,6 +6,7 @@
 
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/objects-inl.h"
 #include "src/type-feedback-vector-inl.h"
 
@@ -128,7 +129,7 @@
     // we can only optimize this in case the {node} was already inlined into
     // some other function (and same for the {arg_array}).
     CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
-    Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
+    Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
     Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
     if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
     FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
@@ -220,7 +221,6 @@
   Node* context = NodeProperties::GetContextInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
-  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Try to specialize JSCallFunction {node}s with constant {target}s.
   HeapObjectMatcher m(target);
@@ -323,16 +323,13 @@
     }
 
     // Check that the {target} is still the {array_function}.
-    Node* check = graph()->NewNode(
-        javascript()->StrictEqual(CompareOperationHints::Any()), target,
-        array_function, context);
-    control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                        frame_state, effect, control);
+    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+                                   array_function);
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceValueInput(node, array_function, 0);
     NodeProperties::ReplaceEffectInput(node, effect);
-    NodeProperties::ReplaceControlInput(node, control);
     return ReduceArrayConstructor(node);
   } else if (feedback->IsWeakCell()) {
     Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
@@ -341,16 +338,14 @@
           jsgraph()->Constant(handle(cell->value(), isolate()));
 
       // Check that the {target} is still the {target_function}.
-      Node* check = graph()->NewNode(
-          javascript()->StrictEqual(CompareOperationHints::Any()), target,
-          target_function, context);
-      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                          frame_state, effect, control);
+      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+                                     target_function);
+      effect =
+          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
       // Specialize the JSCallFunction node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
       NodeProperties::ReplaceEffectInput(node, effect);
-      NodeProperties::ReplaceControlInput(node, control);
 
       // Try to further reduce the JSCallFunction {node}.
       Reduction const reduction = ReduceJSCallFunction(node);
@@ -371,7 +366,6 @@
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Try to specialize JSCallConstruct {node}s with constant {target}s.
   HeapObjectMatcher m(target);
@@ -445,15 +439,12 @@
     }
 
     // Check that the {target} is still the {array_function}.
-    Node* check = graph()->NewNode(
-        javascript()->StrictEqual(CompareOperationHints::Any()), target,
-        array_function, context);
-    control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                        frame_state, effect, control);
+    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+                                   array_function);
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceEffectInput(node, effect);
-    NodeProperties::ReplaceControlInput(node, control);
     for (int i = arity; i > 0; --i) {
       NodeProperties::ReplaceValueInput(
           node, NodeProperties::GetValueInput(node, i), i + 1);
@@ -468,16 +459,14 @@
           jsgraph()->Constant(handle(cell->value(), isolate()));
 
       // Check that the {target} is still the {target_function}.
-      Node* check = graph()->NewNode(
-          javascript()->StrictEqual(CompareOperationHints::Any()), target,
-          target_function, context);
-      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                          frame_state, effect, control);
+      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+                                     target_function);
+      effect =
+          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
       // Specialize the JSCallConstruct node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
       NodeProperties::ReplaceEffectInput(node, effect);
-      NodeProperties::ReplaceControlInput(node, control);
       if (target == new_target) {
         NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
       }
@@ -514,6 +503,10 @@
   return jsgraph()->javascript();
 }
 
+SimplifiedOperatorBuilder* JSCallReducer::simplified() const {
+  return jsgraph()->simplified();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 9ffae15..8d9700a 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -16,7 +16,7 @@
 class CommonOperatorBuilder;
 class JSGraph;
 class JSOperatorBuilder;
-
+class SimplifiedOperatorBuilder;
 
 // Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
@@ -52,6 +52,7 @@
   MaybeHandle<Context> native_context() const { return native_context_; }
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
+  SimplifiedOperatorBuilder* simplified() const;
 
   JSGraph* const jsgraph_;
   Flags const flags_;
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index 0f829d4..f2c5edd 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -102,7 +102,7 @@
 
 // Retrieves the frame state holding actual argument values.
 Node* GetArgumentsFrameState(Node* frame_state) {
-  Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+  Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state);
   FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
   return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
              ? outer_state
@@ -279,7 +279,7 @@
 Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
   CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
-  Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* const frame_state = NodeProperties::GetFrameStateInput(node);
   Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
   Node* const control = graph()->start();
   FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
@@ -509,12 +509,144 @@
   return Changed(node);
 }
 
+Reduction JSCreateLowering::ReduceNewArrayToStubCall(
+    Node* node, Handle<AllocationSite> site) {
+  CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+  int const arity = static_cast<int>(p.arity());
+
+  ElementsKind elements_kind = site->GetElementsKind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (arity == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+                                        override_mode);
+    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+        CallDescriptor::kNeedsFrameState);
+    node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+    node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(0));
+    node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+    NodeProperties::ChangeOp(node, common()->Call(desc));
+    return Changed(node);
+  } else if (arity == 1) {
+    AllocationSiteOverrideMode override_mode =
+        (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+            ? DISABLE_ALLOCATION_SITES
+            : DONT_OVERRIDE;
+
+    if (IsHoleyElementsKind(elements_kind)) {
+      ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
+                                              override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+          CallDescriptor::kNeedsFrameState);
+      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(node, common()->Call(desc));
+      return Changed(node);
+    }
+
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+    Node* length = NodeProperties::GetValueInput(node, 2);
+    Node* equal = graph()->NewNode(simplified()->ReferenceEqual(), length,
+                                   jsgraph()->ZeroConstant());
+
+    Node* branch =
+        graph()->NewNode(common()->Branch(BranchHint::kFalse), equal, control);
+    Node* call_holey;
+    Node* call_packed;
+    Node* if_success_packed;
+    Node* if_success_holey;
+    Node* context = NodeProperties::GetContextInput(node);
+    Node* frame_state = NodeProperties::GetFrameStateInput(node);
+    Node* if_equal = graph()->NewNode(common()->IfTrue(), branch);
+    {
+      ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
+                                              override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+          CallDescriptor::kNeedsFrameState);
+
+      Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
+                        node->InputAt(1),
+                        jsgraph()->HeapConstant(site),
+                        jsgraph()->Int32Constant(1),
+                        jsgraph()->UndefinedConstant(),
+                        length,
+                        context,
+                        frame_state,
+                        effect,
+                        if_equal};
+
+      call_holey =
+          graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
+      if_success_holey = graph()->NewNode(common()->IfSuccess(), call_holey);
+    }
+    Node* if_not_equal = graph()->NewNode(common()->IfFalse(), branch);
+    {
+      // Require elements kind to "go holey."
+      ArraySingleArgumentConstructorStub stub(
+          isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+          CallDescriptor::kNeedsFrameState);
+
+      Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
+                        node->InputAt(1),
+                        jsgraph()->HeapConstant(site),
+                        jsgraph()->Int32Constant(1),
+                        jsgraph()->UndefinedConstant(),
+                        length,
+                        context,
+                        frame_state,
+                        effect,
+                        if_not_equal};
+
+      call_packed =
+          graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
+      if_success_packed = graph()->NewNode(common()->IfSuccess(), call_packed);
+    }
+    Node* merge = graph()->NewNode(common()->Merge(2), if_success_holey,
+                                   if_success_packed);
+    Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), call_holey,
+                                        call_packed, merge);
+    Node* phi =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                         call_holey, call_packed, merge);
+
+    ReplaceWithValue(node, phi, effect_phi, merge);
+    return Changed(node);
+  }
+
+  DCHECK(arity > 1);
+  ArrayNArgumentsConstructorStub stub(isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), arity + 1,
+      CallDescriptor::kNeedsFrameState);
+  node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+  node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+  node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+  node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+  return Changed(node);
+}
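
ReduceNewArrayToStubCall dispatches on the constructor call's arity: zero arguments take the no-argument stub, more than one the N-arguments stub, and the single-argument case either goes straight to a single-argument stub (when the elements kind is already holey) or branches at runtime on length == 0, since new Array(n) with n > 0 must produce holes. The dispatch shape in isolation, with stand-in stub names:

#include <cstdio>

enum class Stub {
  kNoArgument,                  // arity == 0
  kSingleArgument,              // arity == 1, kind already holey
  kSingleArgumentRuntimeBranch, // arity == 1, branch packed vs. holey
  kNArguments                   // arity > 1
};

Stub SelectArrayStub(int arity, bool holey_elements_kind) {
  if (arity == 0) return Stub::kNoArgument;
  if (arity == 1) {
    // An already-holey kind needs no branch; otherwise the lowering emits
    // a runtime length == 0 check choosing between packed and holey stubs.
    return holey_elements_kind ? Stub::kSingleArgument
                               : Stub::kSingleArgumentRuntimeBranch;
  }
  return Stub::kNArguments;
}

int main() {
  std::printf("%d\n", static_cast<int>(SelectArrayStub(1, false)));  // 2
}
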
+
 Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
   CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* new_target = NodeProperties::GetValueInput(node, 1);
 
+  // TODO(mstarzinger): Array constructor can throw. Hook up exceptional edges.
+  if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
   // TODO(bmeurer): Optimize the subclassing case.
   if (target != new_target) return NoChange();
 
@@ -533,16 +665,16 @@
     } else if (p.arity() == 1) {
       Node* length = NodeProperties::GetValueInput(node, 2);
       Type* length_type = NodeProperties::GetType(length);
-      if (length_type->Is(Type::SignedSmall()) &&
-          length_type->Min() >= 0 &&
-          length_type->Max() <= kElementLoopUnrollLimit) {
+      if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
+          length_type->Max() <= kElementLoopUnrollLimit &&
+          length_type->Min() == length_type->Max()) {
         int capacity = static_cast<int>(length_type->Max());
         return ReduceNewArray(node, length, capacity, site);
       }
     }
   }
 
-  return NoChange();
+  return ReduceNewArrayToStubCall(node, site);
 }
 
 Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
@@ -955,7 +1087,7 @@
         effect = graph()->NewNode(
             common()->BeginRegion(RegionObservability::kNotObservable), effect);
         value = effect = graph()->NewNode(
-            simplified()->Allocate(NOT_TENURED),
+            simplified()->Allocate(pretenure),
             jsgraph()->Constant(HeapNumber::kSize), effect, control);
         effect = graph()->NewNode(
             simplified()->StoreField(AccessBuilder::ForMap()), value,
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index 57b28af..2262e66 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -72,6 +72,8 @@
                                     PretenureFlag pretenure,
                                     AllocationSiteUsageContext* site_context);
 
+  Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
+
   // Infers the LiteralsArray to use for a given {node}.
   MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
 
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 47a82d2..812d3e7 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -16,18 +16,16 @@
 namespace internal {
 namespace compiler {
 
-static CallDescriptor::Flags AdjustFrameStatesForCall(Node* node) {
-  int count = OperatorProperties::GetFrameStateInputCount(node->op());
-  if (count > 1) {
-    int index = NodeProperties::FirstFrameStateIndex(node) + 1;
-    do {
-      node->RemoveInput(index);
-    } while (--count > 1);
-  }
-  return count > 0 ? CallDescriptor::kNeedsFrameState
-                   : CallDescriptor::kNoFlags;
+namespace {
+
+CallDescriptor::Flags FrameStateFlagForCall(Node* node) {
+  return OperatorProperties::HasFrameStateInput(node->op())
+             ? CallDescriptor::kNeedsFrameState
+             : CallDescriptor::kNoFlags;
 }
 
+}  // namespace
+
 JSGenericLowering::JSGenericLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
 
 JSGenericLowering::~JSGenericLowering() {}
@@ -52,15 +50,14 @@
     ReplaceWithRuntimeCall(node, fun);            \
   }
 REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
-REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
 REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
 #undef REPLACE_RUNTIME_CALL
 
-#define REPLACE_STUB_CALL(Name)                                   \
-  void JSGenericLowering::LowerJS##Name(Node* node) {             \
-    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
-    Callable callable = CodeFactory::Name(isolate());             \
-    ReplaceWithStubCall(node, callable, flags);                   \
+#define REPLACE_STUB_CALL(Name)                                \
+  void JSGenericLowering::LowerJS##Name(Node* node) {          \
+    CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
+    Callable callable = CodeFactory::Name(isolate());          \
+    ReplaceWithStubCall(node, callable, flags);                \
   }
 REPLACE_STUB_CALL(Add)
 REPLACE_STUB_CALL(Subtract)
@@ -107,7 +104,7 @@
 void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
                                                Runtime::FunctionId f,
                                                int nargs_override) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Operator::Properties properties = node->op()->properties();
   const Runtime::Function* fun = Runtime::FunctionForId(f);
   int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
@@ -123,14 +120,14 @@
 
 void JSGenericLowering::LowerJSStrictEqual(Node* node) {
   Callable callable = CodeFactory::StrictEqual(isolate());
-  node->AppendInput(zone(), graph()->start());
+  node->RemoveInput(4);  // control
   ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
                       Operator::kEliminatable);
 }
 
 void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
   Callable callable = CodeFactory::StrictNotEqual(isolate());
-  node->AppendInput(zone(), graph()->start());
+  node->RemoveInput(4);  // control
   ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
                       Operator::kEliminatable);
 }
@@ -154,7 +151,7 @@
   Node* closure = NodeProperties::GetValueInput(node, 2);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const PropertyAccess& p = PropertyAccessOf(node->op());
   Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
   // Load the type feedback vector from the closure.
@@ -178,7 +175,7 @@
   Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
   // Load the type feedback vector from the closure.
@@ -203,7 +200,7 @@
   Node* closure = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
   Callable callable =
       CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
@@ -225,10 +222,13 @@
 
 
 void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* key = NodeProperties::GetValueInput(node, 1);
+  Node* value = NodeProperties::GetValueInput(node, 2);
   Node* closure = NodeProperties::GetValueInput(node, 3);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   PropertyAccess const& p = PropertyAccessOf(node->op());
   LanguageMode language_mode = p.language_mode();
   Callable callable =
@@ -243,18 +243,26 @@
       jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
-  node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(4, vector);
+  typedef StoreWithVectorDescriptor Descriptor;
+  node->InsertInputs(zone(), 0, 1);
+  node->ReplaceInput(Descriptor::kReceiver, receiver);
+  node->ReplaceInput(Descriptor::kName, key);
+  node->ReplaceInput(Descriptor::kValue, value);
+  node->ReplaceInput(Descriptor::kSlot,
+                     jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(Descriptor::kVector, vector);
   node->ReplaceInput(7, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 1);
   Node* closure = NodeProperties::GetValueInput(node, 2);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable =
       CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
@@ -268,20 +276,26 @@
       jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
                                 kHeapObjectTag),
       effect, control);
-  node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
-  node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(4, vector);
+  typedef StoreWithVectorDescriptor Descriptor;
+  node->InsertInputs(zone(), 0, 2);
+  node->ReplaceInput(Descriptor::kReceiver, receiver);
+  node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
+  node->ReplaceInput(Descriptor::kValue, value);
+  node->ReplaceInput(Descriptor::kSlot,
+                     jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(Descriptor::kVector, vector);
   node->ReplaceInput(7, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+  Node* value = NodeProperties::GetValueInput(node, 0);
   Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
   Callable callable =
       CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
@@ -305,10 +319,14 @@
       machine()->Load(MachineType::AnyTagged()), native_context,
       jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
       effect, control);
-  node->InsertInput(zone(), 0, global);
-  node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
-  node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(4, vector);
+  typedef StoreWithVectorDescriptor Descriptor;
+  node->InsertInputs(zone(), 0, 3);
+  node->ReplaceInput(Descriptor::kReceiver, global);
+  node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
+  node->ReplaceInput(Descriptor::kValue, value);
+  node->ReplaceInput(Descriptor::kSlot,
+                     jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(Descriptor::kVector, vector);
   node->ReplaceInput(7, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
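
The three store lowerings above now rebuild the node's inputs to match StoreWithVectorDescriptor's slot order instead of patching inputs by bare index: InsertInputs makes room at the front, then each operand is written to its named descriptor position. A toy version of that reshuffle using the JSStoreNamed shape; the slot indices are hypothetical stand-ins for the descriptor's real ones:

#include <cstdio>
#include <vector>

// Hypothetical slot order (the real indices come from
// StoreWithVectorDescriptor).
enum Slot { kReceiver = 0, kName = 1, kValue = 2, kSlotIndex = 3, kVector = 4 };

int main() {
  // Old input layout of a JSStoreNamed node: receiver, value, closure.
  std::vector<const char*> inputs = {"receiver", "value", "closure"};
  // InsertInputs(zone(), 0, 2): make room for two extra inputs up front.
  inputs.insert(inputs.begin(), 2, nullptr);
  // ReplaceInput(Descriptor::k..., ...): write each operand to its slot.
  inputs[kReceiver] = "receiver";
  inputs[kName] = "p.name()";
  inputs[kValue] = "value";
  inputs[kSlotIndex] = "feedback slot";
  inputs[kVector] = "vector";
  for (const char* in : inputs) std::printf("%s\n", in);
}
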
@@ -323,7 +341,7 @@
 
 
 void JSGenericLowering::LowerJSInstanceOf(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Callable callable = CodeFactory::InstanceOf(isolate());
   ReplaceWithStubCall(node, callable, flags);
 }
@@ -368,7 +386,7 @@
 
 
 void JSGenericLowering::LowerJSCreate(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Callable callable = CodeFactory::FastNewObject(isolate());
   ReplaceWithStubCall(node, callable, flags);
 }
@@ -394,80 +412,25 @@
   CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
   int const arity = static_cast<int>(p.arity());
   Handle<AllocationSite> const site = p.site();
-
-  // TODO(turbofan): We embed the AllocationSite from the Operator at this
-  // point, which we should not do once we want to both consume the feedback
-  // but at the same time shared the optimized code across native contexts,
-  // as the AllocationSite is associated with a single native context (it's
-  // stored in the type feedback vector after all). Once we go for cross
-  // context code generation, we should somehow find a way to get to the
-  // allocation site for the actual native context at runtime.
-  if (!site.is_null()) {
-    // Reduce {node} to the appropriate ArrayConstructorStub backend.
-    // Note that these stubs "behave" like JSFunctions, which means they
-    // expect a receiver on the stack, which they remove. We just push
-    // undefined for the receiver.
-    ElementsKind elements_kind = site->GetElementsKind();
-    AllocationSiteOverrideMode override_mode =
-        (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
-            ? DISABLE_ALLOCATION_SITES
-            : DONT_OVERRIDE;
-    if (arity == 0) {
-      ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
-                                          override_mode);
-      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
-          CallDescriptor::kNeedsFrameState);
-      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(0));
-      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-      NodeProperties::ChangeOp(node, common()->Call(desc));
-    } else if (arity == 1) {
-      // TODO(bmeurer): Optimize for the 0 length non-holey case?
-      ArraySingleArgumentConstructorStub stub(
-          isolate(), GetHoleyElementsKind(elements_kind), override_mode);
-      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
-          CallDescriptor::kNeedsFrameState);
-      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
-      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-      NodeProperties::ChangeOp(node, common()->Call(desc));
-    } else {
-      ArrayNArgumentsConstructorStub stub(isolate());
-      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
-          arity + 1, CallDescriptor::kNeedsFrameState);
-      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
-      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-      NodeProperties::ChangeOp(node, common()->Call(desc));
-    }
-  } else {
-    Node* new_target = node->InputAt(1);
-    Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
-                                     : jsgraph()->HeapConstant(site);
-    node->RemoveInput(1);
-    node->InsertInput(zone(), 1 + arity, new_target);
-    node->InsertInput(zone(), 2 + arity, type_info);
-    ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
-  }
+  Node* new_target = node->InputAt(1);
+  Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
+                                   : jsgraph()->HeapConstant(site);
+  node->RemoveInput(1);
+  node->InsertInput(zone(), 1 + arity, new_target);
+  node->InsertInput(zone(), 2 + arity, type_info);
+  ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
 }
 
 
 void JSGenericLowering::LowerJSCreateClosure(Node* node) {
   CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Handle<SharedFunctionInfo> const shared_info = p.shared_info();
   node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
 
   // Use the FastNewClosureStub only for functions allocated in new space.
   if (p.pretenure() == NOT_TENURED) {
-    Callable callable = CodeFactory::FastNewClosure(
-        isolate(), shared_info->language_mode(), shared_info->kind());
+    Callable callable = CodeFactory::FastNewClosure(isolate());
     ReplaceWithStubCall(node, callable, flags);
   } else {
     ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
@@ -479,11 +442,11 @@
 
 void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
   int const slot_count = OpParameter<int>(node->op());
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
 
-  // Use the FastNewContextStub only for function contexts up maximum size.
-  if (slot_count <= FastNewContextStub::kMaximumSlots) {
-    Callable callable = CodeFactory::FastNewContext(isolate(), slot_count);
+  if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
+    Callable callable = CodeFactory::FastNewFunctionContext(isolate());
+    node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
     ReplaceWithStubCall(node, callable, flags);
   } else {
     ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
@@ -498,7 +461,7 @@
 
 void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
 
@@ -517,7 +480,7 @@
 
 void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -537,7 +500,7 @@
 
 void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Callable callable = CodeFactory::FastCloneRegExp(isolate());
   Node* literal_index = jsgraph()->SmiConstant(p.index());
   Node* literal_flags = jsgraph()->SmiConstant(p.flags());
@@ -573,7 +536,7 @@
 void JSGenericLowering::LowerJSCallConstruct(Node* node) {
   CallConstructParameters const& p = CallConstructParametersOf(node->op());
   int const arg_count = static_cast<int>(p.arity() - 2);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Callable callable = CodeFactory::Construct(isolate());
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
@@ -595,7 +558,7 @@
   int const arg_count = static_cast<int>(p.arity() - 2);
   ConvertReceiverMode const mode = p.convert_mode();
   Callable callable = CodeFactory::Call(isolate(), mode);
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   if (p.tail_call_mode() == TailCallMode::kAllow) {
     flags |= CallDescriptor::kSupportsTailCalls;
   }
@@ -611,7 +574,6 @@
 
 void JSGenericLowering::LowerJSCallRuntime(Node* node) {
   const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
-  AdjustFrameStatesForCall(node);
   ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
 }
 
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index 31407e8..2b4bf1c 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -133,7 +133,6 @@
   Node* value = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Retrieve the global object from the given {node}.
   Handle<JSGlobalObject> global_object;
@@ -170,37 +169,40 @@
       // Record a code dependency on the cell, and just deoptimize if the new
       // value doesn't match the previous value stored inside the cell.
       dependencies()->AssumePropertyCell(property_cell);
-      Node* check =
-          graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
-                           jsgraph()->Constant(property_cell_value));
-      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                          frame_state, effect, control);
+      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
+                                     jsgraph()->Constant(property_cell_value));
+      effect =
+          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
       break;
     }
     case PropertyCellType::kConstantType: {
       // Record a code dependency on the cell, and just deoptimize if the new
       // values' type doesn't match the type of the previous value in the cell.
       dependencies()->AssumePropertyCell(property_cell);
-      Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-      Type* property_cell_value_type = Type::TaggedSigned();
+      Type* property_cell_value_type;
       if (property_cell_value->IsHeapObject()) {
-        // Deoptimize if the {value} is a Smi.
-        control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
-                                            frame_state, effect, control);
-
-        // Load the {value} map check against the {property_cell} map.
-        Node* value_map = effect =
-            graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                             value, effect, control);
+        // We cannot do anything if the {property_cell_value}s map is no
+        // longer stable.
         Handle<Map> property_cell_value_map(
             Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-        check = graph()->NewNode(
-            simplified()->ReferenceEqual(Type::Any()), value_map,
-            jsgraph()->HeapConstant(property_cell_value_map));
+        if (!property_cell_value_map->is_stable()) return NoChange();
+        dependencies()->AssumeMapStable(property_cell_value_map);
+
+        // Check that the {value} is a HeapObject.
+        value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+                                          value, effect, control);
+
+        // Check {value} map against the {property_cell} map.
+        effect = graph()->NewNode(
+            simplified()->CheckMaps(1), value,
+            jsgraph()->HeapConstant(property_cell_value_map), effect, control);
         property_cell_value_type = Type::TaggedPointer();
+      } else {
+        // Check that the {value} is a Smi.
+        value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+                                          value, effect, control);
+        property_cell_value_type = Type::TaggedSigned();
       }
-      control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                          frame_state, effect, control);
       effect = graph()->NewNode(
           simplified()->StoreField(
               AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 3f20daa..cafd047 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -29,15 +29,23 @@
                 HeapConstant(isolate()->builtins()->ToNumber()));
 }
 
-Node* JSGraph::CEntryStubConstant(int result_size) {
-  if (result_size == 1) {
-    return CACHED(kCEntryStubConstant,
-                  HeapConstant(CEntryStub(isolate(), 1).GetCode()));
+Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
+                                  ArgvMode argv_mode, bool builtin_exit_frame) {
+  if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack &&
+      result_size == 1) {
+    CachedNode key = builtin_exit_frame
+                         ? kCEntryStubWithBuiltinExitFrameConstant
+                         : kCEntryStubConstant;
+    return CACHED(key,
+                  HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
+                                          argv_mode, builtin_exit_frame)
+                                   .GetCode()));
   }
-  return HeapConstant(CEntryStub(isolate(), result_size).GetCode());
+  CEntryStub stub(isolate(), result_size, save_doubles, argv_mode,
+                  builtin_exit_frame);
+  return HeapConstant(stub.GetCode());
 }
 
-
 Node* JSGraph::EmptyFixedArrayConstant() {
   return CACHED(kEmptyFixedArrayConstant,
                 HeapConstant(factory()->empty_fixed_array()));
@@ -48,6 +56,20 @@
                 HeapConstant(factory()->empty_literals_array()));
 }
 
+Node* JSGraph::EmptyStringConstant() {
+  return CACHED(kEmptyStringConstant, HeapConstant(factory()->empty_string()));
+}
+
+Node* JSGraph::FixedArrayMapConstant() {
+  return CACHED(kFixedArrayMapConstant,
+                HeapConstant(factory()->fixed_array_map()));
+}
+
+Node* JSGraph::FixedDoubleArrayMapConstant() {
+  return CACHED(kFixedDoubleArrayMapConstant,
+                HeapConstant(factory()->fixed_double_array_map()));
+}
+
 Node* JSGraph::HeapNumberMapConstant() {
   return CACHED(kHeapNumberMapConstant,
                 HeapConstant(factory()->heap_number_map()));
@@ -92,7 +114,6 @@
   return CACHED(kZeroConstant, NumberConstant(0.0));
 }
 
-
 Node* JSGraph::OneConstant() {
   return CACHED(kOneConstant, NumberConstant(1.0));
 }
@@ -147,6 +168,11 @@
   return NumberConstant(value);
 }
 
+Node* JSGraph::Constant(uint32_t value) {
+  if (value == 0) return ZeroConstant();
+  if (value == 1) return OneConstant();
+  return NumberConstant(value);
+}
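
Like the int32_t overload next to it, the new uint32_t overload canonicalizes the two most common constants so repeated requests share one node. The caching idea in miniature, with a hypothetical toy cache rather than JSGraph's CACHED machinery:

#include <cstdio>
#include <map>

struct ConstNode { double value; };

// Toy canonicalizing cache: 0 and 1 map to fixed singletons, everything
// else is memoized by value, so equal requests return the same node.
ConstNode* Constant(double value) {
  static ConstNode zero{0.0}, one{1.0};
  static std::map<double, ConstNode> cache;
  if (value == 0.0) return &zero;
  if (value == 1.0) return &one;
  return &cache.emplace(value, ConstNode{value}).first->second;
}

int main() {
  std::printf("%d\n", Constant(42.0) == Constant(42.0));  // 1
}
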
 
 Node* JSGraph::Int32Constant(int32_t value) {
   Node** loc = cache_.FindInt32Constant(value);
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index fe5545a..9d6f27d 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -42,9 +42,15 @@
   Node* AllocateInNewSpaceStubConstant();
   Node* AllocateInOldSpaceStubConstant();
   Node* ToNumberBuiltinConstant();
-  Node* CEntryStubConstant(int result_size);
+  Node* CEntryStubConstant(int result_size,
+                           SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+                           ArgvMode argv_mode = kArgvOnStack,
+                           bool builtin_exit_frame = false);
   Node* EmptyFixedArrayConstant();
   Node* EmptyLiteralsArrayConstant();
+  Node* EmptyStringConstant();
+  Node* FixedArrayMapConstant();
+  Node* FixedDoubleArrayMapConstant();
   Node* HeapNumberMapConstant();
   Node* OptimizedOutConstant();
   Node* StaleRegisterConstant();
@@ -72,6 +78,9 @@
   // Creates a NumberConstant node, usually canonicalized.
   Node* Constant(int32_t value);
 
+  // Creates a NumberConstant node, usually canonicalized.
+  Node* Constant(uint32_t value);
+
   // Creates a Int32Constant node, usually canonicalized.
   Node* Int32Constant(int32_t value);
   Node* Uint32Constant(uint32_t value) {
@@ -149,8 +158,12 @@
     kAllocateInOldSpaceStubConstant,
     kToNumberBuiltinConstant,
     kCEntryStubConstant,
+    kCEntryStubWithBuiltinExitFrameConstant,
     kEmptyFixedArrayConstant,
     kEmptyLiteralsArrayConstant,
+    kEmptyStringConstant,
+    kFixedArrayMapConstant,
+    kFixedDoubleArrayMapConstant,
     kHeapNumberMapConstant,
     kOptimizedOutConstant,
     kStaleRegisterConstant,
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index 0118b92..ce7b33b 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -67,9 +67,9 @@
 
  // Stop inlining once the maximum allowed level is reached.
   int level = 0;
-  for (Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  for (Node* frame_state = NodeProperties::GetFrameStateInput(node);
        frame_state->opcode() == IrOpcode::kFrameState;
-       frame_state = NodeProperties::GetFrameStateInput(frame_state, 0)) {
+       frame_state = NodeProperties::GetFrameStateInput(frame_state)) {
     if (++level > FLAG_max_inlining_levels) return NoChange();
   }
 
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 0664105..635daa4 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -16,9 +16,10 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/compiler/type-hint-analyzer.h"
 #include "src/isolate-inl.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parse-info.h"
 #include "src/parsing/rewriter.h"
 
 namespace v8 {
@@ -57,7 +58,7 @@
 
   Node* frame_state() {
     // Both, {JSCallFunction} and {JSCallConstruct}, have frame state.
-    return NodeProperties::GetFrameStateInput(call_, 0);
+    return NodeProperties::GetFrameStateInput(call_);
   }
 
   int formal_arguments() {
@@ -106,13 +107,13 @@
           Replace(use, new_target);
         } else if (index == inlinee_arity_index) {
           // The projection is requesting the number of arguments.
-          Replace(use, jsgraph_->Int32Constant(inliner_inputs - 2));
+          Replace(use, jsgraph()->Int32Constant(inliner_inputs - 2));
         } else if (index == inlinee_context_index) {
           // The projection is requesting the inlinee function context.
           Replace(use, context);
         } else {
           // Call has fewer arguments than required, fill with undefined.
-          Replace(use, jsgraph_->UndefinedConstant());
+          Replace(use, jsgraph()->UndefinedConstant());
         }
         break;
       }
@@ -143,9 +144,8 @@
       case IrOpcode::kDeoptimize:
       case IrOpcode::kTerminate:
       case IrOpcode::kThrow:
-        NodeProperties::MergeControlToEnd(jsgraph_->graph(), jsgraph_->common(),
-                                          input);
-        Revisit(jsgraph_->graph()->end());
+        NodeProperties::MergeControlToEnd(graph(), common(), input);
+        Revisit(graph()->end());
         break;
       default:
         UNREACHABLE();
@@ -159,20 +159,20 @@
   // uses with said value or kill value uses if no value can be returned.
   if (values.size() > 0) {
     int const input_count = static_cast<int>(controls.size());
-    Node* control_output = jsgraph_->graph()->NewNode(
-        jsgraph_->common()->Merge(input_count), input_count, &controls.front());
+    Node* control_output = graph()->NewNode(common()->Merge(input_count),
+                                            input_count, &controls.front());
     values.push_back(control_output);
     effects.push_back(control_output);
-    Node* value_output = jsgraph_->graph()->NewNode(
-        jsgraph_->common()->Phi(MachineRepresentation::kTagged, input_count),
+    Node* value_output = graph()->NewNode(
+        common()->Phi(MachineRepresentation::kTagged, input_count),
         static_cast<int>(values.size()), &values.front());
-    Node* effect_output = jsgraph_->graph()->NewNode(
-        jsgraph_->common()->EffectPhi(input_count),
-        static_cast<int>(effects.size()), &effects.front());
+    Node* effect_output =
+        graph()->NewNode(common()->EffectPhi(input_count),
+                         static_cast<int>(effects.size()), &effects.front());
     ReplaceWithValue(call, value_output, effect_output, control_output);
     return Changed(value_output);
   } else {
-    ReplaceWithValue(call, call, call, jsgraph_->Dead());
+    ReplaceWithValue(call, call, call, jsgraph()->Dead());
     return Changed(call);
   }
 }
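
The joining logic above merges every returning path of the inlinee into one value/effect/control triple. For the two-path case it builds the following shape (names illustrative, assuming the usual Graph/CommonOperatorBuilder pair):

    // Illustrative: join two returning paths (value/effect/control triples).
    Node* JoinTwoReturnPaths(Graph* graph, CommonOperatorBuilder* common,
                             Node* v0, Node* e0, Node* c0, Node* v1, Node* e1,
                             Node* c1, Node** effect_out, Node** control_out) {
      Node* merge = graph->NewNode(common->Merge(2), c0, c1);
      Node* phi = graph->NewNode(
          common->Phi(MachineRepresentation::kTagged, 2), v0, v1, merge);
      Node* ephi = graph->NewNode(common->EffectPhi(2), e0, e1, merge);
      *effect_out = ephi;    // Replaces the call's effect uses.
      *control_out = merge;  // Replaces the call's control uses.
      return phi;            // Replaces the call's value uses.
    }
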
@@ -183,24 +183,24 @@
                                             FrameStateType frame_state_type,
                                             Handle<SharedFunctionInfo> shared) {
   const FrameStateFunctionInfo* state_info =
-      jsgraph_->common()->CreateFrameStateFunctionInfo(
-          frame_state_type, parameter_count + 1, 0, shared);
+      common()->CreateFrameStateFunctionInfo(frame_state_type,
+                                             parameter_count + 1, 0, shared);
 
-  const Operator* op = jsgraph_->common()->FrameState(
+  const Operator* op = common()->FrameState(
       BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
-  const Operator* op0 = jsgraph_->common()->StateValues(0);
-  Node* node0 = jsgraph_->graph()->NewNode(op0);
+  const Operator* op0 = common()->StateValues(0);
+  Node* node0 = graph()->NewNode(op0);
   NodeVector params(local_zone_);
   for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
     params.push_back(node->InputAt(1 + parameter));
   }
   const Operator* op_param =
-      jsgraph_->common()->StateValues(static_cast<int>(params.size()));
-  Node* params_node = jsgraph_->graph()->NewNode(
+      common()->StateValues(static_cast<int>(params.size()));
+  Node* params_node = graph()->NewNode(
       op_param, static_cast<int>(params.size()), &params.front());
-  return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
-                                    jsgraph_->UndefinedConstant(),
-                                    node->InputAt(0), outer_frame_state);
+  return graph()->NewNode(op, params_node, node0, node0,
+                          jsgraph()->UndefinedConstant(), node->InputAt(0),
+                          outer_frame_state);
 }
 
 Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
@@ -212,25 +212,25 @@
 
  // If we are inlining a tail call, drop the caller's frame state and the
  // arguments adaptor frame state if one exists.
-  frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+  frame_state = NodeProperties::GetFrameStateInput(frame_state);
   if (frame_state->opcode() == IrOpcode::kFrameState) {
     FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
     if (frame_info.type() == FrameStateType::kArgumentsAdaptor) {
-      frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+      frame_state = NodeProperties::GetFrameStateInput(frame_state);
     }
   }
 
   const FrameStateFunctionInfo* state_info =
-      jsgraph_->common()->CreateFrameStateFunctionInfo(
+      common()->CreateFrameStateFunctionInfo(
           FrameStateType::kTailCallerFunction, 0, 0, shared);
 
-  const Operator* op = jsgraph_->common()->FrameState(
+  const Operator* op = common()->FrameState(
       BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
-  const Operator* op0 = jsgraph_->common()->StateValues(0);
-  Node* node0 = jsgraph_->graph()->NewNode(op0);
-  return jsgraph_->graph()->NewNode(op, node0, node0, node0,
-                                    jsgraph_->UndefinedConstant(), function,
-                                    frame_state);
+  const Operator* op0 = common()->StateValues(0);
+  Node* node0 = graph()->NewNode(op0);
+  return graph()->NewNode(op, node0, node0, node0,
+                          jsgraph()->UndefinedConstant(), function,
+                          frame_state);
 }
 
 namespace {
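
The tail-call frame-state surgery above reduces to: skip the caller's own frame state, then also skip an arguments-adaptor frame if one is present. As a hypothetical helper with the same shape as the code in CreateTailCallerFrameState:

    // Hypothetical helper condensing the skip logic above.
    Node* SkipCallerAndAdaptorFrames(Node* frame_state) {
      frame_state = NodeProperties::GetFrameStateInput(frame_state);
      if (frame_state->opcode() == IrOpcode::kFrameState) {
        FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
        if (info.type() == FrameStateType::kArgumentsAdaptor) {
          frame_state = NodeProperties::GetFrameStateInput(frame_state);
        }
      }
      return frame_state;
    }
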
@@ -417,7 +417,7 @@
   }
 
   Node* frame_state = call.frame_state();
-  Node* new_target = jsgraph_->UndefinedConstant();
+  Node* new_target = jsgraph()->UndefinedConstant();
 
   // Inline {JSCallConstruct} requires some additional magic.
   if (node->opcode() == IrOpcode::kJSCallConstruct) {
@@ -425,28 +425,26 @@
     // constructor dispatch (allocate implicit receiver and check return value).
     // This models the behavior usually accomplished by our {JSConstructStub}.
    // Note that the context has to be the caller's context (input to call node).
-    Node* receiver = jsgraph_->UndefinedConstant();  // Implicit receiver.
+    Node* receiver = jsgraph()->UndefinedConstant();  // Implicit receiver.
     if (NeedsImplicitReceiver(shared_info)) {
       Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
       Node* effect = NodeProperties::GetEffectInput(node);
       Node* context = NodeProperties::GetContextInput(node);
-      Node* create = jsgraph_->graph()->NewNode(
-          jsgraph_->javascript()->Create(), call.target(), call.new_target(),
-          context, frame_state_before, effect);
+      Node* create = graph()->NewNode(javascript()->Create(), call.target(),
+                                      call.new_target(), context,
+                                      frame_state_before, effect);
       NodeProperties::ReplaceEffectInput(node, create);
       // Insert a check of the return value to determine whether the return
-      // value
-      // or the implicit receiver should be selected as a result of the call.
-      Node* check = jsgraph_->graph()->NewNode(
-          jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
-          node, context, node, start);
-      Node* select = jsgraph_->graph()->NewNode(
-          jsgraph_->common()->Select(MachineRepresentation::kTagged), check,
-          node, create);
-      NodeProperties::ReplaceUses(node, select, check, node, node);
-      NodeProperties::ReplaceValueInput(select, node, 1);
-      NodeProperties::ReplaceValueInput(check, node, 0);
-      NodeProperties::ReplaceEffectInput(check, node);
+      // value or the implicit receiver should be selected as a result of the
+      // call.
+      Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+      Node* select =
+          graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+                           check, node, create);
+      NodeProperties::ReplaceUses(node, select, node, node, node);
+      // Fix-up inputs that have been mangled by the {ReplaceUses} call above.
+      NodeProperties::ReplaceValueInput(select, node, 1);  // Fix-up input.
+      NodeProperties::ReplaceValueInput(check, node, 0);   // Fix-up input.
       receiver = create;  // The implicit receiver.
     }
 
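
The result-selection pattern introduced above can be read in isolation as: if the constructor returned a JSReceiver, use that, otherwise fall back to the implicit receiver. A condensed sketch (illustrative helper; the real code additionally patches the node's inputs in place):

    Node* SelectConstructResult(Graph* graph, CommonOperatorBuilder* common,
                                SimplifiedOperatorBuilder* simplified,
                                Node* call_result, Node* implicit_receiver) {
      // True iff the constructor's return value is a JSReceiver.
      Node* is_receiver =
          graph->NewNode(simplified->ObjectIsReceiver(), call_result);
      return graph->NewNode(common->Select(MachineRepresentation::kTagged),
                            is_receiver, call_result, implicit_receiver);
    }
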
@@ -455,7 +453,7 @@
     // behaves as if we were dealing with a regular function invocation.
     new_target = call.new_target();  // Retrieve new target value input.
     node->RemoveInput(call.formal_arguments() + 1);  // Drop new target.
-    node->InsertInput(jsgraph_->graph()->zone(), 1, receiver);
+    node->InsertInput(graph()->zone(), 1, receiver);
 
     // Insert a construct stub frame into the chain of frame states. This will
     // reconstruct the proper frame when deoptimizing within the constructor.
@@ -468,7 +466,7 @@
   // TODO(turbofan): We might want to load the context from the JSFunction at
   // runtime in case we only know the SharedFunctionInfo once we have dynamic
   // type feedback in the compiler.
-  Node* context = jsgraph_->Constant(handle(function->context()));
+  Node* context = jsgraph()->Constant(handle(function->context()));
 
   // Insert a JSConvertReceiver node for sloppy callees. Note that the context
  // passed into this node has to be the callee's context (loaded above). Note
@@ -481,9 +479,9 @@
     const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
     Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
     Node* effect = NodeProperties::GetEffectInput(node);
-    Node* convert = jsgraph_->graph()->NewNode(
-        jsgraph_->javascript()->ConvertReceiver(p.convert_mode()),
-        call.receiver(), context, frame_state_before, effect, start);
+    Node* convert = graph()->NewNode(
+        javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
+        context, frame_state_before, effect, start);
     NodeProperties::ReplaceValueInput(node, convert, 1);
     NodeProperties::ReplaceEffectInput(node, convert);
   }
@@ -519,6 +517,16 @@
 
 Graph* JSInliner::graph() const { return jsgraph()->graph(); }
 
+JSOperatorBuilder* JSInliner::javascript() const {
+  return jsgraph()->javascript();
+}
+
+CommonOperatorBuilder* JSInliner::common() const { return jsgraph()->common(); }
+
+SimplifiedOperatorBuilder* JSInliner::simplified() const {
+  return jsgraph()->simplified();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index 88cbf89..49487f5 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -36,6 +36,9 @@
   Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
 
  private:
+  CommonOperatorBuilder* common() const;
+  JSOperatorBuilder* javascript() const;
+  SimplifiedOperatorBuilder* simplified() const;
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
 
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 8d24013..3324508 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -34,10 +34,6 @@
       return ReduceCreateIterResultObject(node);
     case Runtime::kInlineDeoptimizeNow:
       return ReduceDeoptimizeNow(node);
-    case Runtime::kInlineDoubleHi:
-      return ReduceDoubleHi(node);
-    case Runtime::kInlineDoubleLo:
-      return ReduceDoubleLo(node);
     case Runtime::kInlineGeneratorClose:
       return ReduceGeneratorClose(node);
     case Runtime::kInlineGeneratorGetInputOrDebugPos:
@@ -54,8 +50,6 @@
       return ReduceIsJSReceiver(node);
     case Runtime::kInlineIsSmi:
       return ReduceIsSmi(node);
-    case Runtime::kInlineValueOf:
-      return ReduceValueOf(node);
     case Runtime::kInlineFixedArrayGet:
       return ReduceFixedArrayGet(node);
     case Runtime::kInlineFixedArraySet:
@@ -74,14 +68,10 @@
       return ReduceToInteger(node);
     case Runtime::kInlineToLength:
       return ReduceToLength(node);
-    case Runtime::kInlineToName:
-      return ReduceToName(node);
     case Runtime::kInlineToNumber:
       return ReduceToNumber(node);
     case Runtime::kInlineToObject:
       return ReduceToObject(node);
-    case Runtime::kInlineToPrimitive:
-      return ReduceToPrimitive(node);
     case Runtime::kInlineToString:
       return ReduceToString(node);
     case Runtime::kInlineCall:
@@ -109,14 +99,14 @@
 
 Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
   if (mode() != kDeoptimizationEnabled) return NoChange();
-  Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* const frame_state = NodeProperties::GetFrameStateInput(node);
   Node* const effect = NodeProperties::GetEffectInput(node);
   Node* const control = NodeProperties::GetControlInput(node);
 
   // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
-  Node* deoptimize =
-      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                       frame_state, effect, control);
+  Node* deoptimize = graph()->NewNode(
+      common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+      frame_state, effect, control);
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
   Revisit(graph()->end());
 
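
The Deoptimize operator now pairs a kind with an explicit DeoptimizeReason. The construction pattern above, expressed as a hypothetical reusable helper:

    // Hypothetical helper; the real reducer inlines this directly.
    Node* EmitEagerDeopt(JSGraph* jsgraph, DeoptimizeReason reason,
                         Node* frame_state, Node* effect, Node* control) {
      Graph* graph = jsgraph->graph();
      CommonOperatorBuilder* common = jsgraph->common();
      Node* deopt =
          graph->NewNode(common->Deoptimize(DeoptimizeKind::kEager, reason),
                         frame_state, effect, control);
      // A deopt terminates its control path, so it is merged into graph end.
      NodeProperties::MergeControlToEnd(graph, common, deopt);
      return deopt;
    }
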
@@ -125,24 +115,6 @@
   return Changed(node);
 }
 
-
-Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
-  // Tell the compiler to assume number input.
-  Node* renamed = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
-                                   node->InputAt(0), graph()->start());
-  node->ReplaceInput(0, renamed);
-  return Change(node, machine()->Float64ExtractHighWord32());
-}
-
-
-Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
-  // Tell the compiler to assume number input.
-  Node* renamed = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
-                                   node->InputAt(0), graph()->start());
-  node->ReplaceInput(0, renamed);
-  return Change(node, machine()->Float64ExtractLowWord32());
-}
-
 Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
   Node* const generator = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
@@ -201,8 +173,8 @@
       graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), value,
                        effect, if_false),
       effect, if_false);
-  Node* vfalse = graph()->NewNode(machine()->Word32Equal(), efalse,
-                                  jsgraph()->Int32Constant(instance_type));
+  Node* vfalse = graph()->NewNode(simplified()->NumberEqual(), efalse,
+                                  jsgraph()->Constant(instance_type));
 
   Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
 
@@ -226,70 +198,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
-  // if (%_IsSmi(value)) {
-  //   return value;
-  // } else if (%_GetInstanceType(%_GetMap(value)) == JS_VALUE_TYPE) {
-  //   return %_GetValue(value);
-  // } else {
-  //   return value;
-  // }
-  const Operator* const merge_op = common()->Merge(2);
-  const Operator* const ephi_op = common()->EffectPhi(2);
-  const Operator* const phi_op =
-      common()->Phi(MachineRepresentation::kTagged, 2);
-
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0 = value;
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0;
-  Node* vfalse0;
-  {
-    Node* check1 = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(
-            simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-            graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                             value, effect, if_false0),
-            effect, if_false0),
-        jsgraph()->Int32Constant(JS_VALUE_TYPE));
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForValue()),
-                         value, effect, if_true1);
-    Node* vtrue1 = etrue1;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = effect;
-    Node* vfalse1 = value;
-
-    Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
-    efalse0 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
-    vfalse0 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
-  }
-
-  Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
-
-  // Replace all effect uses of {node} with the {ephi0}.
-  Node* ephi0 = graph()->NewNode(ephi_op, etrue0, efalse0, merge0);
-  ReplaceWithValue(node, node, ephi0);
-
-  // Turn the {node} into a Phi.
-  return Change(node, phi_op, vtrue0, vfalse0, merge0);
-}
-
-
 Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
   // Replace all effect uses of {node} with the effect dependency.
   RelaxEffectsAndControls(node);
@@ -368,12 +276,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceToName(Node* node) {
-  NodeProperties::ChangeOp(node, javascript()->ToName());
-  return Changed(node);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceToNumber(Node* node) {
   NodeProperties::ChangeOp(node, javascript()->ToNumber());
   return Changed(node);
@@ -392,17 +294,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceToPrimitive(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Type* value_type = NodeProperties::GetType(value);
-  if (value_type->Is(Type::Primitive())) {
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-  return NoChange();
-}
-
-
 Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
   NodeProperties::ChangeOp(node, javascript()->ToString());
   return Changed(node);
@@ -495,12 +386,6 @@
   return jsgraph_->javascript();
 }
 
-
-MachineOperatorBuilder* JSIntrinsicLowering::machine() const {
-  return jsgraph()->machine();
-}
-
-
 SimplifiedOperatorBuilder* JSIntrinsicLowering::simplified() const {
   return jsgraph()->simplified();
 }
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index f4b8695..6835a52 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -21,7 +21,6 @@
 class CommonOperatorBuilder;
 class JSOperatorBuilder;
 class JSGraph;
-class MachineOperatorBuilder;
 class SimplifiedOperatorBuilder;
 
 
@@ -39,15 +38,12 @@
  private:
   Reduction ReduceCreateIterResultObject(Node* node);
   Reduction ReduceDeoptimizeNow(Node* node);
-  Reduction ReduceDoubleHi(Node* node);
-  Reduction ReduceDoubleLo(Node* node);
   Reduction ReduceGeneratorClose(Node* node);
   Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
   Reduction ReduceGeneratorGetResumeMode(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
   Reduction ReduceIsJSReceiver(Node* node);
   Reduction ReduceIsSmi(Node* node);
-  Reduction ReduceValueOf(Node* node);
   Reduction ReduceFixedArrayGet(Node* node);
   Reduction ReduceFixedArraySet(Node* node);
   Reduction ReduceRegExpConstructResult(Node* node);
@@ -57,10 +53,8 @@
   Reduction ReduceSubString(Node* node);
   Reduction ReduceToInteger(Node* node);
   Reduction ReduceToLength(Node* node);
-  Reduction ReduceToName(Node* node);
   Reduction ReduceToNumber(Node* node);
   Reduction ReduceToObject(Node* node);
-  Reduction ReduceToPrimitive(Node* node);
   Reduction ReduceToString(Node* node);
   Reduction ReduceCall(Node* node);
   Reduction ReduceNewObject(Node* node);
@@ -79,7 +73,6 @@
   Isolate* isolate() const;
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
-  MachineOperatorBuilder* machine() const;
   SimplifiedOperatorBuilder* simplified() const;
   DeoptimizationMode mode() const { return mode_; }
 
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index 81d4cd0..b76744e 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -22,6 +22,39 @@
 namespace internal {
 namespace compiler {
 
+namespace {
+
+bool HasNumberMaps(MapList const& maps) {
+  for (auto map : maps) {
+    if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
+  }
+  return false;
+}
+
+bool HasOnlyJSArrayMaps(MapList const& maps) {
+  for (auto map : maps) {
+    if (!map->IsJSArrayMap()) return false;
+  }
+  return true;
+}
+
+bool HasOnlyNumberMaps(MapList const& maps) {
+  for (auto map : maps) {
+    if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
+  }
+  return true;
+}
+
+template <typename T>
+bool HasOnlyStringMaps(T const& maps) {
+  for (auto map : maps) {
+    if (!map->IsStringMap()) return false;
+  }
+  return true;
+}
+
+}  // namespace
+
 JSNativeContextSpecialization::JSNativeContextSpecialization(
     Editor* editor, JSGraph* jsgraph, Flags flags,
     MaybeHandle<Context> native_context, CompilationDependencies* dependencies,
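
The map predicates added above are simple scans over a map list; they later select the monomorphic fast paths. A self-contained illustration of the same shape with a stand-in map type (the real code iterates Handle<Map> lists):

    #include <vector>

    struct MapStandIn {
      bool is_string_map;  // Stand-in for Map::IsStringMap().
    };

    template <typename T>
    bool AllStringMaps(T const& maps) {
      for (auto const& map : maps) {
        if (!map.is_string_map) return false;
      }
      return true;  // Vacuously true for an empty list, as in HasOnlyStringMaps.
    }

    // AllStringMaps(std::vector<MapStandIn>{{true}, {true}})  -> true
    // AllStringMaps(std::vector<MapStandIn>{{true}, {false}}) -> false
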
@@ -78,9 +111,11 @@
          node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty);
   Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state_eager = NodeProperties::FindFrameStateBefore(node);
+  Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
@@ -98,315 +133,193 @@
     return NoChange();
   }
 
-  // Nothing to do if we have no non-deprecated maps.
-  if (access_infos.empty()) return NoChange();
+  // TODO(turbofan): Add support for inlining into try blocks.
+  if (NodeProperties::IsExceptionalCall(node) ||
+      !(flags() & kAccessorInliningEnabled)) {
+    for (auto access_info : access_infos) {
+      if (access_info.IsAccessorConstant()) return NoChange();
+    }
+  }
 
-  // The final states for every polymorphic branch. We join them with
-  // Merge++Phi+EffectPhi at the bottom.
-  ZoneVector<Node*> values(zone());
-  ZoneVector<Node*> effects(zone());
-  ZoneVector<Node*> controls(zone());
+  // Nothing to do if we have no non-deprecated maps.
+  if (access_infos.empty()) {
+    return ReduceSoftDeoptimize(
+        node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
+  }
 
   // Ensure that {index} matches the specified {name} (if {index} is given).
   if (index != nullptr) {
-    Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
-                                   index, jsgraph()->HeapConstant(name));
-    control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                        frame_state, effect, control);
+    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), index,
+                                   jsgraph()->HeapConstant(name));
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
   }
 
-  // Check if {receiver} may be a number.
-  bool receiverissmi_possible = false;
-  for (PropertyAccessInfo const& access_info : access_infos) {
-    if (access_info.receiver_type()->Is(Type::Number())) {
-      receiverissmi_possible = true;
-      break;
-    }
-  }
-
-  // Ensure that {receiver} is a heap object.
-  Node* receiverissmi_control = nullptr;
-  Node* receiverissmi_effect = effect;
-  if (receiverissmi_possible) {
-    Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
-    Node* branch = graph()->NewNode(common()->Branch(), check, control);
-    control = graph()->NewNode(common()->IfFalse(), branch);
-    receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
-    receiverissmi_effect = effect;
-  } else {
-    receiver = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
-                                         receiver, effect, control);
-  }
-
-  // Load the {receiver} map. The resulting effect is the dominating effect for
-  // all (polymorphic) branches.
-  Node* receiver_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       receiver, effect, control);
-
-  // Generate code for the various different property access patterns.
-  Node* fallthrough_control = control;
-  for (size_t j = 0; j < access_infos.size(); ++j) {
-    PropertyAccessInfo const& access_info = access_infos[j];
-    Node* this_value = value;
-    Node* this_receiver = receiver;
-    Node* this_effect = effect;
-    Node* this_control;
-
-    // Perform map check on {receiver}.
-    Type* receiver_type = access_info.receiver_type();
-    if (receiver_type->Is(Type::String())) {
-      Node* check = graph()->NewNode(simplified()->ObjectIsString(), receiver);
-      if (j == access_infos.size() - 1) {
-        this_control = this_effect =
-            graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                             this_effect, fallthrough_control);
-        fallthrough_control = nullptr;
-      } else {
-        Node* branch =
-            graph()->NewNode(common()->Branch(), check, fallthrough_control);
-        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
-        this_control = graph()->NewNode(common()->IfTrue(), branch);
-      }
+  // Check for the monomorphic cases.
+  if (access_infos.size() == 1) {
+    PropertyAccessInfo access_info = access_infos.front();
+    if (HasOnlyStringMaps(access_info.receiver_maps())) {
+      // Monomorphic string access (ignoring the fact that there are multiple
+      // String maps).
+      receiver = effect = graph()->NewNode(simplified()->CheckString(),
+                                           receiver, effect, control);
+    } else if (HasOnlyNumberMaps(access_info.receiver_maps())) {
+      // Monomorphic number access (we also deal with Smis here).
+      receiver = effect = graph()->NewNode(simplified()->CheckNumber(),
+                                           receiver, effect, control);
     } else {
-      // Emit a (sequence of) map checks for other {receiver}s.
-      ZoneVector<Node*> this_controls(zone());
-      ZoneVector<Node*> this_effects(zone());
-      int num_classes = access_info.receiver_type()->NumClasses();
-      for (auto i = access_info.receiver_type()->Classes(); !i.Done();
-           i.Advance()) {
-        DCHECK_LT(0, num_classes);
-        Handle<Map> map = i.Current();
-        Node* check =
-            graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
-                             receiver_map, jsgraph()->Constant(map));
-        if (--num_classes == 0 && j == access_infos.size() - 1) {
-          Node* deoptimize =
-              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               this_effect, fallthrough_control);
-          this_controls.push_back(deoptimize);
-          this_effects.push_back(deoptimize);
-          fallthrough_control = nullptr;
-        } else {
-          Node* branch =
-              graph()->NewNode(common()->Branch(), check, fallthrough_control);
-          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
-          this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-          this_effects.push_back(this_effect);
-        }
-      }
-
-      // The Number case requires special treatment to also deal with Smis.
-      if (receiver_type->Is(Type::Number())) {
-        // Join this check with the "receiver is smi" check above.
-        DCHECK_NOT_NULL(receiverissmi_effect);
-        DCHECK_NOT_NULL(receiverissmi_control);
-        this_effects.push_back(receiverissmi_effect);
-        this_controls.push_back(receiverissmi_control);
-        receiverissmi_effect = receiverissmi_control = nullptr;
-      }
-
-      // Create dominating Merge+EffectPhi for this {receiver} type.
-      int const this_control_count = static_cast<int>(this_controls.size());
-      this_control =
-          (this_control_count == 1)
-              ? this_controls.front()
-              : graph()->NewNode(common()->Merge(this_control_count),
-                                 this_control_count, &this_controls.front());
-      this_effects.push_back(this_control);
-      int const this_effect_count = static_cast<int>(this_effects.size());
-      this_effect =
-          (this_control_count == 1)
-              ? this_effects.front()
-              : graph()->NewNode(common()->EffectPhi(this_control_count),
-                                 this_effect_count, &this_effects.front());
-    }
-
-    // Determine actual holder and perform prototype chain checks.
-    Handle<JSObject> holder;
-    if (access_info.holder().ToHandle(&holder)) {
-      AssumePrototypesStable(receiver_type, native_context, holder);
+      // Monomorphic property access.
+      effect = BuildCheckTaggedPointer(receiver, effect, control);
+      effect = BuildCheckMaps(receiver, effect, control,
+                              access_info.receiver_maps());
     }
 
     // Generate the actual property access.
-    if (access_info.IsNotFound()) {
-      DCHECK_EQ(AccessMode::kLoad, access_mode);
-      this_value = jsgraph()->UndefinedConstant();
-    } else if (access_info.IsDataConstant()) {
-      this_value = jsgraph()->Constant(access_info.constant());
-      if (access_mode == AccessMode::kStore) {
-        Node* check = graph()->NewNode(
-            simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
-        this_control = this_effect =
-            graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                             this_effect, this_control);
-      }
-    } else {
-      DCHECK(access_info.IsDataField());
-      FieldIndex const field_index = access_info.field_index();
-      Type* const field_type = access_info.field_type();
-      if (access_mode == AccessMode::kLoad &&
-          access_info.holder().ToHandle(&holder)) {
-        this_receiver = jsgraph()->Constant(holder);
-      }
-      Node* this_storage = this_receiver;
-      if (!field_index.is_inobject()) {
-        this_storage = this_effect = graph()->NewNode(
-            simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
-            this_storage, this_effect, this_control);
-      }
-      FieldAccess field_access = {
-          kTaggedBase, field_index.offset(),     name,
-          field_type,  MachineType::AnyTagged(), kFullWriteBarrier};
-      if (access_mode == AccessMode::kLoad) {
-        if (field_type->Is(Type::UntaggedFloat64())) {
-          // TODO(turbofan): We remove the representation axis from the type to
-          // avoid uninhabited representation types. This is a workaround until
-          // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
-          field_access.type = Type::Union(
-              field_type, Type::Representation(Type::Number(), zone()), zone());
-          if (!field_index.is_inobject() || field_index.is_hidden_field() ||
-              !FLAG_unbox_double_fields) {
-            this_storage = this_effect =
-                graph()->NewNode(simplified()->LoadField(field_access),
-                                 this_storage, this_effect, this_control);
-            field_access.offset = HeapNumber::kValueOffset;
-            field_access.name = MaybeHandle<Name>();
-          }
-          field_access.machine_type = MachineType::Float64();
-        }
-        this_value = this_effect =
-            graph()->NewNode(simplified()->LoadField(field_access),
-                             this_storage, this_effect, this_control);
-      } else {
-        DCHECK_EQ(AccessMode::kStore, access_mode);
-        if (field_type->Is(Type::UntaggedFloat64())) {
-          // TODO(turbofan): We remove the representation axis from the type to
-          // avoid uninhabited representation types. This is a workaround until
-          // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
-          field_access.type = Type::Union(
-              field_type, Type::Representation(Type::Number(), zone()), zone());
-          Node* check =
-              graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-          this_control = this_effect =
-              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               this_effect, this_control);
-          this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
-                                        this_value, this_control);
+    ValueEffectControl continuation = BuildPropertyAccess(
+        receiver, value, context, frame_state_lazy, effect, control, name,
+        native_context, access_info, access_mode);
+    value = continuation.value();
+    effect = continuation.effect();
+    control = continuation.control();
+  } else {
+    // The final states for every polymorphic branch. We join them with
+    // Merge+Phi+EffectPhi at the bottom.
+    ZoneVector<Node*> values(zone());
+    ZoneVector<Node*> effects(zone());
+    ZoneVector<Node*> controls(zone());
 
-          if (!field_index.is_inobject() || field_index.is_hidden_field() ||
-              !FLAG_unbox_double_fields) {
-            if (access_info.HasTransitionMap()) {
-              // Allocate a MutableHeapNumber for the new property.
-              this_effect = graph()->NewNode(
-                  common()->BeginRegion(RegionObservability::kNotObservable),
-                  this_effect);
-              Node* this_box = this_effect =
-                  graph()->NewNode(simplified()->Allocate(NOT_TENURED),
-                                   jsgraph()->Constant(HeapNumber::kSize),
-                                   this_effect, this_control);
-              this_effect = graph()->NewNode(
-                  simplified()->StoreField(AccessBuilder::ForMap()), this_box,
-                  jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
-                  this_effect, this_control);
-              this_effect = graph()->NewNode(
-                  simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
-                  this_box, this_value, this_effect, this_control);
-              this_value = this_effect = graph()->NewNode(
-                  common()->FinishRegion(), this_box, this_effect);
-
-              field_access.type = Type::TaggedPointer();
-            } else {
-              // We just store directly to the MutableHeapNumber.
-              this_storage = this_effect =
-                  graph()->NewNode(simplified()->LoadField(field_access),
-                                   this_storage, this_effect, this_control);
-              field_access.offset = HeapNumber::kValueOffset;
-              field_access.name = MaybeHandle<Name>();
-              field_access.machine_type = MachineType::Float64();
-            }
-          } else {
-            // Unboxed double field, we store directly to the field.
-            field_access.machine_type = MachineType::Float64();
-          }
-        } else if (field_type->Is(Type::TaggedSigned())) {
-          this_value = this_effect =
-              graph()->NewNode(simplified()->CheckTaggedSigned(), this_value,
-                               this_effect, this_control);
-        } else if (field_type->Is(Type::TaggedPointer())) {
-          this_value = this_effect =
-              graph()->NewNode(simplified()->CheckTaggedPointer(), this_value,
-                               this_effect, this_control);
-          if (field_type->NumClasses() == 1) {
-            // Emit a map check for the value.
-            Node* this_value_map = this_effect = graph()->NewNode(
-                simplified()->LoadField(AccessBuilder::ForMap()), this_value,
-                this_effect, this_control);
-            Node* check = graph()->NewNode(
-                simplified()->ReferenceEqual(Type::Internal()), this_value_map,
-                jsgraph()->Constant(field_type->Classes().Current()));
-            this_control = this_effect =
-                graph()->NewNode(common()->DeoptimizeUnless(), check,
-                                 frame_state, this_effect, this_control);
-          } else {
-            DCHECK_EQ(0, field_type->NumClasses());
-          }
-        } else {
-          DCHECK(field_type->Is(Type::Tagged()));
-        }
-        Handle<Map> transition_map;
-        if (access_info.transition_map().ToHandle(&transition_map)) {
-          this_effect = graph()->NewNode(
-              common()->BeginRegion(RegionObservability::kObservable),
-              this_effect);
-          this_effect = graph()->NewNode(
-              simplified()->StoreField(AccessBuilder::ForMap()), this_receiver,
-              jsgraph()->Constant(transition_map), this_effect, this_control);
-        }
-        this_effect = graph()->NewNode(simplified()->StoreField(field_access),
-                                       this_storage, this_value, this_effect,
-                                       this_control);
-        if (access_info.HasTransitionMap()) {
-          this_effect =
-              graph()->NewNode(common()->FinishRegion(),
-                               jsgraph()->UndefinedConstant(), this_effect);
-        }
+    // Check if {receiver} may be a number.
+    bool receiverissmi_possible = false;
+    for (PropertyAccessInfo const& access_info : access_infos) {
+      if (HasNumberMaps(access_info.receiver_maps())) {
+        receiverissmi_possible = true;
+        break;
       }
     }
 
-    // Remember the final state for this property access.
-    values.push_back(this_value);
-    effects.push_back(this_effect);
-    controls.push_back(this_control);
-  }
+    // Ensure that {receiver} is a heap object.
+    Node* receiverissmi_control = nullptr;
+    Node* receiverissmi_effect = effect;
+    if (receiverissmi_possible) {
+      Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+      Node* branch = graph()->NewNode(common()->Branch(), check, control);
+      control = graph()->NewNode(common()->IfFalse(), branch);
+      receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+      receiverissmi_effect = effect;
+    } else {
+      effect = BuildCheckTaggedPointer(receiver, effect, control);
+    }
 
-  DCHECK_NULL(fallthrough_control);
+    // Load the {receiver} map. The resulting effect is the dominating effect
+    // for all (polymorphic) branches.
+    Node* receiver_map = effect =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         receiver, effect, control);
 
-  // Generate the final merge point for all (polymorphic) branches.
-  int const control_count = static_cast<int>(controls.size());
-  if (control_count == 0) {
-    value = effect = control = jsgraph()->Dead();
-  } else if (control_count == 1) {
-    value = values.front();
-    effect = effects.front();
-    control = controls.front();
-  } else {
-    control = graph()->NewNode(common()->Merge(control_count), control_count,
-                               &controls.front());
-    values.push_back(control);
-    value = graph()->NewNode(
-        common()->Phi(MachineRepresentation::kTagged, control_count),
-        control_count + 1, &values.front());
-    effects.push_back(control);
-    effect = graph()->NewNode(common()->EffectPhi(control_count),
-                              control_count + 1, &effects.front());
+    // Generate code for the various different property access patterns.
+    Node* fallthrough_control = control;
+    for (size_t j = 0; j < access_infos.size(); ++j) {
+      PropertyAccessInfo const& access_info = access_infos[j];
+      Node* this_value = value;
+      Node* this_receiver = receiver;
+      Node* this_effect = effect;
+      Node* this_control;
+
+      // Perform map check on {receiver}.
+      MapList const& receiver_maps = access_info.receiver_maps();
+      {
+        // Emit a (sequence of) map checks for other {receiver}s.
+        ZoneVector<Node*> this_controls(zone());
+        ZoneVector<Node*> this_effects(zone());
+        size_t num_classes = receiver_maps.size();
+        for (auto map : receiver_maps) {
+          DCHECK_LT(0u, num_classes);
+          Node* check =
+              graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+                               jsgraph()->Constant(map));
+          if (--num_classes == 0 && j == access_infos.size() - 1) {
+            check = graph()->NewNode(simplified()->CheckIf(), check,
+                                     this_effect, fallthrough_control);
+            this_controls.push_back(fallthrough_control);
+            this_effects.push_back(check);
+            fallthrough_control = nullptr;
+          } else {
+            Node* branch = graph()->NewNode(common()->Branch(), check,
+                                            fallthrough_control);
+            fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+            this_controls.push_back(
+                graph()->NewNode(common()->IfTrue(), branch));
+            this_effects.push_back(this_effect);
+          }
+        }
+
+        // The Number case requires special treatment to also deal with Smis.
+        if (HasNumberMaps(receiver_maps)) {
+          // Join this check with the "receiver is smi" check above.
+          DCHECK_NOT_NULL(receiverissmi_effect);
+          DCHECK_NOT_NULL(receiverissmi_control);
+          this_effects.push_back(receiverissmi_effect);
+          this_controls.push_back(receiverissmi_control);
+          receiverissmi_effect = receiverissmi_control = nullptr;
+        }
+
+        // Create a single chokepoint for the control.
+        int const this_control_count = static_cast<int>(this_controls.size());
+        if (this_control_count == 1) {
+          this_control = this_controls.front();
+          this_effect = this_effects.front();
+        } else {
+          this_control =
+              graph()->NewNode(common()->Merge(this_control_count),
+                               this_control_count, &this_controls.front());
+          this_effects.push_back(this_control);
+          this_effect =
+              graph()->NewNode(common()->EffectPhi(this_control_count),
+                               this_control_count + 1, &this_effects.front());
+
+          // TODO(turbofan): The effect/control linearization will not find a
+          // FrameState after the EffectPhi that is generated above.
+          this_effect =
+              graph()->NewNode(common()->Checkpoint(), frame_state_eager,
+                               this_effect, this_control);
+        }
+      }
+
+      // Generate the actual property access.
+      ValueEffectControl continuation = BuildPropertyAccess(
+          this_receiver, this_value, context, frame_state_lazy, this_effect,
+          this_control, name, native_context, access_info, access_mode);
+      values.push_back(continuation.value());
+      effects.push_back(continuation.effect());
+      controls.push_back(continuation.control());
+    }
+
+    DCHECK_NULL(fallthrough_control);
+
+    // Generate the final merge point for all (polymorphic) branches.
+    int const control_count = static_cast<int>(controls.size());
+    if (control_count == 0) {
+      value = effect = control = jsgraph()->Dead();
+    } else if (control_count == 1) {
+      value = values.front();
+      effect = effects.front();
+      control = controls.front();
+    } else {
+      control = graph()->NewNode(common()->Merge(control_count), control_count,
+                                 &controls.front());
+      values.push_back(control);
+      value = graph()->NewNode(
+          common()->Phi(MachineRepresentation::kTagged, control_count),
+          control_count + 1, &values.front());
+      effects.push_back(control);
+      effect = graph()->NewNode(common()->EffectPhi(control_count),
+                                control_count + 1, &effects.front());
+    }
   }
   ReplaceWithValue(node, value, effect, control);
   return Replace(value);
 }
 
-
-Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
     Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
     AccessMode access_mode, LanguageMode language_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
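
The restructured named-access lowering above splits on the shape of the feedback: a single access-info whose maps are uniformly strings or numbers gets one cheap check, everything else goes through map dispatch. The decision, condensed from the code above (effect/control threading abbreviated):

    // Condensed monomorphic fast-path selection, as in ReduceNamedAccess.
    if (HasOnlyStringMaps(access_info.receiver_maps())) {
      receiver = effect = graph()->NewNode(simplified()->CheckString(),
                                           receiver, effect, control);
    } else if (HasOnlyNumberMaps(access_info.receiver_maps())) {
      // CheckNumber also admits Smis, so no separate Smi branch is needed.
      receiver = effect = graph()->NewNode(simplified()->CheckNumber(),
                                           receiver, effect, control);
    } else {
      effect = BuildCheckTaggedPointer(receiver, effect, control);
      effect = BuildCheckMaps(receiver, effect, control,
                              access_info.receiver_maps());
    }
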
@@ -418,7 +331,9 @@
   if (nexus.IsUninitialized()) {
     if ((flags() & kDeoptimizationEnabled) &&
         (flags() & kBailoutOnUninitialized)) {
-      return ReduceSoftDeoptimize(node);
+      return ReduceSoftDeoptimize(
+          node,
+          DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
     }
     return NoChange();
   }
@@ -430,7 +345,9 @@
   } else if (receiver_maps.length() == 0) {
     if ((flags() & kDeoptimizationEnabled) &&
         (flags() & kBailoutOnUninitialized)) {
-      return ReduceSoftDeoptimize(node);
+      return ReduceSoftDeoptimize(
+          node,
+          DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
     }
     return NoChange();
   }
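
Both uninitialized-feedback paths above now funnel into ReduceSoftDeoptimize with a named reason. A hypothetical condensation of the repeated guard (the real code inlines it at each site):

    Reduction ReduceSoftDeoptimizeIfEnabled(Node* node,
                                            DeoptimizeReason reason) {
      // Only bail out when deopt support and the bailout flag are both on.
      if ((flags() & kDeoptimizationEnabled) &&
          (flags() & kBailoutOnUninitialized)) {
        return ReduceSoftDeoptimize(node, reason);
      }
      return NoChange();
    }
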
@@ -476,8 +393,8 @@
   LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
 
   // Try to lower the named access based on the {receiver_maps}.
-  return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kLoad,
-                           p.language_mode());
+  return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+                                    AccessMode::kLoad, p.language_mode());
 }
 
 
@@ -491,8 +408,8 @@
   StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
 
   // Try to lower the named access based on the {receiver_maps}.
-  return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kStore,
-                           p.language_mode());
+  return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+                                    AccessMode::kStore, p.language_mode());
 }
 
 
@@ -503,7 +420,6 @@
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty);
   Node* receiver = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* frame_state = NodeProperties::FindFrameStateBefore(node);
@@ -511,152 +427,107 @@
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
 
-  // TODO(bmeurer): Add support for non-standard stores.
-  if (store_mode != STANDARD_STORE) return NoChange();
+  // Check for keyed access to strings.
+  if (HasOnlyStringMaps(receiver_maps)) {
+    // Strings are immutable in JavaScript.
+    if (access_mode == AccessMode::kStore) return NoChange();
 
-  // Retrieve the native context from the given {node}.
-  Handle<Context> native_context;
-  if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+    // Ensure that the {receiver} is actually a String.
+    receiver = effect = graph()->NewNode(simplified()->CheckString(), receiver,
+                                         effect, control);
 
-  // Compute element access infos for the receiver maps.
-  AccessInfoFactory access_info_factory(dependencies(), native_context,
-                                        graph()->zone());
-  ZoneVector<ElementAccessInfo> access_infos(zone());
-  if (!access_info_factory.ComputeElementAccessInfos(receiver_maps, access_mode,
-                                                     &access_infos)) {
-    return NoChange();
-  }
+    // Determine the {receiver} length.
+    Node* length = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+        effect, control);
 
-  // Nothing to do if we have no non-deprecated maps.
-  if (access_infos.empty()) return NoChange();
+    // Ensure that {index} is less than {receiver} length.
+    index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+                                      length, effect, control);
 
-  // The final states for every polymorphic branch. We join them with
-  // Merge+Phi+EffectPhi at the bottom.
-  ZoneVector<Node*> values(zone());
-  ZoneVector<Node*> effects(zone());
-  ZoneVector<Node*> controls(zone());
+    // Load the character from the {receiver}.
+    value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver, index,
+                             control);
 
-  // Ensure that {receiver} is a heap object.
-  receiver = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
-                                       receiver, effect, control);
+    // Return it as a single character string.
+    value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+  } else {
+    // Retrieve the native context from the given {node}.
+    Handle<Context> native_context;
+    if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
 
-  // Load the {receiver} map. The resulting effect is the dominating effect for
-  // all (polymorphic) branches.
-  Node* receiver_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       receiver, effect, control);
+    // Compute element access infos for the receiver maps.
+    AccessInfoFactory access_info_factory(dependencies(), native_context,
+                                          graph()->zone());
+    ZoneVector<ElementAccessInfo> access_infos(zone());
+    if (!access_info_factory.ComputeElementAccessInfos(
+            receiver_maps, access_mode, &access_infos)) {
+      return NoChange();
+    }
 
-  // Generate code for the various different element access patterns.
-  Node* fallthrough_control = control;
-  for (size_t j = 0; j < access_infos.size(); ++j) {
-    ElementAccessInfo const& access_info = access_infos[j];
-    Node* this_receiver = receiver;
-    Node* this_value = value;
-    Node* this_index = index;
-    Node* this_effect;
-    Node* this_control;
+    // Nothing to do if we have no non-deprecated maps.
+    if (access_infos.empty()) {
+      return ReduceSoftDeoptimize(
+          node,
+          DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
+    }
 
-    // Perform map check on {receiver}.
-    Type* receiver_type = access_info.receiver_type();
-    bool receiver_is_jsarray = true;
-    {
-      ZoneVector<Node*> this_controls(zone());
-      ZoneVector<Node*> this_effects(zone());
-      size_t num_transitions = access_info.transitions().size();
-      int num_classes = access_info.receiver_type()->NumClasses();
-      for (auto i = access_info.receiver_type()->Classes(); !i.Done();
-           i.Advance()) {
-        DCHECK_LT(0, num_classes);
-        Handle<Map> map = i.Current();
-        Node* check =
-            graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
-                             receiver_map, jsgraph()->Constant(map));
-        if (--num_classes == 0 && num_transitions == 0 &&
-            j == access_infos.size() - 1) {
-          // Last map check on the fallthrough control path, do a conditional
-          // eager deoptimization exit here.
-          // TODO(turbofan): This is ugly as hell! We should probably introduce
-          // macro-ish operators for property access that encapsulate this whole
-          // mess.
-          Node* deoptimize =
-              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               effect, fallthrough_control);
-          this_controls.push_back(deoptimize);
-          this_effects.push_back(deoptimize);
-          fallthrough_control = nullptr;
-        } else {
-          Node* branch =
-              graph()->NewNode(common()->Branch(), check, fallthrough_control);
-          this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-          this_effects.push_back(effect);
-          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+    // For holey stores or growing stores, we need to check that the prototype
+    // chain contains no setters for elements, and we need to guard those checks
+    // via code dependencies on the relevant prototype maps.
+    if (access_mode == AccessMode::kStore) {
+      // TODO(turbofan): We could have a fast path here that checks for the
+      // common case of Array or Object prototype only and therefore avoids
+      // the zone allocation of this vector.
+      ZoneVector<Handle<Map>> prototype_maps(zone());
+      for (ElementAccessInfo const& access_info : access_infos) {
+        for (Handle<Map> receiver_map : access_info.receiver_maps()) {
+          // If the {receiver_map} has a prototype and its elements backing
+          // store is either holey, or we have a potentially growing store,
+          // then we need to check that all prototypes have stable maps with
+          // fast elements (and we need to guard against changes to that below).
+          if (IsHoleyElementsKind(receiver_map->elements_kind()) ||
+              IsGrowStoreMode(store_mode)) {
+            // Make sure all prototypes are stable and have fast elements.
+            for (Handle<Map> map = receiver_map;;) {
+              Handle<Object> map_prototype(map->prototype(), isolate());
+              if (map_prototype->IsNull(isolate())) break;
+              if (!map_prototype->IsJSObject()) return NoChange();
+              map = handle(Handle<JSObject>::cast(map_prototype)->map(),
+                           isolate());
+              if (!map->is_stable()) return NoChange();
+              if (!IsFastElementsKind(map->elements_kind())) return NoChange();
+              prototype_maps.push_back(map);
+            }
+          }
         }
-        if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
       }
 
-      // Generate possible elements kind transitions.
+      // Install dependencies on the relevant prototype maps.
+      for (Handle<Map> prototype_map : prototype_maps) {
+        dependencies()->AssumeMapStable(prototype_map);
+      }
+    }
+
+    // Ensure that {receiver} is a heap object.
+    effect = BuildCheckTaggedPointer(receiver, effect, control);
+
+    // Check for the monomorphic case.
+    if (access_infos.size() == 1) {
+      ElementAccessInfo access_info = access_infos.front();
+
+      // Perform possible elements kind transitions.
       for (auto transition : access_info.transitions()) {
-        DCHECK_LT(0u, num_transitions);
-        Handle<Map> transition_source = transition.first;
-        Handle<Map> transition_target = transition.second;
-        Node* transition_control;
-        Node* transition_effect = effect;
-
-        // Check if {receiver} has the specified {transition_source} map.
-        Node* check = graph()->NewNode(
-            simplified()->ReferenceEqual(Type::Any()), receiver_map,
-            jsgraph()->HeapConstant(transition_source));
-        if (--num_transitions == 0 && j == access_infos.size() - 1) {
-          transition_control = transition_effect =
-              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                               transition_effect, fallthrough_control);
-          fallthrough_control = nullptr;
-        } else {
-          Node* branch =
-              graph()->NewNode(common()->Branch(), check, fallthrough_control);
-          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
-          transition_control = graph()->NewNode(common()->IfTrue(), branch);
-        }
-
-        // Migrate {receiver} from {transition_source} to {transition_target}.
-        if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
-                                        transition_target->elements_kind())) {
-          // In-place migration, just store the {transition_target} map.
-          transition_effect = graph()->NewNode(
-              simplified()->StoreField(AccessBuilder::ForMap()), receiver,
-              jsgraph()->HeapConstant(transition_target), transition_effect,
-              transition_control);
-        } else {
-          // Instance migration, let the stub deal with the {receiver}.
-          TransitionElementsKindStub stub(isolate(),
-                                          transition_source->elements_kind(),
-                                          transition_target->elements_kind());
-          CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-              isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 0,
-              CallDescriptor::kNeedsFrameState, node->op()->properties());
-          transition_effect = graph()->NewNode(
-              common()->Call(desc), jsgraph()->HeapConstant(stub.GetCode()),
-              receiver, jsgraph()->HeapConstant(transition_target), context,
-              frame_state, transition_effect, transition_control);
-        }
-
-        this_controls.push_back(transition_control);
-        this_effects.push_back(transition_effect);
-      }
-
-      // Create single chokepoint for the control.
-      int const this_control_count = static_cast<int>(this_controls.size());
-      if (this_control_count == 1) {
-        this_control = this_controls.front();
-        this_effect = this_effects.front();
-      } else {
-        this_control =
-            graph()->NewNode(common()->Merge(this_control_count),
-                             this_control_count, &this_controls.front());
-        this_effects.push_back(this_control);
-        this_effect =
-            graph()->NewNode(common()->EffectPhi(this_control_count),
-                             this_control_count + 1, &this_effects.front());
+        Handle<Map> const transition_source = transition.first;
+        Handle<Map> const transition_target = transition.second;
+        effect = graph()->NewNode(
+            simplified()->TransitionElementsKind(
+                IsSimpleMapChangeTransition(transition_source->elements_kind(),
+                                            transition_target->elements_kind())
+                    ? ElementsTransition::kFastTransition
+                    : ElementsTransition::kSlowTransition),
+            receiver, jsgraph()->HeapConstant(transition_source),
+            jsgraph()->HeapConstant(transition_target), effect, control);
       }
 
       // TODO(turbofan): The effect/control linearization will not find a
@@ -664,188 +535,152 @@
       // elements kind transition above. This is because those operators
       // don't have the kNoWrite flag on it, even though they are not
       // observable by JavaScript.
-      this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
-                                     this_effect, this_control);
-    }
+      effect = graph()->NewNode(common()->Checkpoint(), frame_state, effect,
+                                control);
 
-    // Certain stores need a prototype chain check because shape changes
-    // could allow callbacks on elements in the prototype chain that are
-    // not compatible with (monomorphic) keyed stores.
-    Handle<JSObject> holder;
-    if (access_info.holder().ToHandle(&holder)) {
-      AssumePrototypesStable(receiver_type, native_context, holder);
-    }
+      // Perform map check on the {receiver}.
+      effect = BuildCheckMaps(receiver, effect, control,
+                              access_info.receiver_maps());
 
-    // TODO(bmeurer): We currently specialize based on elements kind. We should
-    // also be able to properly support strings and other JSObjects here.
-    ElementsKind elements_kind = access_info.elements_kind();
-
-    // Load the elements for the {receiver}.
-    Node* this_elements = this_effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
-        this_receiver, this_effect, this_control);
-
-    // Don't try to store to a copy-on-write backing store.
-    if (access_mode == AccessMode::kStore &&
-        IsFastSmiOrObjectElementsKind(elements_kind)) {
-      Node* this_elements_map = this_effect =
-          graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                           this_elements, this_effect, this_control);
-      Node* check = graph()->NewNode(
-          simplified()->ReferenceEqual(Type::Any()), this_elements_map,
-          jsgraph()->HeapConstant(factory()->fixed_array_map()));
-      this_control = this_effect =
-          graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                           this_effect, this_control);
-    }
-
-    // Load the length of the {receiver}.
-    Node* this_length = this_effect =
-        receiver_is_jsarray
-            ? graph()->NewNode(
-                  simplified()->LoadField(
-                      AccessBuilder::ForJSArrayLength(elements_kind)),
-                  this_receiver, this_effect, this_control)
-            : graph()->NewNode(
-                  simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
-                  this_elements, this_effect, this_control);
-
-    // Check that the {index} is in the valid range for the {receiver}.
-    this_index = this_effect =
-        graph()->NewNode(simplified()->CheckBounds(), this_index, this_length,
-                         this_effect, this_control);
-
-    // Compute the element access.
-    Type* element_type = Type::Any();
-    MachineType element_machine_type = MachineType::AnyTagged();
-    if (IsFastDoubleElementsKind(elements_kind)) {
-      element_type = Type::Number();
-      element_machine_type = MachineType::Float64();
-    } else if (IsFastSmiElementsKind(elements_kind)) {
-      element_type = type_cache_.kSmi;
-    }
-    ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
-                                    element_type, element_machine_type,
-                                    kFullWriteBarrier};
-
-    // Access the actual element.
-    // TODO(bmeurer): Refactor this into separate methods or even a separate
-    // class that deals with the elements access.
-    if (access_mode == AccessMode::kLoad) {
-      // Compute the real element access type, which includes the hole in case
-      // of holey backing stores.
-      if (elements_kind == FAST_HOLEY_ELEMENTS ||
-          elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
-        element_access.type = Type::Union(
-            element_type,
-            Type::Constant(factory()->the_hole_value(), graph()->zone()),
-            graph()->zone());
-      }
-      // Perform the actual backing store access.
-      this_value = this_effect = graph()->NewNode(
-          simplified()->LoadElement(element_access), this_elements, this_index,
-          this_effect, this_control);
-      // Handle loading from holey backing stores correctly, by either mapping
-      // the hole to undefined if possible, or deoptimizing otherwise.
-      if (elements_kind == FAST_HOLEY_ELEMENTS ||
-          elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
-        // Perform the hole check on the result.
-        CheckTaggedHoleMode mode = CheckTaggedHoleMode::kNeverReturnHole;
-        // Check if we are allowed to turn the hole into undefined.
-        Type* initial_holey_array_type = Type::Class(
-            handle(isolate()->get_initial_js_array_map(elements_kind)),
-            graph()->zone());
-        if (receiver_type->NowIs(initial_holey_array_type) &&
-            isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
-          // Add a code dependency on the array protector cell.
-          AssumePrototypesStable(receiver_type, native_context,
-                                 isolate()->initial_object_prototype());
-          dependencies()->AssumePropertyCell(factory()->array_protector());
-          // Turn the hole into undefined.
-          mode = CheckTaggedHoleMode::kConvertHoleToUndefined;
-        }
-        this_value = this_effect =
-            graph()->NewNode(simplified()->CheckTaggedHole(mode), this_value,
-                             this_effect, this_control);
-      } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
-        // Perform the hole check on the result.
-        CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
-        // Check if we are allowed to return the hole directly.
-        Type* initial_holey_array_type = Type::Class(
-            handle(isolate()->get_initial_js_array_map(elements_kind)),
-            graph()->zone());
-        if (receiver_type->NowIs(initial_holey_array_type) &&
-            isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
-          // Add a code dependency on the array protector cell.
-          AssumePrototypesStable(receiver_type, native_context,
-                                 isolate()->initial_object_prototype());
-          dependencies()->AssumePropertyCell(factory()->array_protector());
-          // Return the signaling NaN hole directly if all uses are truncating.
-          mode = CheckFloat64HoleMode::kAllowReturnHole;
-        }
-        this_value = this_effect =
-            graph()->NewNode(simplified()->CheckFloat64Hole(mode), this_value,
-                             this_effect, this_control);
-      }
+      // Access the actual element.
+      ValueEffectControl continuation = BuildElementAccess(
+          receiver, index, value, effect, control, native_context, access_info,
+          access_mode, store_mode);
+      value = continuation.value();
+      effect = continuation.effect();
+      control = continuation.control();
     } else {
-      DCHECK_EQ(AccessMode::kStore, access_mode);
-      if (IsFastSmiElementsKind(elements_kind)) {
-        this_value = this_effect =
-            graph()->NewNode(simplified()->CheckTaggedSigned(), this_value,
-                             this_effect, this_control);
-      } else if (IsFastDoubleElementsKind(elements_kind)) {
-        Node* check =
-            graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-        this_control = this_effect =
-            graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
-                             this_effect, this_control);
-        this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
-                                      this_value, this_control);
-        // Make sure we do not store signalling NaNs into holey double arrays.
-        if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
-          this_value =
-              graph()->NewNode(simplified()->NumberSilenceNaN(), this_value);
+      // The final states for every polymorphic branch. We join them with
+      // Merge+Phi+EffectPhi at the bottom.
+      ZoneVector<Node*> values(zone());
+      ZoneVector<Node*> effects(zone());
+      ZoneVector<Node*> controls(zone());
+
+      // Generate code for the various different element access patterns.
+      Node* fallthrough_control = control;
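+      // {fallthrough_control} tracks the control path on which none of the
+      // receiver maps checked so far have matched.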
+      for (size_t j = 0; j < access_infos.size(); ++j) {
+        ElementAccessInfo const& access_info = access_infos[j];
+        Node* this_receiver = receiver;
+        Node* this_value = value;
+        Node* this_index = index;
+        Node* this_effect = effect;
+        Node* this_control = fallthrough_control;
+
+        // Perform possible elements kind transitions.
+        for (auto transition : access_info.transitions()) {
+          Handle<Map> const transition_source = transition.first;
+          Handle<Map> const transition_target = transition.second;
+          this_effect = graph()->NewNode(
+              simplified()->TransitionElementsKind(
+                  IsSimpleMapChangeTransition(
+                      transition_source->elements_kind(),
+                      transition_target->elements_kind())
+                      ? ElementsTransition::kFastTransition
+                      : ElementsTransition::kSlowTransition),
+              receiver, jsgraph()->HeapConstant(transition_source),
+              jsgraph()->HeapConstant(transition_target), this_effect,
+              this_control);
         }
+
+        // Load the {receiver} map.
+        Node* receiver_map = this_effect =
+            graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                             receiver, this_effect, this_control);
+
+        // Perform map check(s) on {receiver}.
+        MapList const& receiver_maps = access_info.receiver_maps();
+        {
+          ZoneVector<Node*> this_controls(zone());
+          ZoneVector<Node*> this_effects(zone());
+          size_t num_classes = receiver_maps.size();
+          for (Handle<Map> map : receiver_maps) {
+            DCHECK_LT(0u, num_classes);
+            Node* check =
+                graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+                                 jsgraph()->Constant(map));
+            if (--num_classes == 0 && j == access_infos.size() - 1) {
+              // Last map check on the fallthrough control path; do a
+              // conditional eager deoptimization exit here.
+              // TODO(turbofan): This is ugly as hell! We should probably
+              // introduce macro-ish operators for property access that
+              // encapsulate this whole mess.
+              check = graph()->NewNode(simplified()->CheckIf(), check,
+                                       this_effect, this_control);
+              this_controls.push_back(this_control);
+              this_effects.push_back(check);
+              fallthrough_control = nullptr;
+            } else {
+              Node* branch = graph()->NewNode(common()->Branch(), check,
+                                              fallthrough_control);
+              this_controls.push_back(
+                  graph()->NewNode(common()->IfTrue(), branch));
+              this_effects.push_back(this_effect);
+              fallthrough_control =
+                  graph()->NewNode(common()->IfFalse(), branch);
+            }
+          }
+
+          // Create single chokepoint for the control.
+          int const this_control_count = static_cast<int>(this_controls.size());
+          if (this_control_count == 1) {
+            this_control = this_controls.front();
+            this_effect = this_effects.front();
+          } else {
+            this_control =
+                graph()->NewNode(common()->Merge(this_control_count),
+                                 this_control_count, &this_controls.front());
+            this_effects.push_back(this_control);
+            this_effect =
+                graph()->NewNode(common()->EffectPhi(this_control_count),
+                                 this_control_count + 1, &this_effects.front());
+
+            // TODO(turbofan): The effect/control linearization will not find a
+            // FrameState after the EffectPhi that is generated above.
+            this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
+                                           this_effect, this_control);
+          }
+        }
+
+        // Access the actual element.
+        ValueEffectControl continuation = BuildElementAccess(
+            this_receiver, this_index, this_value, this_effect, this_control,
+            native_context, access_info, access_mode, store_mode);
+        values.push_back(continuation.value());
+        effects.push_back(continuation.effect());
+        controls.push_back(continuation.control());
       }
-      this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
-                                     this_elements, this_index, this_value,
-                                     this_effect, this_control);
+
+      DCHECK_NULL(fallthrough_control);
+
+      // Generate the final merge point for all (polymorphic) branches.
+      int const control_count = static_cast<int>(controls.size());
+      if (control_count == 0) {
+        value = effect = control = jsgraph()->Dead();
+      } else if (control_count == 1) {
+        value = values.front();
+        effect = effects.front();
+        control = controls.front();
+      } else {
+        control = graph()->NewNode(common()->Merge(control_count),
+                                   control_count, &controls.front());
+        values.push_back(control);
+        value = graph()->NewNode(
+            common()->Phi(MachineRepresentation::kTagged, control_count),
+            control_count + 1, &values.front());
+        effects.push_back(control);
+        effect = graph()->NewNode(common()->EffectPhi(control_count),
+                                  control_count + 1, &effects.front());
+      }
     }
-
-    // Remember the final state for this element access.
-    values.push_back(this_value);
-    effects.push_back(this_effect);
-    controls.push_back(this_control);
   }
 
-  DCHECK_NULL(fallthrough_control);
-
-  // Generate the final merge point for all (polymorphic) branches.
-  int const control_count = static_cast<int>(controls.size());
-  if (control_count == 0) {
-    value = effect = control = jsgraph()->Dead();
-  } else if (control_count == 1) {
-    value = values.front();
-    effect = effects.front();
-    control = controls.front();
-  } else {
-    control = graph()->NewNode(common()->Merge(control_count), control_count,
-                               &controls.front());
-    values.push_back(control);
-    value = graph()->NewNode(
-        common()->Phi(MachineRepresentation::kTagged, control_count),
-        control_count + 1, &values.front());
-    effects.push_back(control);
-    effect = graph()->NewNode(common()->EffectPhi(control_count),
-                              control_count + 1, &effects.front());
-  }
   ReplaceWithValue(node, value, effect, control);
   return Replace(value);
 }
 
-
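+// Templated over the concrete IC nexus type, so that the nexus.GetKeyType()
+// call below works for both KeyedLoadICNexus and KeyedStoreICNexus.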
+template <typename KeyedICNexus>
 Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
-    Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
+    Node* node, Node* index, Node* value, KeyedICNexus const& nexus,
     AccessMode access_mode, LanguageMode language_mode,
     KeyedAccessStoreMode store_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -857,7 +692,9 @@
   if (nexus.IsUninitialized()) {
     if ((flags() & kDeoptimizationEnabled) &&
         (flags() & kBailoutOnUninitialized)) {
-      return ReduceSoftDeoptimize(node);
+      return ReduceSoftDeoptimize(
+          node,
+          DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
     }
     return NoChange();
   }
@@ -869,7 +706,9 @@
   } else if (receiver_maps.length() == 0) {
     if ((flags() & kDeoptimizationEnabled) &&
         (flags() & kBailoutOnUninitialized)) {
-      return ReduceSoftDeoptimize(node);
+      return ReduceSoftDeoptimize(
+          node,
+          DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
     }
     return NoChange();
   }
@@ -900,6 +739,11 @@
     return ReduceNamedAccess(node, value, receiver_maps,
                              handle(name, isolate()), access_mode,
                              language_mode, index);
+  } else if (nexus.GetKeyType() != ELEMENT) {
+    // The KeyedLoad/StoreIC has seen non-element accesses, so we cannot assume
+    // that the {index} is a valid array index; thus we just let the IC continue
+    // to deal with this load/store.
+    return NoChange();
   }
 
   // Try to lower the element access based on the {receiver_maps}.
@@ -907,14 +751,14 @@
                              language_mode, store_mode);
 }
 
-
-Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
+Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
+    Node* node, DeoptimizeReason reason) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* frame_state = NodeProperties::FindFrameStateBefore(node);
   Node* deoptimize =
-      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
-                       effect, control);
+      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+                       frame_state, effect, control);
   // TODO(bmeurer): This should be on the AdvancedReducer somehow.
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
   Revisit(graph()->end());
@@ -958,13 +802,529 @@
                            p.language_mode(), store_mode);
 }
 
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildPropertyAccess(
+    Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
+    Node* control, Handle<Name> name, Handle<Context> native_context,
+    PropertyAccessInfo const& access_info, AccessMode access_mode) {
+  // Determine actual holder and perform prototype chain checks.
+  Handle<JSObject> holder;
+  if (access_info.holder().ToHandle(&holder)) {
+    AssumePrototypesStable(access_info.receiver_maps(), native_context, holder);
+  }
+
+  // Generate the actual property access.
+  if (access_info.IsNotFound()) {
+    DCHECK_EQ(AccessMode::kLoad, access_mode);
+    value = jsgraph()->UndefinedConstant();
+  } else if (access_info.IsDataConstant()) {
+    Node* constant_value = jsgraph()->Constant(access_info.constant());
+    if (access_mode == AccessMode::kStore) {
+      // Storing to a data constant field is only valid if the incoming
+      // {value} matches the constant; otherwise we deoptimize.
+      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
+                                     constant_value);
+      effect =
+          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+    }
+    value = constant_value;
+  } else if (access_info.IsAccessorConstant()) {
+    // TODO(bmeurer): Properly rewire the IfException edge here if there's any.
+    Node* target = jsgraph()->Constant(access_info.constant());
+    FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+    Handle<SharedFunctionInfo> shared_info =
+        frame_info.shared_info().ToHandleChecked();
+    switch (access_mode) {
+      case AccessMode::kLoad: {
+        // We need a FrameState for the getter stub to restore the correct
+        // context before returning to fullcodegen.
+        FrameStateFunctionInfo const* frame_info0 =
+            common()->CreateFrameStateFunctionInfo(FrameStateType::kGetterStub,
+                                                   1, 0, shared_info);
+        Node* frame_state0 = graph()->NewNode(
+            common()->FrameState(BailoutId::None(),
+                                 OutputFrameStateCombine::Ignore(),
+                                 frame_info0),
+            graph()->NewNode(common()->StateValues(1), receiver),
+            jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+            context, target, frame_state);
+
+        // Introduce the call to the getter function.
+        value = effect = graph()->NewNode(
+            javascript()->CallFunction(
+                2, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
+            target, receiver, context, frame_state0, effect, control);
+        control = graph()->NewNode(common()->IfSuccess(), value);
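+        // Only the success projection is wired up here; exceptional control
+        // flow is not rewired yet (see the TODO above).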
+        break;
+      }
+      case AccessMode::kStore: {
+        // We need a FrameState for the setter stub to restore the correct
+        // context and return the appropriate value to fullcodegen.
+        FrameStateFunctionInfo const* frame_info0 =
+            common()->CreateFrameStateFunctionInfo(FrameStateType::kSetterStub,
+                                                   2, 0, shared_info);
+        Node* frame_state0 = graph()->NewNode(
+            common()->FrameState(BailoutId::None(),
+                                 OutputFrameStateCombine::Ignore(),
+                                 frame_info0),
+            graph()->NewNode(common()->StateValues(2), receiver, value),
+            jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+            context, target, frame_state);
+
+        // Introduce the call to the setter function.
+        effect = graph()->NewNode(
+            javascript()->CallFunction(
+                3, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
+            target, receiver, value, context, frame_state0, effect, control);
+        control = graph()->NewNode(common()->IfSuccess(), effect);
+        break;
+      }
+    }
+  } else {
+    DCHECK(access_info.IsDataField());
+    FieldIndex const field_index = access_info.field_index();
+    Type* const field_type = access_info.field_type();
+    if (access_mode == AccessMode::kLoad &&
+        access_info.holder().ToHandle(&holder)) {
+      receiver = jsgraph()->Constant(holder);
+    }
+    Node* storage = receiver;
+    if (!field_index.is_inobject()) {
+      storage = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
+          storage, effect, control);
+    }
+    FieldAccess field_access = {
+        kTaggedBase, field_index.offset(),     name,
+        field_type,  MachineType::AnyTagged(), kFullWriteBarrier};
+    if (access_mode == AccessMode::kLoad) {
+      if (field_type->Is(Type::UntaggedFloat64())) {
+        // TODO(turbofan): We remove the representation axis from the type to
+        // avoid uninhabited representation types. This is a workaround until
+        // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+        field_access.type = Type::Union(
+            field_type, Type::Representation(Type::Number(), zone()), zone());
+        if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+            !FLAG_unbox_double_fields) {
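+          // The double is boxed inside a (Mutable)HeapNumber here, so load
+          // the box first and then the actual float64 value from it.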
+          storage = effect = graph()->NewNode(
+              simplified()->LoadField(field_access), storage, effect, control);
+          field_access.offset = HeapNumber::kValueOffset;
+          field_access.name = MaybeHandle<Name>();
+        }
+        field_access.machine_type = MachineType::Float64();
+      }
+      value = effect = graph()->NewNode(simplified()->LoadField(field_access),
+                                        storage, effect, control);
+    } else {
+      DCHECK_EQ(AccessMode::kStore, access_mode);
+      if (field_type->Is(Type::UntaggedFloat64())) {
+        // TODO(turbofan): We remove the representation axis from the type to
+        // avoid uninhabited representation types. This is a workaround until
+        // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+        field_access.type = Type::Union(
+            field_type, Type::Representation(Type::Number(), zone()), zone());
+        value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+                                          effect, control);
+
+        if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+            !FLAG_unbox_double_fields) {
+          if (access_info.HasTransitionMap()) {
+            // Allocate a MutableHeapNumber for the new property.
+            effect = graph()->NewNode(
+                common()->BeginRegion(RegionObservability::kNotObservable),
+                effect);
+            Node* box = effect = graph()->NewNode(
+                simplified()->Allocate(NOT_TENURED),
+                jsgraph()->Constant(HeapNumber::kSize), effect, control);
+            effect = graph()->NewNode(
+                simplified()->StoreField(AccessBuilder::ForMap()), box,
+                jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+                effect, control);
+            effect = graph()->NewNode(
+                simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+                box, value, effect, control);
+            value = effect =
+                graph()->NewNode(common()->FinishRegion(), box, effect);
+
+            field_access.type = Type::TaggedPointer();
+          } else {
+            // We just store directly to the MutableHeapNumber.
+            storage = effect =
+                graph()->NewNode(simplified()->LoadField(field_access), storage,
+                                 effect, control);
+            field_access.offset = HeapNumber::kValueOffset;
+            field_access.name = MaybeHandle<Name>();
+            field_access.machine_type = MachineType::Float64();
+          }
+        } else {
+          // Unboxed double field, we store directly to the field.
+          field_access.machine_type = MachineType::Float64();
+        }
+      } else if (field_type->Is(Type::TaggedSigned())) {
+        value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+                                          value, effect, control);
+      } else if (field_type->Is(Type::TaggedPointer())) {
+        // Ensure that {value} is a HeapObject.
+        value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+                                          value, effect, control);
+        if (field_type->NumClasses() == 1) {
+          // Emit a map check for the value.
+          Node* field_map =
+              jsgraph()->Constant(field_type->Classes().Current());
+          effect = graph()->NewNode(simplified()->CheckMaps(1), value,
+                                    field_map, effect, control);
+        } else {
+          DCHECK_EQ(0, field_type->NumClasses());
+        }
+      } else {
+        DCHECK(field_type->Is(Type::Tagged()));
+      }
+      Handle<Map> transition_map;
+      if (access_info.transition_map().ToHandle(&transition_map)) {
+        effect = graph()->NewNode(
+            common()->BeginRegion(RegionObservability::kObservable), effect);
+        effect = graph()->NewNode(
+            simplified()->StoreField(AccessBuilder::ForMap()), receiver,
+            jsgraph()->Constant(transition_map), effect, control);
+      }
+      effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
+                                value, effect, control);
+      if (access_info.HasTransitionMap()) {
+        effect = graph()->NewNode(common()->FinishRegion(),
+                                  jsgraph()->UndefinedConstant(), effect);
+      }
+    }
+  }
+
+  return ValueEffectControl(value, effect, control);
+}
+
+namespace {
+
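+// Maps a fixed typed array elements kind (e.g. INT32_ELEMENTS) to the
+// corresponding ExternalArrayType (e.g. kExternalInt32Array).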
+ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
+  switch (kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case TYPE##_ELEMENTS:                                 \
+    return kExternal##Type##Array;
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return kExternalInt8Array;
+}
+
+}  // namespace
+
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildElementAccess(
+    Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+    Handle<Context> native_context, ElementAccessInfo const& access_info,
+    AccessMode access_mode, KeyedAccessStoreMode store_mode) {
+  // TODO(bmeurer): We currently specialize based on elements kind. We should
+  // also be able to properly support strings and other JSObjects here.
+  ElementsKind elements_kind = access_info.elements_kind();
+  MapList const& receiver_maps = access_info.receiver_maps();
+
+  // Load the elements for the {receiver}.
+  Node* elements = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+      effect, control);
+
+  // Don't try to store to a copy-on-write backing store.
+  if (access_mode == AccessMode::kStore &&
+      IsFastSmiOrObjectElementsKind(elements_kind) &&
+      store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+    effect =
+        graph()->NewNode(simplified()->CheckMaps(1), elements,
+                         jsgraph()->FixedArrayMapConstant(), effect, control);
+  }
+
+  if (IsFixedTypedArrayElementsKind(elements_kind)) {
+    // Load the {receiver}s length.
+    Node* length = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
+        receiver, effect, control);
+
+    // Check if the {receiver}s buffer was neutered.
+    Node* buffer = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+        receiver, effect, control);
+    Node* buffer_bitfield = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+        buffer, effect, control);
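+    // {check} is true iff the WasNeutered bit is not set in the bitfield,
+    // i.e. the {buffer}s backing store is still attached.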
+    Node* check = graph()->NewNode(
+        simplified()->NumberEqual(),
+        graph()->NewNode(
+            simplified()->NumberBitwiseAnd(), buffer_bitfield,
+            jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
+        jsgraph()->ZeroConstant());
+
+    // Default to zero if the {receiver}s buffer was neutered.
+    length = graph()->NewNode(
+        common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+        check, length, jsgraph()->ZeroConstant());
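+    // With {length} clamped to zero, the bounds check below either fails
+    // for a neutered buffer or causes the store to be skipped, so the
+    // backing store of a neutered buffer is never actually accessed.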
+
+    if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+      // Check that the {index} is a valid array index; we do the actual
+      // bounds check below and just skip the store if it's out of bounds
+      // for the {receiver}.
+      index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+                                        jsgraph()->Constant(Smi::kMaxValue),
+                                        effect, control);
+    } else {
+      // Check that the {index} is in the valid range for the {receiver}.
+      DCHECK_EQ(STANDARD_STORE, store_mode);
+      index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+                                        length, effect, control);
+    }
+
+    // Load the base and external pointer for the {receiver}.
+    Node* base_pointer = effect = graph()->NewNode(
+        simplified()->LoadField(
+            AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+        elements, effect, control);
+    Node* external_pointer = effect = graph()->NewNode(
+        simplified()->LoadField(
+            AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+        elements, effect, control);
+
+    // Access the actual element.
+    ExternalArrayType external_array_type =
+        GetArrayTypeFromElementsKind(elements_kind);
+    switch (access_mode) {
+      case AccessMode::kLoad: {
+        value = effect = graph()->NewNode(
+            simplified()->LoadTypedElement(external_array_type), buffer,
+            base_pointer, external_pointer, index, effect, control);
+        break;
+      }
+      case AccessMode::kStore: {
+        // Ensure that the {value} is actually a Number.
+        value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+                                          effect, control);
+
+        // Check if we can skip the out-of-bounds store.
+        if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+          Node* check =
+              graph()->NewNode(simplified()->NumberLessThan(), index, length);
+          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                          check, control);
+
+          Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+          Node* etrue = effect;
+          {
+            // Perform the actual store.
+            etrue = graph()->NewNode(
+                simplified()->StoreTypedElement(external_array_type), buffer,
+                base_pointer, external_pointer, index, value, etrue, if_true);
+          }
+
+          Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+          Node* efalse = effect;
+          {
+            // Just ignore the out-of-bounds write.
+          }
+
+          control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+          effect =
+              graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+        } else {
+          // Perform the actual store.
+          DCHECK_EQ(STANDARD_STORE, store_mode);
+          effect = graph()->NewNode(
+              simplified()->StoreTypedElement(external_array_type), buffer,
+              base_pointer, external_pointer, index, value, effect, control);
+        }
+        break;
+      }
+    }
+  } else {
+    // Check if the {receiver} is a JSArray.
+    bool receiver_is_jsarray = HasOnlyJSArrayMaps(receiver_maps);
+
+    // Load the length of the {receiver}.
+    Node* length = effect =
+        receiver_is_jsarray
+            ? graph()->NewNode(
+                  simplified()->LoadField(
+                      AccessBuilder::ForJSArrayLength(elements_kind)),
+                  receiver, effect, control)
+            : graph()->NewNode(
+                  simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+                  elements, effect, control);
+
+    // Check if we might need to grow the {elements} backing store.
+    if (IsGrowStoreMode(store_mode)) {
+      DCHECK_EQ(AccessMode::kStore, access_mode);
+
+      // Check that the {index} is a valid array index; the actual checking
+      // happens below right before the element store.
+      index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+                                        jsgraph()->Constant(Smi::kMaxValue),
+                                        effect, control);
+    } else {
+      // Check that the {index} is in the valid range for the {receiver}.
+      index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+                                        length, effect, control);
+    }
+
+    // Compute the element access.
+    Type* element_type = Type::NonInternal();
+    MachineType element_machine_type = MachineType::AnyTagged();
+    if (IsFastDoubleElementsKind(elements_kind)) {
+      element_type = Type::Number();
+      element_machine_type = MachineType::Float64();
+    } else if (IsFastSmiElementsKind(elements_kind)) {
+      element_type = type_cache_.kSmi;
+    }
+    ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+                                    element_type, element_machine_type,
+                                    kFullWriteBarrier};
+
+    // Access the actual element.
+    if (access_mode == AccessMode::kLoad) {
+      // Compute the real element access type, which includes the hole in case
+      // of holey backing stores.
+      if (elements_kind == FAST_HOLEY_ELEMENTS ||
+          elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+        element_access.type =
+            Type::Union(element_type, Type::Hole(), graph()->zone());
+      }
+      // Perform the actual backing store access.
+      value = effect =
+          graph()->NewNode(simplified()->LoadElement(element_access), elements,
+                           index, effect, control);
+      // Handle loading from holey backing stores correctly, by either mapping
+      // the hole to undefined if possible, or deoptimizing otherwise.
+      if (elements_kind == FAST_HOLEY_ELEMENTS ||
+          elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+        // Check if we are allowed to turn the hole into undefined.
+        if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+          // Turn the hole into undefined.
+          value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+                                   value);
+        } else {
+          // Bailout if we see the hole.
+          value = effect = graph()->NewNode(simplified()->CheckTaggedHole(),
+                                            value, effect, control);
+        }
+      } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+        // Perform the hole check on the result.
+        CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
+        // Check if we are allowed to return the hole directly.
+        if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+          // Return the signaling NaN hole directly if all uses are truncating.
+          mode = CheckFloat64HoleMode::kAllowReturnHole;
+        }
+        value = effect = graph()->NewNode(simplified()->CheckFloat64Hole(mode),
+                                          value, effect, control);
+      }
+    } else {
+      DCHECK_EQ(AccessMode::kStore, access_mode);
+      if (IsFastSmiElementsKind(elements_kind)) {
+        value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+                                          value, effect, control);
+      } else if (IsFastDoubleElementsKind(elements_kind)) {
+        value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+                                          effect, control);
+        // Make sure we do not store signaling NaNs into double arrays.
+        value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+      }
+
+      // Ensure that copy-on-write backing store is writable.
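+      // EnsureWritableFastElements copies the backing store if it is still
+      // copy-on-write, and is a no-op otherwise.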
+      if (IsFastSmiOrObjectElementsKind(elements_kind) &&
+          store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
+        elements = effect =
+            graph()->NewNode(simplified()->EnsureWritableFastElements(),
+                             receiver, elements, effect, control);
+      } else if (IsGrowStoreMode(store_mode)) {
+        // Grow {elements} backing store if necessary. Also updates the
+        // "length" property for JSArray {receiver}s, hence there must
+        // not be any other check after this operation, as the write
+        // to the "length" property is observable.
+        GrowFastElementsFlags flags = GrowFastElementsFlag::kNone;
+        if (receiver_is_jsarray) {
+          flags |= GrowFastElementsFlag::kArrayObject;
+        }
+        if (IsHoleyElementsKind(elements_kind)) {
+          flags |= GrowFastElementsFlag::kHoleyElements;
+        }
+        if (IsFastDoubleElementsKind(elements_kind)) {
+          flags |= GrowFastElementsFlag::kDoubleElements;
+        }
+        elements = effect = graph()->NewNode(
+            simplified()->MaybeGrowFastElements(flags), receiver, elements,
+            index, length, effect, control);
+      }
+
+      // Perform the actual element access.
+      effect = graph()->NewNode(simplified()->StoreElement(element_access),
+                                elements, index, value, effect, control);
+    }
+  }
+
+  return ValueEffectControl(value, effect, control);
+}
+
+Node* JSNativeContextSpecialization::BuildCheckMaps(
+    Node* receiver, Node* effect, Node* control,
+    std::vector<Handle<Map>> const& maps) {
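+  // If the {receiver} is a known constant with a stable map that is among
+  // the requested {maps}, a code dependency on that map's stability is
+  // sufficient and no runtime check is needed.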
+  HeapObjectMatcher m(receiver);
+  if (m.HasValue()) {
+    Handle<Map> receiver_map(m.Value()->map(), isolate());
+    if (receiver_map->is_stable()) {
+      for (Handle<Map> map : maps) {
+        if (map.is_identical_to(receiver_map)) {
+          dependencies()->AssumeMapStable(receiver_map);
+          return effect;
+        }
+      }
+    }
+  }
+  int const map_input_count = static_cast<int>(maps.size());
+  int const input_count = 1 + map_input_count + 1 + 1;
+  Node** inputs = zone()->NewArray<Node*>(input_count);
+  inputs[0] = receiver;
+  for (int i = 0; i < map_input_count; ++i) {
+    inputs[1 + i] = jsgraph()->HeapConstant(maps[i]);
+  }
+  inputs[input_count - 2] = effect;
+  inputs[input_count - 1] = control;
+  return graph()->NewNode(simplified()->CheckMaps(map_input_count), input_count,
+                          inputs);
+}
+
+Node* JSNativeContextSpecialization::BuildCheckTaggedPointer(Node* receiver,
+                                                             Node* effect,
+                                                             Node* control) {
+  switch (receiver->opcode()) {
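+    // All of the operators below are known to always produce a heap object,
+    // so no explicit check is needed for them.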
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kJSCreate:
+    case IrOpcode::kJSCreateArguments:
+    case IrOpcode::kJSCreateArray:
+    case IrOpcode::kJSCreateClosure:
+    case IrOpcode::kJSCreateIterResultObject:
+    case IrOpcode::kJSCreateLiteralArray:
+    case IrOpcode::kJSCreateLiteralObject:
+    case IrOpcode::kJSCreateLiteralRegExp:
+    case IrOpcode::kJSConvertReceiver:
+    case IrOpcode::kJSToName:
+    case IrOpcode::kJSToString:
+    case IrOpcode::kJSToObject:
+    case IrOpcode::kJSTypeOf: {
+      return effect;
+    }
+    default: {
+      return graph()->NewNode(simplified()->CheckTaggedPointer(), receiver,
+                              effect, control);
+    }
+  }
+}
 
 void JSNativeContextSpecialization::AssumePrototypesStable(
-    Type* receiver_type, Handle<Context> native_context,
-    Handle<JSObject> holder) {
+    std::vector<Handle<Map>> const& receiver_maps,
+    Handle<Context> native_context, Handle<JSObject> holder) {
   // Determine actual holder and perform prototype chain checks.
-  for (auto i = receiver_type->Classes(); !i.Done(); i.Advance()) {
-    Handle<Map> map = i.Current();
+  for (auto map : receiver_maps) {
     // Perform the implicit ToObject for primitives here.
     // Implemented according to ES6 section 7.3.2 GetV (V, P).
     Handle<JSFunction> constructor;
@@ -976,6 +1336,42 @@
   }
 }
 
+bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
+    std::vector<Handle<Map>> const& receiver_maps,
+    Handle<Context> native_context) {
+  // Check if the array prototype chain is intact.
+  if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
+
+  // Make sure both the initial Array and Object prototypes are stable.
+  Handle<JSObject> initial_array_prototype(
+      native_context->initial_array_prototype(), isolate());
+  Handle<JSObject> initial_object_prototype(
+      native_context->initial_object_prototype(), isolate());
+  if (!initial_array_prototype->map()->is_stable() ||
+      !initial_object_prototype->map()->is_stable()) {
+    return false;
+  }
+
+  // Check if all {receiver_maps} either have the initial Array.prototype
+  // or the initial Object.prototype as their prototype, as those are
+  // guarded by the array protector cell.
+  for (Handle<Map> map : receiver_maps) {
+    if (map->prototype() != *initial_array_prototype &&
+        map->prototype() != *initial_object_prototype) {
+      return false;
+    }
+  }
+
+  // Install code dependencies on the prototype maps.
+  for (Handle<Map> map : receiver_maps) {
+    dependencies()->AssumePrototypeMapsStable(map, initial_object_prototype);
+  }
+
+  // Install code dependency on the array protector cell.
+  dependencies()->AssumePropertyCell(factory()->array_protector());
+  return true;
+}
+
 bool JSNativeContextSpecialization::ExtractReceiverMaps(
     Node* receiver, Node* effect, FeedbackNexus const& nexus,
     MapHandleList* receiver_maps) {
@@ -1004,8 +1400,11 @@
 
 MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
                                                                  Node* effect) {
-  NodeMatcher m(receiver);
-  if (m.IsJSCreate()) {
+  HeapObjectMatcher m(receiver);
+  if (m.HasValue()) {
+    Handle<Map> receiver_map(m.Value()->map(), isolate());
+    if (receiver_map->is_stable()) return receiver_map;
+  } else if (m.IsJSCreate()) {
     HeapObjectMatcher mtarget(m.InputAt(0));
     HeapObjectMatcher mnewtarget(m.InputAt(1));
     if (mtarget.HasValue() && mnewtarget.HasValue()) {
@@ -1029,6 +1428,7 @@
       }
     }
   }
+  // TODO(turbofan): Go hunting for CheckMaps(receiver) in the effect chain?
   return MaybeHandle<Map>();
 }
 
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 7d43bfb..549dc93 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -7,6 +7,7 @@
 
 #include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
+#include "src/deoptimize-reason.h"
 
 namespace v8 {
 namespace internal {
@@ -23,9 +24,11 @@
 // Forward declarations.
 enum class AccessMode;
 class CommonOperatorBuilder;
+class ElementAccessInfo;
 class JSGraph;
 class JSOperatorBuilder;
 class MachineOperatorBuilder;
+class PropertyAccessInfo;
 class SimplifiedOperatorBuilder;
 
 
@@ -38,8 +41,9 @@
   // Flags that control the mode of operation.
   enum Flag {
     kNoFlags = 0u,
-    kBailoutOnUninitialized = 1u << 0,
-    kDeoptimizationEnabled = 1u << 1,
+    kAccessorInliningEnabled = 1u << 0,
+    kBailoutOnUninitialized = 1u << 1,
+    kDeoptimizationEnabled = 1u << 2,
   };
   typedef base::Flags<Flag> Flags;
 
@@ -62,29 +66,74 @@
                                 AccessMode access_mode,
                                 LanguageMode language_mode,
                                 KeyedAccessStoreMode store_mode);
+  template <typename KeyedICNexus>
   Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
-                              FeedbackNexus const& nexus,
-                              AccessMode access_mode,
+                              KeyedICNexus const& nexus, AccessMode access_mode,
                               LanguageMode language_mode,
                               KeyedAccessStoreMode store_mode);
-  Reduction ReduceNamedAccess(Node* node, Node* value,
-                              FeedbackNexus const& nexus, Handle<Name> name,
-                              AccessMode access_mode,
-                              LanguageMode language_mode);
+  Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
+                                       FeedbackNexus const& nexus,
+                                       Handle<Name> name,
+                                       AccessMode access_mode,
+                                       LanguageMode language_mode);
   Reduction ReduceNamedAccess(Node* node, Node* value,
                               MapHandleList const& receiver_maps,
                               Handle<Name> name, AccessMode access_mode,
                               LanguageMode language_mode,
                               Node* index = nullptr);
 
-  Reduction ReduceSoftDeoptimize(Node* node);
+  Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+
+  // A triple of nodes that represents a continuation.
+  class ValueEffectControl final {
+   public:
+    ValueEffectControl(Node* value, Node* effect, Node* control)
+        : value_(value), effect_(effect), control_(control) {}
+
+    Node* value() const { return value_; }
+    Node* effect() const { return effect_; }
+    Node* control() const { return control_; }
+
+   private:
+    Node* const value_;
+    Node* const effect_;
+    Node* const control_;
+  };
+
+  // Construct the appropriate subgraph for property access.
+  ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
+                                         Node* context, Node* frame_state,
+                                         Node* effect, Node* control,
+                                         Handle<Name> name,
+                                         Handle<Context> native_context,
+                                         PropertyAccessInfo const& access_info,
+                                         AccessMode access_mode);
+
+  // Construct the appropriate subgraph for element access.
+  ValueEffectControl BuildElementAccess(
+      Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+      Handle<Context> native_context, ElementAccessInfo const& access_info,
+      AccessMode access_mode, KeyedAccessStoreMode store_mode);
+
+  // Construct an appropriate map check.
+  Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
+                       std::vector<Handle<Map>> const& maps);
+
+  // Construct an appropriate heap object check.
+  Node* BuildCheckTaggedPointer(Node* receiver, Node* effect, Node* control);
 
-  // Adds stability dependencies on all prototypes of every class in
-  // {receiver_type} up to (and including) the {holder}.
+  // Adds stability dependencies on all prototypes of every map in
+  // {receiver_maps} up to (and including) the {holder}.
-  void AssumePrototypesStable(Type* receiver_type,
+  void AssumePrototypesStable(std::vector<Handle<Map>> const& receiver_maps,
                               Handle<Context> native_context,
                               Handle<JSObject> holder);
 
+  // Checks if we can turn the hole into undefined when loading an element
+  // from an object with one of the {receiver_maps}; sets up appropriate
+  // code dependencies and might use the array protector cell.
+  bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps,
+                               Handle<Context> native_context);
+
   // Extract receiver maps from {nexus} and filter based on {receiver} if
   // possible.
   bool ExtractReceiverMaps(Node* receiver, Node* effect,
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 89c0eee..d19bb76 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -376,7 +376,7 @@
   return OpParameter<CreateLiteralParameters>(op);
 }
 
-const BinaryOperationHints& BinaryOperationHintsOf(const Operator* op) {
+const BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
   DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
          op->opcode() == IrOpcode::kJSBitwiseXor ||
          op->opcode() == IrOpcode::kJSBitwiseAnd ||
@@ -388,10 +388,10 @@
          op->opcode() == IrOpcode::kJSMultiply ||
          op->opcode() == IrOpcode::kJSDivide ||
          op->opcode() == IrOpcode::kJSModulus);
-  return OpParameter<BinaryOperationHints>(op);
+  return OpParameter<BinaryOperationHint>(op);
 }
 
-const CompareOperationHints& CompareOperationHintsOf(const Operator* op) {
+const CompareOperationHint CompareOperationHintOf(const Operator* op) {
   DCHECK(op->opcode() == IrOpcode::kJSEqual ||
          op->opcode() == IrOpcode::kJSNotEqual ||
          op->opcode() == IrOpcode::kJSStrictEqual ||
@@ -400,7 +400,7 @@
          op->opcode() == IrOpcode::kJSGreaterThan ||
          op->opcode() == IrOpcode::kJSLessThanOrEqual ||
          op->opcode() == IrOpcode::kJSGreaterThanOrEqual);
-  return OpParameter<CompareOperationHints>(op);
+  return OpParameter<CompareOperationHint>(op);
 }
 
 #define CACHED_OP_LIST(V)                                   \
@@ -422,214 +422,135 @@
   V(LoadMessage, Operator::kNoThrow, 0, 1)                  \
   V(StoreMessage, Operator::kNoThrow, 1, 0)                 \
   V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
-  V(StackCheck, Operator::kNoProperties, 0, 0)              \
-  V(CreateWithContext, Operator::kNoProperties, 2, 1)       \
-  V(CreateModuleContext, Operator::kNoProperties, 2, 1)
+  V(StackCheck, Operator::kNoWrite, 0, 0)                   \
+  V(CreateWithContext, Operator::kNoProperties, 2, 1)
+
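+// Binary and compare operators are parameterized by a type feedback hint;
+// the JSOperatorGlobalCache below keeps one shared Operator instance per
+// (operator, hint) pair.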
+#define BINARY_OP_LIST(V) \
+  V(BitwiseOr)            \
+  V(BitwiseXor)           \
+  V(BitwiseAnd)           \
+  V(ShiftLeft)            \
+  V(ShiftRight)           \
+  V(ShiftRightLogical)    \
+  V(Add)                  \
+  V(Subtract)             \
+  V(Multiply)             \
+  V(Divide)               \
+  V(Modulus)
+
+#define COMPARE_OP_LIST(V)                    \
+  V(Equal, Operator::kNoProperties)           \
+  V(NotEqual, Operator::kNoProperties)        \
+  V(StrictEqual, Operator::kPure)             \
+  V(StrictNotEqual, Operator::kPure)          \
+  V(LessThan, Operator::kNoProperties)        \
+  V(GreaterThan, Operator::kNoProperties)     \
+  V(LessThanOrEqual, Operator::kNoProperties) \
+  V(GreaterThanOrEqual, Operator::kNoProperties)
 
 struct JSOperatorGlobalCache final {
-#define CACHED(Name, properties, value_input_count, value_output_count)  \
-  struct Name##Operator final : public Operator {                        \
-    Name##Operator()                                                     \
-        : Operator(IrOpcode::kJS##Name, properties, "JS" #Name,          \
-                   value_input_count, Operator::ZeroIfPure(properties),  \
-                   Operator::ZeroIfEliminatable(properties),             \
-                   value_output_count, Operator::ZeroIfPure(properties), \
-                   Operator::ZeroIfNoThrow(properties)) {}               \
-  };                                                                     \
+#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
+  struct Name##Operator final : public Operator {                          \
+    Name##Operator()                                                       \
+        : Operator(IrOpcode::kJS##Name, properties, "JS" #Name,            \
+                   value_input_count, Operator::ZeroIfPure(properties),    \
+                   Operator::ZeroIfEliminatable(properties),               \
+                   value_output_count, Operator::ZeroIfPure(properties),   \
+                   Operator::ZeroIfNoThrow(properties)) {}                 \
+  };                                                                       \
   Name##Operator k##Name##Operator;
-  CACHED_OP_LIST(CACHED)
-#undef CACHED
-};
+  CACHED_OP_LIST(CACHED_OP)
+#undef CACHED_OP
 
+#define BINARY_OP(Name)                                                       \
+  template <BinaryOperationHint kHint>                                        \
+  struct Name##Operator final : public Operator1<BinaryOperationHint> {       \
+    Name##Operator()                                                          \
+        : Operator1<BinaryOperationHint>(IrOpcode::kJS##Name,                 \
+                                         Operator::kNoProperties, "JS" #Name, \
+                                         2, 1, 1, 1, 1, 2, kHint) {}          \
+  };                                                                          \
+  Name##Operator<BinaryOperationHint::kNone> k##Name##NoneOperator;           \
+  Name##Operator<BinaryOperationHint::kSignedSmall>                           \
+      k##Name##SignedSmallOperator;                                           \
+  Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator;   \
+  Name##Operator<BinaryOperationHint::kNumberOrOddball>                       \
+      k##Name##NumberOrOddballOperator;                                       \
+  Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
+  BINARY_OP_LIST(BINARY_OP)
+#undef BINARY_OP
+
+#define COMPARE_OP(Name, properties)                                      \
+  template <CompareOperationHint kHint>                                   \
+  struct Name##Operator final : public Operator1<CompareOperationHint> {  \
+    Name##Operator()                                                      \
+        : Operator1<CompareOperationHint>(                                \
+              IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
+              Operator::ZeroIfNoThrow(properties), kHint) {}              \
+  };                                                                      \
+  Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator;      \
+  Name##Operator<CompareOperationHint::kSignedSmall>                      \
+      k##Name##SignedSmallOperator;                                       \
+  Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator;  \
+  Name##Operator<CompareOperationHint::kNumberOrOddball>                  \
+      k##Name##NumberOrOddballOperator;                                   \
+  Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
+  COMPARE_OP_LIST(COMPARE_OP)
+#undef COMPARE_OP
+};
 
 static base::LazyInstance<JSOperatorGlobalCache>::type kCache =
     LAZY_INSTANCE_INITIALIZER;
 
-
 JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
     : cache_(kCache.Get()), zone_(zone) {}
 
-
-#define CACHED(Name, properties, value_input_count, value_output_count) \
-  const Operator* JSOperatorBuilder::Name() {                           \
-    return &cache_.k##Name##Operator;                                   \
+#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
+  const Operator* JSOperatorBuilder::Name() {                              \
+    return &cache_.k##Name##Operator;                                      \
   }
-CACHED_OP_LIST(CACHED)
-#undef CACHED
+CACHED_OP_LIST(CACHED_OP)
+#undef CACHED_OP
 
-const Operator* JSOperatorBuilder::BitwiseOr(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(  //--
-      IrOpcode::kJSBitwiseOr, Operator::kNoProperties,  // opcode
-      "JSBitwiseOr",                                    // name
-      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
-      hints);                                           // parameter
-}
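+// Each builder method returns the pre-cached operator for the requested
+// feedback hint.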
+#define BINARY_OP(Name)                                               \
+  const Operator* JSOperatorBuilder::Name(BinaryOperationHint hint) { \
+    switch (hint) {                                                   \
+      case BinaryOperationHint::kNone:                                \
+        return &cache_.k##Name##NoneOperator;                         \
+      case BinaryOperationHint::kSignedSmall:                         \
+        return &cache_.k##Name##SignedSmallOperator;                  \
+      case BinaryOperationHint::kSigned32:                            \
+        return &cache_.k##Name##Signed32Operator;                     \
+      case BinaryOperationHint::kNumberOrOddball:                     \
+        return &cache_.k##Name##NumberOrOddballOperator;              \
+      case BinaryOperationHint::kAny:                                 \
+        return &cache_.k##Name##AnyOperator;                          \
+    }                                                                 \
+    UNREACHABLE();                                                    \
+    return nullptr;                                                   \
+  }
+BINARY_OP_LIST(BINARY_OP)
+#undef BINARY_OP
 
-const Operator* JSOperatorBuilder::BitwiseXor(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(   //--
-      IrOpcode::kJSBitwiseXor, Operator::kNoProperties,  // opcode
-      "JSBitwiseXor",                                    // name
-      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::BitwiseAnd(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(   //--
-      IrOpcode::kJSBitwiseAnd, Operator::kNoProperties,  // opcode
-      "JSBitwiseAnd",                                    // name
-      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::ShiftLeft(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(  //--
-      IrOpcode::kJSShiftLeft, Operator::kNoProperties,  // opcode
-      "JSShiftLeft",                                    // name
-      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
-      hints);                                           // parameter
-}
-
-const Operator* JSOperatorBuilder::ShiftRight(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(   //--
-      IrOpcode::kJSShiftRight, Operator::kNoProperties,  // opcode
-      "JSShiftRight",                                    // name
-      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::ShiftRightLogical(
-    BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(          //--
-      IrOpcode::kJSShiftRightLogical, Operator::kNoProperties,  // opcode
-      "JSShiftRightLogical",                                    // name
-      2, 1, 1, 1, 1, 2,  // inputs/outputs
-      hints);            // parameter
-}
-
-const Operator* JSOperatorBuilder::Add(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(  //--
-      IrOpcode::kJSAdd, Operator::kNoProperties,        // opcode
-      "JSAdd",                                          // name
-      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
-      hints);                                           // parameter
-}
-
-const Operator* JSOperatorBuilder::Subtract(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(  //--
-      IrOpcode::kJSSubtract, Operator::kNoProperties,   // opcode
-      "JSSubtract",                                     // name
-      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
-      hints);                                           // parameter
-}
-
-const Operator* JSOperatorBuilder::Multiply(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(  //--
-      IrOpcode::kJSMultiply, Operator::kNoProperties,   // opcode
-      "JSMultiply",                                     // name
-      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
-      hints);                                           // parameter
-}
-
-const Operator* JSOperatorBuilder::Divide(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(  //--
-      IrOpcode::kJSDivide, Operator::kNoProperties,     // opcode
-      "JSDivide",                                       // name
-      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
-      hints);                                           // parameter
-}
-
-const Operator* JSOperatorBuilder::Modulus(BinaryOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<BinaryOperationHints>(  //--
-      IrOpcode::kJSModulus, Operator::kNoProperties,    // opcode
-      "JSModulus",                                      // name
-      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
-      hints);                                           // parameter
-}
-
-const Operator* JSOperatorBuilder::Equal(CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(  //--
-      IrOpcode::kJSEqual, Operator::kNoProperties,       // opcode
-      "JSEqual",                                         // name
-      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::NotEqual(CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(  //--
-      IrOpcode::kJSNotEqual, Operator::kNoProperties,    // opcode
-      "JSNotEqual",                                      // name
-      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::StrictEqual(CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(  //--
-      IrOpcode::kJSStrictEqual, Operator::kPure,         // opcode
-      "JSStrictEqual",                                   // name
-      2, 0, 0, 1, 0, 0,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::StrictNotEqual(CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(  //--
-      IrOpcode::kJSStrictNotEqual, Operator::kPure,      // opcode
-      "JSStrictNotEqual",                                // name
-      2, 0, 0, 1, 0, 0,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::LessThan(CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(  //--
-      IrOpcode::kJSLessThan, Operator::kNoProperties,    // opcode
-      "JSLessThan",                                      // name
-      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
-      hints);                                            // parameter
-}
-
-const Operator* JSOperatorBuilder::GreaterThan(CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(   //--
-      IrOpcode::kJSGreaterThan, Operator::kNoProperties,  // opcode
-      "JSGreaterThan",                                    // name
-      2, 1, 1, 1, 1, 2,                                   // inputs/outputs
-      hints);                                             // parameter
-}
-
-const Operator* JSOperatorBuilder::LessThanOrEqual(
-    CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(       //--
-      IrOpcode::kJSLessThanOrEqual, Operator::kNoProperties,  // opcode
-      "JSLessThanOrEqual",                                    // name
-      2, 1, 1, 1, 1, 2,                                       // inputs/outputs
-      hints);                                                 // parameter
-}
-
-const Operator* JSOperatorBuilder::GreaterThanOrEqual(
-    CompareOperationHints hints) {
-  // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<CompareOperationHints>(          //--
-      IrOpcode::kJSGreaterThanOrEqual, Operator::kNoProperties,  // opcode
-      "JSGreaterThanOrEqual",                                    // name
-      2, 1, 1, 1, 1, 2,  // inputs/outputs
-      hints);            // parameter
-}
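+// Likewise for the compare operators; the properties argument is unused here
+// since it is only needed where the cached instances are defined.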
+#define COMPARE_OP(Name, ...)                                          \
+  const Operator* JSOperatorBuilder::Name(CompareOperationHint hint) { \
+    switch (hint) {                                                    \
+      case CompareOperationHint::kNone:                                \
+        return &cache_.k##Name##NoneOperator;                          \
+      case CompareOperationHint::kSignedSmall:                         \
+        return &cache_.k##Name##SignedSmallOperator;                   \
+      case CompareOperationHint::kNumber:                              \
+        return &cache_.k##Name##NumberOperator;                        \
+      case CompareOperationHint::kNumberOrOddball:                     \
+        return &cache_.k##Name##NumberOrOddballOperator;               \
+      case CompareOperationHint::kAny:                                 \
+        return &cache_.k##Name##AnyOperator;                           \
+    }                                                                  \
+    UNREACHABLE();                                                     \
+    return nullptr;                                                    \
+  }
+COMPARE_OP_LIST(COMPARE_OP)
+#undef COMPARE_OP
 
 const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
@@ -691,11 +612,11 @@
 
 const Operator* JSOperatorBuilder::ConvertReceiver(
     ConvertReceiverMode convert_mode) {
-  return new (zone()) Operator1<ConvertReceiverMode>(    // --
-      IrOpcode::kJSConvertReceiver, Operator::kNoThrow,  // opcode
-      "JSConvertReceiver",                               // name
-      1, 1, 1, 1, 1, 0,                                  // counts
-      convert_mode);                                     // parameter
+  return new (zone()) Operator1<ConvertReceiverMode>(         // --
+      IrOpcode::kJSConvertReceiver, Operator::kEliminatable,  // opcode
+      "JSConvertReceiver",                                    // name
+      1, 1, 1, 1, 1, 0,                                       // counts
+      convert_mode);                                          // parameter
 }
 
 const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 8390cbd..19022fa 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -374,9 +374,9 @@
 
 const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
 
-const BinaryOperationHints& BinaryOperationHintsOf(const Operator* op);
+const BinaryOperationHint BinaryOperationHintOf(const Operator* op);
 
-const CompareOperationHints& CompareOperationHintsOf(const Operator* op);
+const CompareOperationHint CompareOperationHintOf(const Operator* op);
 
 // Interface for building JavaScript-level operators, e.g. directly from the
 // AST. Most operators have no parameters, thus can be globally shared for all
@@ -385,25 +385,26 @@
  public:
   explicit JSOperatorBuilder(Zone* zone);
 
-  const Operator* Equal(CompareOperationHints hints);
-  const Operator* NotEqual(CompareOperationHints hints);
-  const Operator* StrictEqual(CompareOperationHints hints);
-  const Operator* StrictNotEqual(CompareOperationHints hints);
-  const Operator* LessThan(CompareOperationHints hints);
-  const Operator* GreaterThan(CompareOperationHints hints);
-  const Operator* LessThanOrEqual(CompareOperationHints hints);
-  const Operator* GreaterThanOrEqual(CompareOperationHints hints);
-  const Operator* BitwiseOr(BinaryOperationHints hints);
-  const Operator* BitwiseXor(BinaryOperationHints hints);
-  const Operator* BitwiseAnd(BinaryOperationHints hints);
-  const Operator* ShiftLeft(BinaryOperationHints hints);
-  const Operator* ShiftRight(BinaryOperationHints hints);
-  const Operator* ShiftRightLogical(BinaryOperationHints hints);
-  const Operator* Add(BinaryOperationHints hints);
-  const Operator* Subtract(BinaryOperationHints hints);
-  const Operator* Multiply(BinaryOperationHints hints);
-  const Operator* Divide(BinaryOperationHints hints);
-  const Operator* Modulus(BinaryOperationHints hints);
+  const Operator* Equal(CompareOperationHint hint);
+  const Operator* NotEqual(CompareOperationHint hint);
+  const Operator* StrictEqual(CompareOperationHint hint);
+  const Operator* StrictNotEqual(CompareOperationHint hint);
+  const Operator* LessThan(CompareOperationHint hint);
+  const Operator* GreaterThan(CompareOperationHint hint);
+  const Operator* LessThanOrEqual(CompareOperationHint hint);
+  const Operator* GreaterThanOrEqual(CompareOperationHint hint);
+
+  const Operator* BitwiseOr(BinaryOperationHint hint);
+  const Operator* BitwiseXor(BinaryOperationHint hint);
+  const Operator* BitwiseAnd(BinaryOperationHint hint);
+  const Operator* ShiftLeft(BinaryOperationHint hint);
+  const Operator* ShiftRight(BinaryOperationHint hint);
+  const Operator* ShiftRightLogical(BinaryOperationHint hint);
+  const Operator* Add(BinaryOperationHint hint);
+  const Operator* Subtract(BinaryOperationHint hint);
+  const Operator* Multiply(BinaryOperationHint hint);
+  const Operator* Divide(BinaryOperationHint hint);
+  const Operator* Modulus(BinaryOperationHint hint);
 
   const Operator* ToBoolean(ToBooleanHints hints);
   const Operator* ToInteger();
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index fcfe134..89ab0de 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/js-typed-lowering.h"
+
+#include "src/builtins/builtins-utils.h"
 #include "src/code-factory.h"
 #include "src/compilation-dependencies.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
-#include "src/compiler/js-typed-lowering.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
@@ -27,42 +29,51 @@
   JSBinopReduction(JSTypedLowering* lowering, Node* node)
       : lowering_(lowering), node_(node) {}
 
-  BinaryOperationHints::Hint GetNumberBinaryOperationFeedback() {
-    if (!(lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) ||
-        !(lowering_->flags() & JSTypedLowering::kTypeFeedbackEnabled)) {
-      return BinaryOperationHints::kAny;
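+  // Translate the operator's type feedback into a NumberOperationHint.
+  // Returns false if deoptimization is disabled or the feedback does not
+  // justify a speculative number operation.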
+  bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_NE(0, node_->op()->ControlOutputCount());
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node_->op()));
+      switch (BinaryOperationHintOf(node_->op())) {
+        case BinaryOperationHint::kSignedSmall:
+          *hint = NumberOperationHint::kSignedSmall;
+          return true;
+        case BinaryOperationHint::kSigned32:
+          *hint = NumberOperationHint::kSigned32;
+          return true;
+        case BinaryOperationHint::kNumberOrOddball:
+          *hint = NumberOperationHint::kNumberOrOddball;
+          return true;
+        case BinaryOperationHint::kAny:
+        case BinaryOperationHint::kNone:
+          break;
+      }
     }
-    DCHECK_NE(0, node_->op()->ControlOutputCount());
-    DCHECK_EQ(1, node_->op()->EffectOutputCount());
-    DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
-    BinaryOperationHints hints = BinaryOperationHintsOf(node_->op());
-    BinaryOperationHints::Hint combined = hints.combined();
-    if (combined == BinaryOperationHints::kSignedSmall ||
-        combined == BinaryOperationHints::kSigned32 ||
-        combined == BinaryOperationHints::kNumberOrUndefined) {
-      return combined;
-    }
-    return BinaryOperationHints::kAny;
+    return false;
   }
 
-  CompareOperationHints::Hint GetNumberCompareOperationFeedback() {
-    if (!(lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) ||
-        !(lowering_->flags() & JSTypedLowering::kTypeFeedbackEnabled)) {
-      return CompareOperationHints::kAny;
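+  // Same translation as above, for the feedback on compare operators.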
+  bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      switch (CompareOperationHintOf(node_->op())) {
+        case CompareOperationHint::kSignedSmall:
+          *hint = NumberOperationHint::kSignedSmall;
+          return true;
+        case CompareOperationHint::kNumber:
+          *hint = NumberOperationHint::kNumber;
+          return true;
+        case CompareOperationHint::kNumberOrOddball:
+          *hint = NumberOperationHint::kNumberOrOddball;
+          return true;
+        case CompareOperationHint::kAny:
+        case CompareOperationHint::kNone:
+          break;
+      }
     }
-    DCHECK_NE(0, node_->op()->ControlOutputCount());
-    DCHECK_EQ(1, node_->op()->EffectOutputCount());
-    DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
-    CompareOperationHints hints = CompareOperationHintsOf(node_->op());
-    CompareOperationHints::Hint combined = hints.combined();
-    if (combined == CompareOperationHints::kSignedSmall ||
-        combined == CompareOperationHints::kNumber) {
-      return combined;
-    }
-    return CompareOperationHints::kAny;
+    return false;
   }
 
-  void ConvertInputsToNumber(Node* frame_state) {
+  void ConvertInputsToNumber() {
     // To convert the inputs to numbers, we have to provide frame states
     // for lazy bailouts in the ToNumber conversions.
     // We use a little hack here: we take the frame state before the binary
@@ -78,17 +89,17 @@
     bool handles_exception = NodeProperties::IsExceptionalCall(node_);
 
     if (!left_is_primitive && !right_is_primitive && handles_exception) {
-      ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
+      ConvertBothInputsToNumber(&left_input, &right_input);
     } else {
       left_input = left_is_primitive
                        ? ConvertPlainPrimitiveToNumber(left())
                        : ConvertSingleInputToNumber(
-                             left(), CreateFrameStateForLeftInput(frame_state));
-      right_input = right_is_primitive
-                        ? ConvertPlainPrimitiveToNumber(right())
-                        : ConvertSingleInputToNumber(
-                              right(), CreateFrameStateForRightInput(
-                                           frame_state, left_input));
+                             left(), CreateFrameStateForLeftInput());
+      right_input =
+          right_is_primitive
+              ? ConvertPlainPrimitiveToNumber(right())
+              : ConvertSingleInputToNumber(
+                    right(), CreateFrameStateForRightInput(left_input));
     }
 
     node_->ReplaceInput(0, left_input);
@@ -142,7 +153,8 @@
     return lowering_->Changed(node_);
   }
 
-  Reduction ChangeToSpeculativeOperator(const Operator* op, Type* upper_bound) {
+  Reduction ChangeToSpeculativeOperator(const Operator* op, bool invert,
+                                        Type* upper_bound) {
     DCHECK_EQ(1, op->EffectInputCount());
     DCHECK_EQ(1, op->EffectOutputCount());
     DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
@@ -154,8 +166,6 @@
     DCHECK_EQ(1, node_->op()->EffectInputCount());
     DCHECK_EQ(1, node_->op()->EffectOutputCount());
     DCHECK_EQ(1, node_->op()->ControlInputCount());
-    DCHECK_LT(1, node_->op()->ControlOutputCount());
-    DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
     DCHECK_EQ(2, node_->op()->ValueInputCount());
 
     // Reconnect the control output to bypass the IfSuccess node and
@@ -174,9 +184,10 @@
       }
     }
 
-    // Remove both bailout frame states and the context.
-    node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_) + 1);
-    node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_));
+    // Remove the frame state and the context.
+    if (OperatorProperties::HasFrameStateInput(node_->op())) {
+      node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_));
+    }
     node_->RemoveInput(NodeProperties::FirstContextIndex(node_));
 
     NodeProperties::ChangeOp(node_, op);
@@ -186,6 +197,14 @@
     NodeProperties::SetType(node_,
                             Type::Intersect(node_type, upper_bound, zone()));
 
+    if (invert) {
+      // Insert a BooleanNot to invert the value.
+      Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
+      node_->ReplaceUses(value);
+      // Note: ReplaceUses() replaces all uses, including the input of the
+      // BooleanNot itself, so restore that input here.
+      value->ReplaceInput(0, node_);
+      return lowering_->Replace(value);
+    }
     return lowering_->Changed(node_);
   }
 
@@ -193,6 +212,72 @@
     return ChangeToPureOperator(op, false, type);
   }
 
+  Reduction ChangeToSpeculativeOperator(const Operator* op, Type* type) {
+    return ChangeToSpeculativeOperator(op, false, type);
+  }
+
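+  // The pure simplified operator corresponding to this node's JS opcode.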
+  const Operator* NumberOp() {
+    switch (node_->opcode()) {
+      case IrOpcode::kJSAdd:
+        return simplified()->NumberAdd();
+      case IrOpcode::kJSSubtract:
+        return simplified()->NumberSubtract();
+      case IrOpcode::kJSMultiply:
+        return simplified()->NumberMultiply();
+      case IrOpcode::kJSDivide:
+        return simplified()->NumberDivide();
+      case IrOpcode::kJSModulus:
+        return simplified()->NumberModulus();
+      case IrOpcode::kJSBitwiseAnd:
+        return simplified()->NumberBitwiseAnd();
+      case IrOpcode::kJSBitwiseOr:
+        return simplified()->NumberBitwiseOr();
+      case IrOpcode::kJSBitwiseXor:
+        return simplified()->NumberBitwiseXor();
+      case IrOpcode::kJSShiftLeft:
+        return simplified()->NumberShiftLeft();
+      case IrOpcode::kJSShiftRight:
+        return simplified()->NumberShiftRight();
+      case IrOpcode::kJSShiftRightLogical:
+        return simplified()->NumberShiftRightLogical();
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
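+  // The speculative simplified operator corresponding to this node's JS
+  // opcode, guarded by the given feedback hint.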
+  const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+    switch (node_->opcode()) {
+      case IrOpcode::kJSAdd:
+        return simplified()->SpeculativeNumberAdd(hint);
+      case IrOpcode::kJSSubtract:
+        return simplified()->SpeculativeNumberSubtract(hint);
+      case IrOpcode::kJSMultiply:
+        return simplified()->SpeculativeNumberMultiply(hint);
+      case IrOpcode::kJSDivide:
+        return simplified()->SpeculativeNumberDivide(hint);
+      case IrOpcode::kJSModulus:
+        return simplified()->SpeculativeNumberModulus(hint);
+      case IrOpcode::kJSBitwiseAnd:
+        return simplified()->SpeculativeNumberBitwiseAnd(hint);
+      case IrOpcode::kJSBitwiseOr:
+        return simplified()->SpeculativeNumberBitwiseOr(hint);
+      case IrOpcode::kJSBitwiseXor:
+        return simplified()->SpeculativeNumberBitwiseXor(hint);
+      case IrOpcode::kJSShiftLeft:
+        return simplified()->SpeculativeNumberShiftLeft(hint);
+      case IrOpcode::kJSShiftRight:
+        return simplified()->SpeculativeNumberShiftRight(hint);
+      case IrOpcode::kJSShiftRightLogical:
+        return simplified()->SpeculativeNumberShiftRightLogical(hint);
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
   bool LeftInputIs(Type* t) { return left_type()->Is(t); }
 
   bool RightInputIs(Type* t) { return right_type()->Is(t); }
@@ -216,12 +301,12 @@
   Node* right() { return NodeProperties::GetValueInput(node_, 1); }
   Type* left_type() { return NodeProperties::GetType(node_->InputAt(0)); }
   Type* right_type() { return NodeProperties::GetType(node_->InputAt(1)); }
+  Type* type() { return NodeProperties::GetType(node_); }
 
   SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
   Graph* graph() const { return lowering_->graph(); }
   JSGraph* jsgraph() { return lowering_->jsgraph(); }
   JSOperatorBuilder* javascript() { return lowering_->javascript(); }
-  MachineOperatorBuilder* machine() { return lowering_->machine(); }
   CommonOperatorBuilder* common() { return jsgraph()->common(); }
   Zone* zone() const { return graph()->zone(); }
 
@@ -229,73 +314,18 @@
   JSTypedLowering* lowering_;  // The containing lowering instance.
   Node* node_;                 // The original node.
 
-  Node* CreateFrameStateForLeftInput(Node* frame_state) {
-    FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-
-    if (state_info.bailout_id() == BailoutId::None()) {
-      // Dummy frame state => just leave it as is.
-      return frame_state;
-    }
-
-    // If the frame state is already the right one, just return it.
-    if (state_info.state_combine().kind() == OutputFrameStateCombine::kPokeAt &&
-        state_info.state_combine().GetOffsetToPokeAt() == 1) {
-      return frame_state;
-    }
-
-    // Here, we smash the result of the conversion into the slot just below
-    // the stack top. This is the slot that full code uses to store the
-    // left operand.
-    const Operator* op = jsgraph()->common()->FrameState(
-        state_info.bailout_id(), OutputFrameStateCombine::PokeAt(1),
-        state_info.function_info());
-
-    return graph()->NewNode(op,
-                            frame_state->InputAt(kFrameStateParametersInput),
-                            frame_state->InputAt(kFrameStateLocalsInput),
-                            frame_state->InputAt(kFrameStateStackInput),
-                            frame_state->InputAt(kFrameStateContextInput),
-                            frame_state->InputAt(kFrameStateFunctionInput),
-                            frame_state->InputAt(kFrameStateOuterStateInput));
+  Node* CreateFrameStateForLeftInput() {
+    // Deoptimization is disabled => return the dummy frame state instead.
+    Node* dummy_state = NodeProperties::GetFrameStateInput(node_);
+    DCHECK(OpParameter<FrameStateInfo>(dummy_state).bailout_id().IsNone());
+    return dummy_state;
   }
 
-  Node* CreateFrameStateForRightInput(Node* frame_state, Node* converted_left) {
-    FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-
-    if (state_info.bailout_id() == BailoutId::None()) {
-      // Dummy frame state => just leave it as is.
-      return frame_state;
-    }
-
-    // Create a frame state that stores the result of the operation to the
-    // top of the stack (i.e., the slot used for the right operand).
-    const Operator* op = jsgraph()->common()->FrameState(
-        state_info.bailout_id(), OutputFrameStateCombine::PokeAt(0),
-        state_info.function_info());
-
-    // Change the left operand {converted_left} on the expression stack.
-    Node* stack = frame_state->InputAt(2);
-    DCHECK_EQ(stack->opcode(), IrOpcode::kStateValues);
-    DCHECK_GE(stack->InputCount(), 2);
-
-    // TODO(jarin) Allocate in a local zone or a reusable buffer.
-    NodeVector new_values(stack->InputCount(), zone());
-    for (int i = 0; i < stack->InputCount(); i++) {
-      if (i == stack->InputCount() - 2) {
-        new_values[i] = converted_left;
-      } else {
-        new_values[i] = stack->InputAt(i);
-      }
-    }
-    Node* new_stack =
-        graph()->NewNode(stack->op(), stack->InputCount(), &new_values.front());
-
-    return graph()->NewNode(
-        op, frame_state->InputAt(kFrameStateParametersInput),
-        frame_state->InputAt(kFrameStateLocalsInput), new_stack,
-        frame_state->InputAt(kFrameStateContextInput),
-        frame_state->InputAt(kFrameStateFunctionInput),
-        frame_state->InputAt(kFrameStateOuterStateInput));
+  Node* CreateFrameStateForRightInput(Node* converted_left) {
+    // Deoptimization is disabled => return the dummy frame state instead.
+    Node* dummy_state = NodeProperties::GetFrameStateInput(node_);
+    DCHECK(OpParameter<FrameStateInfo>(dummy_state).bailout_id().IsNone());
+    return dummy_state;
   }
 
   Node* ConvertPlainPrimitiveToNumber(Node* node) {
@@ -320,30 +350,28 @@
     return n;
   }
 
-  void ConvertBothInputsToNumber(Node** left_result, Node** right_result,
-                                 Node* frame_state) {
+  void ConvertBothInputsToNumber(Node** left_result, Node** right_result) {
     Node* projections[2];
 
     // Find {IfSuccess} and {IfException} continuations of the operation.
     NodeProperties::CollectControlProjections(node_, projections, 2);
-    IfExceptionHint hint = OpParameter<IfExceptionHint>(projections[1]);
     Node* if_exception = projections[1];
     Node* if_success = projections[0];
 
     // Insert two ToNumber() operations that both potentially throw.
-    Node* left_state = CreateFrameStateForLeftInput(frame_state);
+    Node* left_state = CreateFrameStateForLeftInput();
     Node* left_conv =
         graph()->NewNode(javascript()->ToNumber(), left(), context(),
                          left_state, effect(), control());
     Node* left_success = graph()->NewNode(common()->IfSuccess(), left_conv);
-    Node* right_state = CreateFrameStateForRightInput(frame_state, left_conv);
+    Node* right_state = CreateFrameStateForRightInput(left_conv);
     Node* right_conv =
         graph()->NewNode(javascript()->ToNumber(), right(), context(),
                          right_state, left_conv, left_success);
     Node* left_exception =
-        graph()->NewNode(common()->IfException(hint), left_conv, left_conv);
+        graph()->NewNode(common()->IfException(), left_conv, left_conv);
     Node* right_exception =
-        graph()->NewNode(common()->IfException(hint), right_conv, right_conv);
+        graph()->NewNode(common()->IfException(), right_conv, right_conv);
     NodeProperties::ReplaceControlInput(if_success, right_conv);
     update_effect(right_conv);
 
@@ -414,36 +442,30 @@
   }
 }
 
-
 Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-
   JSBinopReduction r(this, node);
-
-  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
-  if (feedback == BinaryOperationHints::kNumberOrUndefined &&
-      r.BothInputsAre(Type::PlainPrimitive()) &&
-      r.NeitherInputCanBe(Type::StringOrReceiver())) {
-    // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
-    Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-    r.ConvertInputsToNumber(frame_state);
-    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
-  }
-  if (feedback != BinaryOperationHints::kAny) {
-    // Lower to the optimistic number binop.
+  NumberOperationHint hint;
+  if (r.GetBinaryNumberOperationHint(&hint)) {
+    if (hint == NumberOperationHint::kNumberOrOddball &&
+        r.BothInputsAre(Type::PlainPrimitive()) &&
+        r.NeitherInputCanBe(Type::StringOrReceiver())) {
+      // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
+      r.ConvertInputsToNumber();
+      return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+    }
     return r.ChangeToSpeculativeOperator(
-        simplified()->SpeculativeNumberAdd(feedback), Type::Number());
+        simplified()->SpeculativeNumberAdd(hint), Type::Number());
   }
   if (r.BothInputsAre(Type::Number())) {
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
-    Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-    r.ConvertInputsToNumber(frame_state);
+    r.ConvertInputsToNumber();
     return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
-  if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
+  if ((r.BothInputsAre(Type::PlainPrimitive()) ||
+       !(flags() & kDeoptimizationEnabled)) &&
+      r.NeitherInputCanBe(Type::StringOrReceiver())) {
     // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
-    Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-    r.ConvertInputsToNumber(frame_state);
+    r.ConvertInputsToNumber();
     return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
   if (r.OneInputIs(Type::String())) {
@@ -460,8 +482,7 @@
     CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
         isolate(), graph()->zone(), callable.descriptor(), 0,
         CallDescriptor::kNeedsFrameState, node->op()->properties());
-    DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node->op()));
-    node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
+    DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
     node->InsertInput(graph()->zone(), 0,
                       jsgraph()->HeapConstant(callable.code()));
     NodeProperties::ChangeOp(node, common()->Call(desc));
@@ -470,102 +491,62 @@
   return NoChange();
 }
 
-
-Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
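+// Shared lowering for number binops: speculate on recorded number feedback
+// when available; otherwise lower to the pure operator if both inputs are
+// plain primitives or deoptimization is disabled.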
+Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
   JSBinopReduction r(this, node);
-  if (r.BothInputsAre(Type::Number())) {
-    // JSModulus(x:number, y:number) => NumberModulus(x, y)
-    return r.ChangeToPureOperator(simplified()->NumberModulus(),
-                                  Type::Number());
+  NumberOperationHint hint;
+  if (r.GetBinaryNumberOperationHint(&hint)) {
+    if (hint == NumberOperationHint::kNumberOrOddball &&
+        r.BothInputsAre(Type::PlainPrimitive())) {
+      r.ConvertInputsToNumber();
+      return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
+    }
+    return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
+                                         Type::Number());
   }
-  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
-  if (feedback != BinaryOperationHints::kAny) {
-    return r.ChangeToSpeculativeOperator(
-        simplified()->SpeculativeNumberModulus(feedback), Type::Number());
+  if (r.BothInputsAre(Type::PlainPrimitive()) ||
+      !(flags() & kDeoptimizationEnabled)) {
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
   }
   return NoChange();
 }
 
-Reduction JSTypedLowering::ReduceJSSubtract(Node* node) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
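+// Lowering for binops whose results always fit into Signed32 (the bitwise
+// operators).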
+Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
   JSBinopReduction r(this, node);
-  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
-  if (feedback == BinaryOperationHints::kNumberOrUndefined &&
-      r.BothInputsAre(Type::PlainPrimitive())) {
-    // JSSubtract(x:plain-primitive, y:plain-primitive)
-    //   => NumberSubtract(ToNumber(x), ToNumber(y))
-    Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-    r.ConvertInputsToNumber(frame_state);
-    return r.ChangeToPureOperator(simplified()->NumberSubtract(),
-                                  Type::Number());
+  NumberOperationHint hint;
+  if (r.GetBinaryNumberOperationHint(&hint)) {
+    return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
+                                         Type::Signed32());
   }
-  if (feedback != BinaryOperationHints::kAny) {
-    // Lower to the optimistic number binop.
+  if (r.BothInputsAre(Type::PlainPrimitive()) ||
+      !(flags() & kDeoptimizationEnabled)) {
+    r.ConvertInputsToNumber();
+    r.ConvertInputsToUI32(kSigned, kSigned);
+    return r.ChangeToPureOperator(r.NumberOp(), Type::Signed32());
+  }
+  return NoChange();
+}
+
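+// Lowering for the shift operators; {signedness} is the signedness of the
+// left operand and determines whether the result is Signed32 or Unsigned32.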
+Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
+  JSBinopReduction r(this, node);
+  NumberOperationHint hint;
+  if (r.GetBinaryNumberOperationHint(&hint)) {
     return r.ChangeToSpeculativeOperator(
-        simplified()->SpeculativeNumberSubtract(feedback), Type::Number());
+        r.SpeculativeNumberOp(hint),
+        signedness == kUnsigned ? Type::Unsigned32() : Type::Signed32());
   }
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
-  return r.ChangeToPureOperator(simplified()->NumberSubtract(), Type::Number());
-}
-
-Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-  JSBinopReduction r(this, node);
-
-  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
-  if (feedback != BinaryOperationHints::kAny) {
-    return r.ChangeToSpeculativeOperator(
-        simplified()->SpeculativeNumberMultiply(feedback), Type::Number());
+  if (r.BothInputsAre(Type::PlainPrimitive()) ||
+      !(flags() & kDeoptimizationEnabled)) {
+    r.ConvertInputsToNumber();
+    r.ConvertInputsToUI32(signedness, kUnsigned);
+    return r.ChangeToPureOperator(r.NumberOp(), signedness == kUnsigned
+                                                    ? Type::Unsigned32()
+                                                    : Type::Signed32());
   }
-
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
-  return r.ChangeToPureOperator(simplified()->NumberMultiply(), Type::Number());
+  return NoChange();
 }
 
-Reduction JSTypedLowering::ReduceJSDivide(Node* node) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-  JSBinopReduction r(this, node);
-  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
-  if (feedback != BinaryOperationHints::kAny) {
-    return r.ChangeToSpeculativeOperator(
-        simplified()->SpeculativeNumberDivide(feedback), Type::Number());
-  }
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
-  return r.ChangeToPureOperator(simplified()->NumberDivide(), Type::Number());
-}
-
-
-Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-
-  JSBinopReduction r(this, node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
-  r.ConvertInputsToUI32(kSigned, kSigned);
-  return r.ChangeToPureOperator(intOp, Type::Integral32());
-}
-
-
-Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
-                                           Signedness left_signedness,
-                                           const Operator* shift_op) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-
-  JSBinopReduction r(this, node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
-  r.ConvertInputsToUI32(left_signedness, kUnsigned);
-  return r.ChangeToPureOperator(shift_op);
-}
-
-
 Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::String())) {
     // If both inputs are definitely strings, perform a string comparison.
@@ -592,54 +573,49 @@
     return Changed(node);
   }
 
-  CompareOperationHints::Hint hint = r.GetNumberCompareOperationFeedback();
-  if (hint != CompareOperationHints::kAny ||
-      r.OneInputCannotBe(Type::StringOrReceiver())) {
-    const Operator* less_than;
-    const Operator* less_than_or_equal;
-    if (r.BothInputsAre(Type::Unsigned32())) {
-      less_than = machine()->Uint32LessThan();
-      less_than_or_equal = machine()->Uint32LessThanOrEqual();
-    } else if (r.BothInputsAre(Type::Signed32())) {
-      less_than = machine()->Int32LessThan();
-      less_than_or_equal = machine()->Int32LessThanOrEqual();
-    } else if (hint != CompareOperationHints::kAny) {
-      less_than = simplified()->SpeculativeNumberLessThan(hint);
-      less_than_or_equal = simplified()->SpeculativeNumberLessThanOrEqual(hint);
-    } else {
-      // TODO(turbofan): mixed signed/unsigned int32 comparisons.
-      Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-      r.ConvertInputsToNumber(frame_state);
-      less_than = simplified()->NumberLessThan();
-      less_than_or_equal = simplified()->NumberLessThanOrEqual();
-    }
-    const Operator* comparison;
-    switch (node->opcode()) {
-      case IrOpcode::kJSLessThan:
-        comparison = less_than;
-        break;
-      case IrOpcode::kJSGreaterThan:
-        comparison = less_than;
-        r.SwapInputs();  // a > b => b < a
-        break;
-      case IrOpcode::kJSLessThanOrEqual:
-        comparison = less_than_or_equal;
-        break;
-      case IrOpcode::kJSGreaterThanOrEqual:
-        comparison = less_than_or_equal;
-        r.SwapInputs();  // a >= b => b <= a
-        break;
-      default:
-        return NoChange();
-    }
-    if (comparison->EffectInputCount() > 0) {
-      return r.ChangeToSpeculativeOperator(comparison, Type::Boolean());
-    } else {
-      return r.ChangeToPureOperator(comparison);
-    }
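+  // Choose between a pure and a speculative number comparison based on the
+  // input types and the recorded feedback.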
+  NumberOperationHint hint;
+  const Operator* less_than;
+  const Operator* less_than_or_equal;
+  if (r.BothInputsAre(Type::Signed32()) ||
+      r.BothInputsAre(Type::Unsigned32())) {
+    less_than = simplified()->NumberLessThan();
+    less_than_or_equal = simplified()->NumberLessThanOrEqual();
+  } else if (r.GetCompareNumberOperationHint(&hint)) {
+    less_than = simplified()->SpeculativeNumberLessThan(hint);
+    less_than_or_equal = simplified()->SpeculativeNumberLessThanOrEqual(hint);
+  } else if (r.OneInputCannotBe(Type::StringOrReceiver()) &&
+             (r.BothInputsAre(Type::PlainPrimitive()) ||
+              !(flags() & kDeoptimizationEnabled))) {
+    r.ConvertInputsToNumber();
+    less_than = simplified()->NumberLessThan();
+    less_than_or_equal = simplified()->NumberLessThanOrEqual();
+  } else {
+    return NoChange();
   }
-  // TODO(turbofan): relax/remove effects of this operator in other cases.
-  return NoChange();  // Keep a generic comparison.
+  const Operator* comparison;
+  switch (node->opcode()) {
+    case IrOpcode::kJSLessThan:
+      comparison = less_than;
+      break;
+    case IrOpcode::kJSGreaterThan:
+      comparison = less_than;
+      r.SwapInputs();  // a > b => b < a
+      break;
+    case IrOpcode::kJSLessThanOrEqual:
+      comparison = less_than_or_equal;
+      break;
+    case IrOpcode::kJSGreaterThanOrEqual:
+      comparison = less_than_or_equal;
+      r.SwapInputs();  // a >= b => b <= a
+      break;
+    default:
+      return NoChange();
+  }
+  if (comparison->EffectInputCount() > 0) {
+    return r.ChangeToSpeculativeOperator(comparison, Type::Boolean());
+  } else {
+    return r.ChangeToPureOperator(comparison);
+  }
 }
 
 Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
@@ -650,13 +626,13 @@
     Node* input = m.left().InputAt(0);
     Handle<String> value = Handle<String>::cast(m.right().Value());
     if (String::Equals(value, factory()->boolean_string())) {
-      replacement = graph()->NewNode(
-          common()->Select(MachineRepresentation::kTagged),
-          graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
-                           jsgraph()->TrueConstant()),
-          jsgraph()->TrueConstant(),
-          graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
-                           jsgraph()->FalseConstant()));
+      replacement =
+          graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+                           graph()->NewNode(simplified()->ReferenceEqual(),
+                                            input, jsgraph()->TrueConstant()),
+                           jsgraph()->TrueConstant(),
+                           graph()->NewNode(simplified()->ReferenceEqual(),
+                                            input, jsgraph()->FalseConstant()));
     } else if (String::Equals(value, factory()->function_string())) {
       replacement = graph()->NewNode(simplified()->ObjectIsCallable(), input);
     } else if (String::Equals(value, factory()->number_string())) {
@@ -666,7 +642,7 @@
     } else if (String::Equals(value, factory()->undefined_string())) {
       replacement = graph()->NewNode(
           common()->Select(MachineRepresentation::kTagged),
-          graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
+          graph()->NewNode(simplified()->ReferenceEqual(), input,
                            jsgraph()->NullConstant()),
           jsgraph()->FalseConstant(),
           graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
@@ -676,35 +652,26 @@
     if (invert) {
       replacement = graph()->NewNode(simplified()->BooleanNot(), replacement);
     }
+    ReplaceWithValue(node, replacement);
     return Replace(replacement);
   }
   return NoChange();
 }
 
 Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-
   Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
-  if (reduction.Changed()) {
-    ReplaceWithValue(node, reduction.replacement());
-    return reduction;
-  }
+  if (reduction.Changed()) return reduction;
 
   JSBinopReduction r(this, node);
 
-  if (r.BothInputsAre(Type::Number())) {
-    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
-  }
   if (r.BothInputsAre(Type::String())) {
     return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
   if (r.BothInputsAre(Type::Boolean())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
-                                  invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.BothInputsAre(Type::Receiver())) {
-    return r.ChangeToPureOperator(
-        simplified()->ReferenceEqual(Type::Receiver()), invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.OneInputIs(Type::Undetectable())) {
     RelaxEffectsAndControls(node);
@@ -721,13 +688,21 @@
     }
     return Changed(node);
   }
+
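+  // Otherwise try a (possibly speculative) number equality.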
+  NumberOperationHint hint;
+  if (r.BothInputsAre(Type::Signed32()) ||
+      r.BothInputsAre(Type::Unsigned32())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  } else if (r.GetCompareNumberOperationHint(&hint)) {
+    return r.ChangeToSpeculativeOperator(
+        simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
+  } else if (r.BothInputsAre(Type::Number())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  }
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
-  if (flags() & kDisableBinaryOpReduction) return NoChange();
-
   JSBinopReduction r(this, node);
   if (r.left() == r.right()) {
     // x === x is always true if x != NaN
@@ -747,49 +722,48 @@
       return Replace(replacement);
     }
   }
+
   Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
-  if (reduction.Changed()) {
-    return reduction;
-  }
+  if (reduction.Changed()) return reduction;
+
   if (r.OneInputIs(the_hole_type_)) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(the_hole_type_),
-                                  invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.OneInputIs(Type::Undefined())) {
-    return r.ChangeToPureOperator(
-        simplified()->ReferenceEqual(Type::Undefined()), invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.OneInputIs(Type::Null())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()),
-                                  invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.OneInputIs(Type::Boolean())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
-                                  invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.OneInputIs(Type::Object())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()),
-                                  invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.OneInputIs(Type::Receiver())) {
-    return r.ChangeToPureOperator(
-        simplified()->ReferenceEqual(Type::Receiver()), invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.BothInputsAre(Type::Unique())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Unique()),
-                                  invert);
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
   if (r.BothInputsAre(Type::String())) {
     return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
-  if (r.BothInputsAre(Type::Number())) {
+
+  NumberOperationHint hint;
+  if (r.BothInputsAre(Type::Signed32()) ||
+      r.BothInputsAre(Type::Unsigned32())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  } else if (r.GetCompareNumberOperationHint(&hint)) {
+    return r.ChangeToSpeculativeOperator(
+        simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
+  } else if (r.BothInputsAre(Type::Number())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
   }
-  // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
   Node* const input = node->InputAt(0);
   Type* const input_type = NodeProperties::GetType(input);
@@ -804,6 +778,14 @@
     node->TrimInputCount(1);
     NodeProperties::ChangeOp(node, simplified()->BooleanNot());
     return Changed(node);
+  } else if (input_type->Is(Type::Number())) {
+    // JSToBoolean(x:number) => NumberLessThan(#0,NumberAbs(x))
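+    // Note: (0 < abs(x)) is false exactly for 0, -0 and NaN, matching the
+    // ToBoolean semantics on numbers.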
+    RelaxEffectsAndControls(node);
+    node->ReplaceInput(0, jsgraph()->ZeroConstant());
+    node->ReplaceInput(1, graph()->NewNode(simplified()->NumberAbs(), input));
+    node->TrimInputCount(2);
+    NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
+    return Changed(node);
   } else if (input_type->Is(Type::String())) {
     // JSToBoolean(x:string) => NumberLessThan(#0,x.length)
     FieldAccess const access = AccessBuilder::ForStringLength();
@@ -889,18 +871,9 @@
     // JSToNumber(null) => #0
     return Replace(jsgraph()->ZeroConstant());
   }
-  if (input_type->Is(Type::Boolean())) {
-    // JSToNumber(x:boolean) => BooleanToNumber(x)
-    return Replace(graph()->NewNode(simplified()->BooleanToNumber(), input));
-  }
-  if (input_type->Is(Type::String())) {
-    // JSToNumber(x:string) => StringToNumber(x)
-    return Replace(graph()->NewNode(simplified()->StringToNumber(), input));
-  }
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
   // Try to reduce the input first.
   Node* const input = node->InputAt(0);
@@ -919,7 +892,6 @@
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
   if (input->opcode() == IrOpcode::kJSToString) {
     // Recursively try to reduce the input first.
@@ -947,7 +919,6 @@
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSToString(Node* node) {
   // Try to reduce the input first.
   Node* const input = node->InputAt(0);
@@ -959,86 +930,63 @@
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
   DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Type* receiver_type = NodeProperties::GetType(receiver);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  if (!receiver_type->Is(Type::Receiver())) {
-    // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
-    if (receiver_type->Maybe(Type::NullOrUndefined()) &&
-        NodeProperties::IsExceptionalCall(node)) {
-      // ToObject throws for null or undefined inputs.
-      return NoChange();
-    }
-
-    // Check whether {receiver} is a Smi.
-    Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
-    Node* branch0 =
-        graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-    Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-    Node* etrue0 = effect;
-
-    Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-    Node* efalse0 = effect;
-
-    // Determine the instance type of {receiver}.
-    Node* receiver_map = efalse0 =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         receiver, efalse0, if_false0);
-    Node* receiver_instance_type = efalse0 = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-        receiver_map, efalse0, if_false0);
-
-    // Check whether {receiver} is a spec object.
-    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-    Node* check1 =
-        graph()->NewNode(machine()->Uint32LessThanOrEqual(),
-                         jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
-                         receiver_instance_type);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                     check1, if_false0);
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-
-    // Convert {receiver} using the ToObjectStub.
-    Node* if_convert =
-        graph()->NewNode(common()->Merge(2), if_true0, if_false1);
-    Node* econvert =
-        graph()->NewNode(common()->EffectPhi(2), etrue0, efalse1, if_convert);
-    Node* rconvert;
-    {
-      Callable callable = CodeFactory::ToObject(isolate());
-      CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-          isolate(), graph()->zone(), callable.descriptor(), 0,
-          CallDescriptor::kNeedsFrameState, node->op()->properties());
-      rconvert = econvert = graph()->NewNode(
-          common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-          receiver, context, frame_state, econvert, if_convert);
-    }
-
-    // The {receiver} is already a spec object.
-    Node* if_done = if_true1;
-    Node* edone = etrue1;
-    Node* rdone = receiver;
-
-    control = graph()->NewNode(common()->Merge(2), if_convert, if_done);
-    effect = graph()->NewNode(common()->EffectPhi(2), econvert, edone, control);
-    receiver =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                         rconvert, rdone, control);
+  if (receiver_type->Is(Type::Receiver())) {
+    ReplaceWithValue(node, receiver, effect, control);
+    return Replace(receiver);
   }
-  ReplaceWithValue(node, receiver, effect, control);
-  return Changed(receiver);
-}
 
+  // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
+  if (receiver_type->Maybe(Type::NullOrUndefined()) &&
+      NodeProperties::IsExceptionalCall(node)) {
+    // ToObject throws for null or undefined inputs.
+    return NoChange();
+  }
+
+  // Check whether {receiver} is a spec object.
+  Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* rtrue = receiver;
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* rfalse;
+  {
+    // Convert {receiver} using the ToObjectStub.
+    Callable callable = CodeFactory::ToObject(isolate());
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0,
+        CallDescriptor::kNeedsFrameState, node->op()->properties());
+    rfalse = efalse = graph()->NewNode(
+        common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+        receiver, context, frame_state, efalse, if_false);
+    if_false = graph()->NewNode(common()->IfSuccess(), rfalse);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+  // Morph the {node} into an appropriate Phi.
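+  // ReplaceWithValue() rewires the effect and control uses of {node}; its
+  // value uses remain in place and end up seeing the Phi created below.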
+  ReplaceWithValue(node, node, effect, control);
+  node->ReplaceInput(0, rtrue);
+  node->ReplaceInput(1, rfalse);
+  node->ReplaceInput(2, control);
+  node->TrimInputCount(3);
+  NodeProperties::ChangeOp(node,
+                           common()->Phi(MachineRepresentation::kTagged, 2));
+  return Changed(node);
+}
 
 Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
@@ -1059,7 +1007,6 @@
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
   Node* key = NodeProperties::GetValueInput(node, 1);
   Node* base = NodeProperties::GetValueInput(node, 0);
@@ -1093,7 +1040,10 @@
           return Replace(load);
         }
         // Compute byte offset.
-        Node* offset = Word32Shl(key, static_cast<int>(k));
+        Node* offset =
+            (k == 0) ? key : graph()->NewNode(
+                                 simplified()->NumberShiftLeft(), key,
+                                 jsgraph()->Constant(static_cast<double>(k)));
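+        // {k} is the log2 of the element size, so shifting the index left by
+        // {k} yields the byte offset; k == 0 covers byte-sized elements.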
         Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
                                       offset, length, effect, control);
         ReplaceWithValue(node, load, load);
@@ -1104,7 +1054,6 @@
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
   Node* key = NodeProperties::GetValueInput(node, 1);
   Node* base = NodeProperties::GetValueInput(node, 0);
@@ -1143,6 +1092,7 @@
             value = effect =
                 graph()->NewNode(javascript()->ToNumber(), value, context,
                                  frame_state_for_to_number, effect, control);
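+            // JSToNumber can throw, so continue on the IfSuccess projection
+            // of the new node.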
+            control = graph()->NewNode(common()->IfSuccess(), value);
           }
         }
         // Check if we can avoid the bounds check.
@@ -1161,7 +1111,10 @@
           return Changed(node);
         }
         // Compute byte offset.
-        Node* offset = Word32Shl(key, static_cast<int>(k));
+        Node* offset =
+            (k == 0) ? key : graph()->NewNode(
+                                 simplified()->NumberShiftLeft(), key,
+                                 jsgraph()->Constant(static_cast<double>(k)));
         // Turn into a StoreBuffer operation.
         RelaxControls(node);
         node->ReplaceInput(0, buffer);
@@ -1179,11 +1132,10 @@
   return NoChange();
 }
 
-
 Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
   DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
   Node* const context = NodeProperties::GetContextInput(node);
-  Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* const frame_state = NodeProperties::GetFrameStateInput(node);
 
   // If deoptimization is disabled, we cannot optimize.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
@@ -1252,10 +1204,10 @@
   int is_access_check_needed_bit = 1 << Map::kIsAccessCheckNeeded;
   Node* is_access_check_needed_num =
       graph()->NewNode(simplified()->NumberBitwiseAnd(), map_bit_field,
-                       jsgraph()->Uint32Constant(is_access_check_needed_bit));
+                       jsgraph()->Constant(is_access_check_needed_bit));
   Node* is_access_check_needed =
-      graph()->NewNode(machine()->Word32Equal(), is_access_check_needed_num,
-                       jsgraph()->Uint32Constant(is_access_check_needed_bit));
+      graph()->NewNode(simplified()->NumberEqual(), is_access_check_needed_num,
+                       jsgraph()->Constant(is_access_check_needed_bit));
 
   Node* branch_is_access_check_needed = graph()->NewNode(
       common()->Branch(BranchHint::kFalse), is_access_check_needed, control);
@@ -1270,25 +1222,28 @@
   Node* map_instance_type = effect = graph()->NewNode(
       simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
       loop_object_map, loop_effect, control);
-  Node* is_proxy = graph()->NewNode(machine()->Word32Equal(), map_instance_type,
-                                    jsgraph()->Uint32Constant(JS_PROXY_TYPE));
+  Node* is_proxy =
+      graph()->NewNode(simplified()->NumberEqual(), map_instance_type,
+                       jsgraph()->Constant(JS_PROXY_TYPE));
   Node* branch_is_proxy =
       graph()->NewNode(common()->Branch(BranchHint::kFalse), is_proxy, control);
   Node* if_is_proxy = graph()->NewNode(common()->IfTrue(), branch_is_proxy);
   Node* e_is_proxy = effect;
 
-
-  Node* runtime_has_in_proto_chain = control = graph()->NewNode(
-      common()->Merge(2), if_is_access_check_needed, if_is_proxy);
+  control = graph()->NewNode(common()->Merge(2), if_is_access_check_needed,
+                             if_is_proxy);
   effect = graph()->NewNode(common()->EffectPhi(2), e_is_access_check_needed,
                             e_is_proxy, control);
 
   // If we need an access check or the object is a Proxy, make a runtime call
   // to finish the lowering.
-  Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
+  Node* runtimecall = graph()->NewNode(
       javascript()->CallRuntime(Runtime::kHasInPrototypeChain), r.left(),
       prototype, context, frame_state, effect, control);
 
+  Node* runtimecall_control =
+      graph()->NewNode(common()->IfSuccess(), runtimecall);
+
   control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
 
   Node* object_prototype = effect = graph()->NewNode(
@@ -1297,8 +1252,8 @@
 
   // If not, check if object prototype is the null prototype.
   Node* null_proto =
-      graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
-                       object_prototype, jsgraph()->NullConstant());
+      graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
+                       jsgraph()->NullConstant());
   Node* branch_null_proto = graph()->NewNode(
       common()->Branch(BranchHint::kFalse), null_proto, control);
   Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
@@ -1307,9 +1262,8 @@
   control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
 
   // Check if object prototype is equal to function prototype.
-  Node* eq_proto =
-      graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
-                       object_prototype, prototype);
+  Node* eq_proto = graph()->NewNode(simplified()->ReferenceEqual(),
+                                    object_prototype, prototype);
   Node* branch_eq_proto =
       graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
   Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
@@ -1325,21 +1279,17 @@
   loop_object_map->ReplaceInput(1, load_object_map);
   loop->ReplaceInput(1, control);
 
-  control = graph()->NewNode(common()->Merge(3), runtime_has_in_proto_chain,
+  control = graph()->NewNode(common()->Merge(3), runtimecall_control,
                              if_eq_proto, if_null_proto);
-  effect = graph()->NewNode(common()->EffectPhi(3),
-                            bool_result_runtime_has_in_proto_chain_case,
-                            e_eq_proto, e_null_proto, control);
+  effect = graph()->NewNode(common()->EffectPhi(3), runtimecall, e_eq_proto,
+                            e_null_proto, control);
 
   Node* result = graph()->NewNode(
-      common()->Phi(MachineRepresentation::kTagged, 3),
-      bool_result_runtime_has_in_proto_chain_case, jsgraph()->TrueConstant(),
-      jsgraph()->FalseConstant(), control);
+      common()->Phi(MachineRepresentation::kTagged, 3), runtimecall,
+      jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
 
-  DCHECK_NOT_NULL(e_is_smi);
   control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
-  effect =
-      graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+  effect = graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
   result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
                             jsgraph()->FalseConstant(), result, control);
 
@@ -1347,7 +1297,6 @@
   return Changed(result);
 }
 
-
 Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
@@ -1368,7 +1317,6 @@
   return Changed(node);
 }
 
-
 Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
@@ -1389,7 +1337,6 @@
   return Changed(node);
 }
 
-
 Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
   DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
   ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
@@ -1397,94 +1344,229 @@
   Type* receiver_type = NodeProperties::GetType(receiver);
   Node* context = NodeProperties::GetContextInput(node);
   Type* context_type = NodeProperties::GetType(context);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  if (!receiver_type->Is(Type::Receiver())) {
-    if (receiver_type->Is(Type::NullOrUndefined()) ||
-        mode == ConvertReceiverMode::kNullOrUndefined) {
-      if (context_type->IsConstant()) {
-        Handle<JSObject> global_proxy(
-            Handle<Context>::cast(context_type->AsConstant()->Value())
-                ->global_proxy(),
-            isolate());
-        receiver = jsgraph()->Constant(global_proxy);
-      } else {
-        Node* native_context = effect = graph()->NewNode(
-            javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-            context, context, effect);
-        receiver = effect = graph()->NewNode(
-            javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
-            native_context, native_context, effect);
-      }
-    } else if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
-               mode == ConvertReceiverMode::kNotNullOrUndefined) {
-      receiver = effect =
-          graph()->NewNode(javascript()->ToObject(), receiver, context,
-                           frame_state, effect, control);
+
+  // Check if {receiver} is known to be a receiver.
+  if (receiver_type->Is(Type::Receiver())) {
+    ReplaceWithValue(node, receiver, effect, control);
+    return Replace(receiver);
+  }
+
+  // If the {receiver} is known to be null or undefined, we can just replace it
+  // with the global proxy unconditionally.
+  if (receiver_type->Is(Type::NullOrUndefined()) ||
+      mode == ConvertReceiverMode::kNullOrUndefined) {
+    if (context_type->IsConstant()) {
+      Handle<JSObject> global_proxy(
+          Handle<Context>::cast(context_type->AsConstant()->Value())
+              ->global_proxy(),
+          isolate());
+      receiver = jsgraph()->Constant(global_proxy);
     } else {
-      // Check {receiver} for undefined.
-      Node* check0 =
-          graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
-                           receiver, jsgraph()->UndefinedConstant());
-      Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check0, control);
-      Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-      Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+      Node* native_context = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, effect);
+      receiver = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+          native_context, native_context, effect);
+    }
+    ReplaceWithValue(node, receiver, effect, control);
+    return Replace(receiver);
+  }
 
-      // Check {receiver} for null.
-      Node* check1 =
-          graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
-                           receiver, jsgraph()->NullConstant());
-      Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check1, if_false0);
-      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+  // If {receiver} cannot be null or undefined we can skip a few checks.
+  if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
+      mode == ConvertReceiverMode::kNotNullOrUndefined) {
+    Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
+    Node* branch =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
 
-      // Convert {receiver} using ToObject.
-      Node* if_convert = if_false1;
-      Node* econvert = effect;
-      Node* rconvert;
-      {
-        rconvert = econvert =
-            graph()->NewNode(javascript()->ToObject(), receiver, context,
-                             frame_state, econvert, if_convert);
-      }
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* etrue = effect;
+    Node* rtrue = receiver;
 
-      // Replace {receiver} with global proxy of {context}.
-      Node* if_global =
-          graph()->NewNode(common()->Merge(2), if_true0, if_true1);
-      Node* eglobal = effect;
-      Node* rglobal;
-      {
-        if (context_type->IsConstant()) {
-          Handle<JSObject> global_proxy(
-              Handle<Context>::cast(context_type->AsConstant()->Value())
-                  ->global_proxy(),
-              isolate());
-          rglobal = jsgraph()->Constant(global_proxy);
-        } else {
-          Node* native_context = eglobal = graph()->NewNode(
-              javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-              context, context, eglobal);
-          rglobal = eglobal = graph()->NewNode(
-              javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
-              native_context, native_context, eglobal);
-        }
-      }
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+    Node* efalse = effect;
+    Node* rfalse;
+    {
+      // Convert {receiver} using the ToObjectStub.
+      Callable callable = CodeFactory::ToObject(isolate());
+      CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), callable.descriptor(), 0,
+          CallDescriptor::kNeedsFrameState, node->op()->properties());
+      rfalse = efalse = graph()->NewNode(
+          common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+          receiver, context, frame_state, efalse);
+    }
 
-      control = graph()->NewNode(common()->Merge(2), if_convert, if_global);
-      effect =
-          graph()->NewNode(common()->EffectPhi(2), econvert, eglobal, control);
-      receiver =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           rconvert, rglobal, control);
+    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+    // Morph the {node} into an appropriate Phi.
+    ReplaceWithValue(node, node, effect, control);
+    node->ReplaceInput(0, rtrue);
+    node->ReplaceInput(1, rfalse);
+    node->ReplaceInput(2, control);
+    node->TrimInputCount(3);
+    NodeProperties::ChangeOp(node,
+                             common()->Phi(MachineRepresentation::kTagged, 2));
+    return Changed(node);
+  }
+
+  // Check if {receiver} is already a JSReceiver.
+  Node* check0 = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+
+  // Check {receiver} for undefined.
+  Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
+                                  jsgraph()->UndefinedConstant());
+  Node* branch1 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_false0);
+  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+
+  // Check {receiver} for null.
+  Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
+                                  jsgraph()->NullConstant());
+  Node* branch2 =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check2, if_false1);
+  Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+  Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+
+  // We just use {receiver} directly.
+  Node* if_noop = if_true0;
+  Node* enoop = effect;
+  Node* rnoop = receiver;
+
+  // Convert {receiver} using ToObject.
+  Node* if_convert = if_false2;
+  Node* econvert = effect;
+  Node* rconvert;
+  {
+    // Convert {receiver} using the ToObjectStub.
+    Callable callable = CodeFactory::ToObject(isolate());
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0,
+        CallDescriptor::kNeedsFrameState, node->op()->properties());
+    rconvert = econvert = graph()->NewNode(
+        common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+        receiver, context, frame_state, econvert);
+  }
+
+  // Replace {receiver} with global proxy of {context}.
+  Node* if_global = graph()->NewNode(common()->Merge(2), if_true1, if_true2);
+  Node* eglobal = effect;
+  Node* rglobal;
+  {
+    if (context_type->IsConstant()) {
+      Handle<JSObject> global_proxy(
+          Handle<Context>::cast(context_type->AsConstant()->Value())
+              ->global_proxy(),
+          isolate());
+      rglobal = jsgraph()->Constant(global_proxy);
+    } else {
+      Node* native_context = eglobal = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, eglobal);
+      rglobal = eglobal = graph()->NewNode(
+          javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+          native_context, native_context, eglobal);
     }
   }
-  ReplaceWithValue(node, receiver, effect, control);
-  return Changed(receiver);
+
+  control =
+      graph()->NewNode(common()->Merge(3), if_noop, if_convert, if_global);
+  effect = graph()->NewNode(common()->EffectPhi(3), enoop, econvert, eglobal,
+                            control);
+  // Morph the {node} into an appropriate Phi.
+  ReplaceWithValue(node, node, effect, control);
+  node->ReplaceInput(0, rnoop);
+  node->ReplaceInput(1, rconvert);
+  node->ReplaceInput(2, rglobal);
+  node->ReplaceInput(3, control);
+  node->TrimInputCount(4);
+  NodeProperties::ChangeOp(node,
+                           common()->Phi(MachineRepresentation::kTagged, 3));
+  return Changed(node);
 }
 
+namespace {
+
+void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
+                   int builtin_index, int arity, CallDescriptor::Flags flags) {
+  // Patch {node} to a direct CEntryStub call.
+  //
+  // ----------- A r g u m e n t s -----------
+  // -- 0: CEntryStub
+  // --- Stack args ---
+  // -- 1: receiver
+  // -- [2, 2 + n): the n actual arguments passed to the builtin
+  // -- 2 + n: argc, including the receiver and implicit args (Smi)
+  // -- 2 + n + 1: target
+  // -- 2 + n + 2: new_target
+  // --- Register args ---
+  // -- 2 + n + 3: the C entry point
+  // -- 2 + n + 4: argc (Int32)
+  // -----------------------------------
+
+  // The logic contained here is mirrored in Builtins::Generate_Adaptor.
+  // Keep these in sync.
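+  //
+  // E.g. for arity n == 2 the patched node's inputs become:
+  //   0: CEntryStub, 1: receiver, 2-3: args, 4: argc (Smi),
+  //   5: target, 6: new_target, 7: C entry point, 8: argc (Int32).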
+
+  const bool is_construct = (node->opcode() == IrOpcode::kJSCallConstruct);
+
+  DCHECK(Builtins::HasCppImplementation(builtin_index));
+
+  Node* target = NodeProperties::GetValueInput(node, 0);
+  Node* new_target = is_construct
+                         ? NodeProperties::GetValueInput(node, arity + 1)
+                         : jsgraph->UndefinedConstant();
+
+  // API and CPP builtins are implemented in C++, and we can inline both.
+  // CPP builtins create a builtin exit frame, API builtins don't.
+  const bool has_builtin_exit_frame = Builtins::IsCpp(builtin_index);
+
+  Node* stub = jsgraph->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack,
+                                           has_builtin_exit_frame);
+  node->ReplaceInput(0, stub);
+
+  Zone* zone = jsgraph->zone();
+  if (is_construct) {
+    // Unify representations between construct and call nodes.
+    // Remove new target and add receiver as a stack parameter.
+    Node* receiver = jsgraph->UndefinedConstant();
+    node->RemoveInput(arity + 1);
+    node->InsertInput(zone, 1, receiver);
+  }
+
+  const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
+  Node* argc_node = jsgraph->Int32Constant(argc);
+
+  node->InsertInput(zone, arity + 2, argc_node);
+  node->InsertInput(zone, arity + 3, target);
+  node->InsertInput(zone, arity + 4, new_target);
+
+  Address entry = Builtins::CppEntryOf(builtin_index);
+  ExternalReference entry_ref(ExternalReference(entry, isolate));
+  Node* entry_node = jsgraph->ExternalConstant(entry_ref);
+
+  node->InsertInput(zone, arity + 5, entry_node);
+  node->InsertInput(zone, arity + 6, argc_node);
+
+  static const int kReturnCount = 1;
+  const char* debug_name = Builtins::name(builtin_index);
+  Operator::Properties properties = node->op()->properties();
+  CallDescriptor* desc = Linkage::GetCEntryStubCallDescriptor(
+      zone, kReturnCount, argc, debug_name, properties, flags);
+
+  NodeProperties::ChangeOp(node, jsgraph->common()->Call(desc));
+}
+
+}  // namespace
 
 Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
@@ -1494,6 +1576,8 @@
   Node* target = NodeProperties::GetValueInput(node, 0);
   Type* target_type = NodeProperties::GetType(target);
   Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
 
   // Check if {target} is a known JSFunction.
   if (target_type->IsConstant() &&
@@ -1501,21 +1585,43 @@
     Handle<JSFunction> function =
         Handle<JSFunction>::cast(target_type->AsConstant()->Value());
     Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+    const int builtin_index = shared->construct_stub()->builtin_index();
+    const bool is_builtin = (builtin_index != -1);
 
-    // Patch {node} to an indirect call via the {function}s construct stub.
-    Callable callable(handle(shared->construct_stub(), isolate()),
-                      ConstructStubDescriptor(isolate()));
-    node->RemoveInput(arity + 1);
-    node->InsertInput(graph()->zone(), 0,
-                      jsgraph()->HeapConstant(callable.code()));
-    node->InsertInput(graph()->zone(), 2, new_target);
-    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
-    node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-    node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
-    NodeProperties::ChangeOp(
-        node, common()->Call(Linkage::GetStubCallDescriptor(
-                  isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
-                  CallDescriptor::kNeedsFrameState)));
+    CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+
+    if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
+        (shared->internal_formal_parameter_count() == arity ||
+         shared->internal_formal_parameter_count() ==
+             SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
+      // Patch {node} to a direct CEntryStub call.
+
+      // Load the context from the {target}.
+      Node* context = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
+          target, effect, control);
+      NodeProperties::ReplaceContextInput(node, context);
+
+      // Update the effect dependency for the {node}.
+      NodeProperties::ReplaceEffectInput(node, effect);
+
+      ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
+    } else {
+      // Patch {node} to an indirect call via the {function}'s construct stub.
+      Callable callable(handle(shared->construct_stub(), isolate()),
+                        ConstructStubDescriptor(isolate()));
+      node->RemoveInput(arity + 1);
+      node->InsertInput(graph()->zone(), 0,
+                        jsgraph()->HeapConstant(callable.code()));
+      node->InsertInput(graph()->zone(), 2, new_target);
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+      node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(
+          node, common()->Call(Linkage::GetStubCallDescriptor(
+                    isolate(), graph()->zone(), callable.descriptor(),
+                    1 + arity, flags)));
+    }
     return Changed(node);
   }
 
@@ -1566,6 +1672,8 @@
     Handle<JSFunction> function =
         Handle<JSFunction>::cast(target_type->AsConstant()->Value());
     Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+    const int builtin_index = shared->code()->builtin_index();
+    const bool is_builtin = (builtin_index != -1);
 
     // Class constructors are callable, but [[Call]] will raise an exception.
     // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
@@ -1597,9 +1705,15 @@
 
     Node* new_target = jsgraph()->UndefinedConstant();
     Node* argument_count = jsgraph()->Int32Constant(arity);
-    if (shared->internal_formal_parameter_count() == arity ||
-        shared->internal_formal_parameter_count() ==
-            SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+    if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
+        (shared->internal_formal_parameter_count() == arity ||
+         shared->internal_formal_parameter_count() ==
+             SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
+      // Patch {node} to a direct CEntryStub call.
+      ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
+    } else if (shared->internal_formal_parameter_count() == arity ||
+               shared->internal_formal_parameter_count() ==
+                   SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
       // Patch {node} to a direct call.
       node->InsertInput(graph()->zone(), arity + 2, new_target);
       node->InsertInput(graph()->zone(), arity + 3, argument_count);
@@ -1671,7 +1785,7 @@
   Node* cache_type = NodeProperties::GetValueInput(node, 2);
   Node* index = NodeProperties::GetValueInput(node, 3);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
@@ -1686,8 +1800,8 @@
                        receiver, effect, control);
 
   // Check if the expected map still matches that of the {receiver}.
-  Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
-                                  receiver_map, cache_type);
+  Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+                                  cache_type);
   Node* branch0 =
       graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
 
@@ -1707,9 +1821,13 @@
   {
     // Filter the {key} to check if it's still a valid property of the
     // {receiver} (does the ToName conversion implicitly).
+    Callable const callable = CodeFactory::ForInFilter(isolate());
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0,
+        CallDescriptor::kNeedsFrameState);
     vfalse0 = efalse0 = graph()->NewNode(
-        javascript()->CallRuntime(Runtime::kForInFilter), receiver, key,
-        context, frame_state, effect, if_false0);
+        common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
+        receiver, context, frame_state, effect, if_false0);
     if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
   }
 
@@ -1841,6 +1959,105 @@
   return NoChange();
 }
 
+namespace {
+
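+// Extracts a stable map from {object_type} if possible: either the map of a
+// constant heap object, or the map of a Class type.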
+MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
+  if (object_type->IsConstant() &&
+      object_type->AsConstant()->Value()->IsHeapObject()) {
+    Handle<Map> object_map(
+        Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+    if (object_map->is_stable()) return object_map;
+  } else if (object_type->IsClass()) {
+    Handle<Map> object_map = object_type->AsClass()->Map();
+    if (object_map->is_stable()) return object_map;
+  }
+  return MaybeHandle<Map>();
+}
+
+}  // namespace
+
+Reduction JSTypedLowering::ReduceCheckMaps(Node* node) {
+  // TODO(bmeurer): Find a better home for this thing!
+  // The CheckMaps(o, ...map...) can be eliminated if map is stable and
+  // either
+  //  (a) o has type Constant(object) and map == object->map, or
+  //  (b) o has type Class(map),
+  // and either
+  //  (1) map cannot transition further, or
+  //  (2) we can add a code dependency on the stability of map
+  //      (to guard the Constant type information).
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Type* const object_type = NodeProperties::GetType(object);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Handle<Map> object_map;
+  if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+    for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+      Node* const map = NodeProperties::GetValueInput(node, i);
+      Type* const map_type = NodeProperties::GetType(map);
+      if (map_type->IsConstant() &&
+          map_type->AsConstant()->Value().is_identical_to(object_map)) {
+        if (object_map->CanTransition()) {
+          DCHECK(flags() & kDeoptimizationEnabled);
+          dependencies()->AssumeMapStable(object_map);
+        }
+        return Replace(effect);
+      }
+    }
+  }
+  return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceCheckString(Node* node) {
+  // TODO(bmeurer): Find a better home for this thing!
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(Type::String())) {
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceLoadField(Node* node) {
+  // TODO(bmeurer): Find a better home for this thing!
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Type* const object_type = NodeProperties::GetType(object);
+  FieldAccess const& access = FieldAccessOf(node->op());
+  if (access.base_is_tagged == kTaggedBase &&
+      access.offset == HeapObject::kMapOffset) {
+    // We can replace LoadField[Map](o) with map if is stable and either
+    //  (a) o has type Constant(object) and map == object->map, or
+    //  (b) o has type Class(map),
+    // and either
+    //  (1) map cannot transition further, or
+    //  (2) deoptimization is enabled and we can add a code dependency on the
+    //      stability of map (to guard the Constant type information).
+    Handle<Map> object_map;
+    if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+      if (object_map->CanTransition()) {
+        if (flags() & kDeoptimizationEnabled) {
+          dependencies()->AssumeMapStable(object_map);
+        } else {
+          return NoChange();
+        }
+      }
+      Node* const value = jsgraph()->HeapConstant(object_map);
+      ReplaceWithValue(node, value);
+      return Replace(value);
+    }
+  }
+  return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceNumberRoundop(Node* node) {
+  // TODO(bmeurer): Find a better home for this thing!
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
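+  // Ceil, Floor, Round and Trunc act as the identity on values that are
+  // already integral, as well as on -0 and NaN, so the input passes through.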
+  if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+    return Replace(input);
+  }
+  return NoChange();
+}
 
 Reduction JSTypedLowering::Reduce(Node* node) {
   // Check if the output type is a singleton.  In that case we already know the
@@ -1896,28 +2113,21 @@
     case IrOpcode::kJSGreaterThanOrEqual:
       return ReduceJSComparison(node);
     case IrOpcode::kJSBitwiseOr:
-      return ReduceInt32Binop(node, simplified()->NumberBitwiseOr());
     case IrOpcode::kJSBitwiseXor:
-      return ReduceInt32Binop(node, simplified()->NumberBitwiseXor());
     case IrOpcode::kJSBitwiseAnd:
-      return ReduceInt32Binop(node, simplified()->NumberBitwiseAnd());
+      return ReduceInt32Binop(node);
     case IrOpcode::kJSShiftLeft:
-      return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftLeft());
     case IrOpcode::kJSShiftRight:
-      return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftRight());
+      return ReduceUI32Shift(node, kSigned);
     case IrOpcode::kJSShiftRightLogical:
-      return ReduceUI32Shift(node, kUnsigned,
-                             simplified()->NumberShiftRightLogical());
+      return ReduceUI32Shift(node, kUnsigned);
     case IrOpcode::kJSAdd:
       return ReduceJSAdd(node);
     case IrOpcode::kJSSubtract:
-      return ReduceJSSubtract(node);
     case IrOpcode::kJSMultiply:
-      return ReduceJSMultiply(node);
     case IrOpcode::kJSDivide:
-      return ReduceJSDivide(node);
     case IrOpcode::kJSModulus:
-      return ReduceJSModulus(node);
+      return ReduceNumberBinop(node);
     case IrOpcode::kJSToBoolean:
       return ReduceJSToBoolean(node);
     case IrOpcode::kJSToInteger:
@@ -1962,6 +2172,17 @@
       return ReduceJSGeneratorRestoreRegister(node);
     case IrOpcode::kSelect:
       return ReduceSelect(node);
+    case IrOpcode::kCheckMaps:
+      return ReduceCheckMaps(node);
+    case IrOpcode::kCheckString:
+      return ReduceCheckString(node);
+    case IrOpcode::kNumberCeil:
+    case IrOpcode::kNumberFloor:
+    case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberTrunc:
+      return ReduceNumberRoundop(node);
+    case IrOpcode::kLoadField:
+      return ReduceLoadField(node);
     default:
       break;
   }
@@ -1969,21 +2190,6 @@
 }
 
 
-Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
-  if (rhs == 0) return lhs;
-  return graph()->NewNode(machine()->Word32Shl(), lhs,
-                          jsgraph()->Int32Constant(rhs));
-}
-
-Node* JSTypedLowering::EmptyFrameState() {
-  return graph()->NewNode(
-      common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
-                           nullptr),
-      jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
-      jsgraph()->EmptyStateValues(), jsgraph()->NoContextConstant(),
-      jsgraph()->UndefinedConstant(), graph()->start());
-}
-
 Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
 
 
@@ -2002,17 +2208,15 @@
   return jsgraph()->common();
 }
 
+MachineOperatorBuilder* JSTypedLowering::machine() const {
+  return jsgraph()->machine();
+}
 
 SimplifiedOperatorBuilder* JSTypedLowering::simplified() const {
   return jsgraph()->simplified();
 }
 
 
-MachineOperatorBuilder* JSTypedLowering::machine() const {
-  return jsgraph()->machine();
-}
-
-
 CompilationDependencies* JSTypedLowering::dependencies() const {
   return dependencies_;
 }
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index a370b7a..35c397f 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -35,8 +35,6 @@
   enum Flag {
     kNoFlags = 0u,
     kDeoptimizationEnabled = 1u << 0,
-    kDisableBinaryOpReduction = 1u << 1,
-    kTypeFeedbackEnabled = 1u << 2,
   };
   typedef base::Flags<Flag> Flags;
 
@@ -50,9 +48,6 @@
   friend class JSBinopReduction;
 
   Reduction ReduceJSAdd(Node* node);
-  Reduction ReduceJSModulus(Node* node);
-  Reduction ReduceJSBitwiseOr(Node* node);
-  Reduction ReduceJSMultiply(Node* node);
   Reduction ReduceJSComparison(Node* node);
   Reduction ReduceJSLoadNamed(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
@@ -80,16 +75,14 @@
   Reduction ReduceJSGeneratorStore(Node* node);
   Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
   Reduction ReduceJSGeneratorRestoreRegister(Node* node);
+  Reduction ReduceCheckMaps(Node* node);
+  Reduction ReduceCheckString(Node* node);
+  Reduction ReduceLoadField(Node* node);
+  Reduction ReduceNumberRoundop(Node* node);
   Reduction ReduceSelect(Node* node);
-  Reduction ReduceJSSubtract(Node* node);
-  Reduction ReduceJSDivide(Node* node);
-  Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
-  Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
-                            const Operator* shift_op);
-
-  Node* Word32Shl(Node* const lhs, int32_t const rhs);
-
-  Node* EmptyFrameState();
+  Reduction ReduceNumberBinop(Node* node);
+  Reduction ReduceInt32Binop(Node* node);
+  Reduction ReduceUI32Shift(Node* node, Signedness signedness);
 
   Factory* factory() const;
   Graph* graph() const;
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index c3b68d6..e4df58d 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -2,12 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/linkage.h"
+
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-utils.h"
 #include "src/code-stubs.h"
 #include "src/compiler.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/frame.h"
-#include "src/compiler/linkage.h"
 #include "src/compiler/node.h"
 #include "src/compiler/osr.h"
 #include "src/compiler/pipeline.h"
@@ -17,10 +19,10 @@
 namespace compiler {
 
 namespace {
-LinkageLocation regloc(Register reg) {
-  return LinkageLocation::ForRegister(reg.code());
-}
 
+LinkageLocation regloc(Register reg, MachineType type) {
+  return LinkageLocation::ForRegister(reg.code(), type);
+}
 
 MachineType reptyp(Representation representation) {
   switch (representation.kind()) {
@@ -49,6 +51,7 @@
   UNREACHABLE();
   return MachineType::None();
 }
+
 }  // namespace
 
 
@@ -75,6 +78,20 @@
             << d.FrameStateCount() << "t" << d.SupportsTailCalls();
 }
 
+MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
+  size_t param_count = ParameterCount();
+  size_t return_count = ReturnCount();
+  MachineType* types = reinterpret_cast<MachineType*>(
+      zone->New(sizeof(MachineType) * (param_count + return_count)));
+  int current = 0;
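+  // The flat {types} array holds all return types first, followed by all
+  // parameter types, as MachineSignature expects.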
+  for (size_t i = 0; i < return_count; ++i) {
+    types[current++] = GetReturnType(i);
+  }
+  for (size_t i = 0; i < param_count; ++i) {
+    types[current++] = GetParameterType(i);
+  }
+  return new (zone) MachineSignature(return_count, param_count, types);
+}
 
 bool CallDescriptor::HasSameReturnLocationsAs(
     const CallDescriptor* other) const {
@@ -85,33 +102,36 @@
   return true;
 }
 
-
-bool CallDescriptor::CanTailCall(const Node* node,
-                                 int* stack_param_delta) const {
-  CallDescriptor const* other = CallDescriptorOf(node->op());
-  size_t current_input = 0;
-  size_t other_input = 0;
-  *stack_param_delta = 0;
-  bool more_other = true;
-  bool more_this = true;
-  while (more_other || more_this) {
-    if (other_input < other->InputCount()) {
-      if (!other->GetInputLocation(other_input).IsRegister()) {
-        (*stack_param_delta)--;
+int CallDescriptor::GetStackParameterDelta(
+    CallDescriptor const* tail_caller) const {
+  int callee_slots_above_sp = 0;
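+  // Caller frame slots are addressed with negative offsets (see
+  // ForCallerFrameSlot below), so -GetLocation() + GetSizeInPointers() - 1
+  // is the highest stack slot that the operand occupies.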
+  for (size_t i = 0; i < InputCount(); ++i) {
+    LinkageLocation operand = GetInputLocation(i);
+    if (!operand.IsRegister()) {
+      int new_candidate =
+          -operand.GetLocation() + operand.GetSizeInPointers() - 1;
+      if (new_candidate > callee_slots_above_sp) {
+        callee_slots_above_sp = new_candidate;
       }
-    } else {
-      more_other = false;
     }
-    if (current_input < InputCount()) {
-      if (!GetInputLocation(current_input).IsRegister()) {
-        (*stack_param_delta)++;
-      }
-    } else {
-      more_this = false;
-    }
-    ++current_input;
-    ++other_input;
   }
+  int tail_caller_slots_above_sp = 0;
+  if (tail_caller != nullptr) {
+    for (size_t i = 0; i < tail_caller->InputCount(); ++i) {
+      LinkageLocation operand = tail_caller->GetInputLocation(i);
+      if (!operand.IsRegister()) {
+        int new_candidate =
+            -operand.GetLocation() + operand.GetSizeInPointers() - 1;
+        if (new_candidate > tail_caller_slots_above_sp) {
+          tail_caller_slots_above_sp = new_candidate;
+        }
+      }
+    }
+  }
+  return callee_slots_above_sp - tail_caller_slots_above_sp;
+}
+
+bool CallDescriptor::CanTailCall(const Node* node) const {
   return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
 }
 
@@ -173,12 +193,8 @@
     case Runtime::kInlineThrowNotDateError:
     case Runtime::kInlineToInteger:
     case Runtime::kInlineToLength:
-    case Runtime::kInlineToName:
     case Runtime::kInlineToNumber:
     case Runtime::kInlineToObject:
-    case Runtime::kInlineToPrimitive:
-    case Runtime::kInlineToPrimitive_Number:
-    case Runtime::kInlineToPrimitive_String:
     case Runtime::kInlineToString:
       return true;
     default:
@@ -208,6 +224,23 @@
 CallDescriptor* Linkage::GetRuntimeCallDescriptor(
     Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
     Operator::Properties properties, CallDescriptor::Flags flags) {
+  const Runtime::Function* function = Runtime::FunctionForId(function_id);
+  const int return_count = function->result_size;
+  const char* debug_name = function->name;
+
+  if (!Linkage::NeedsFrameStateInput(function_id)) {
+    flags = static_cast<CallDescriptor::Flags>(
+        flags & ~CallDescriptor::kNeedsFrameState);
+  }
+
+  return GetCEntryStubCallDescriptor(zone, return_count, js_parameter_count,
+                                     debug_name, properties, flags);
+}
+
+CallDescriptor* Linkage::GetCEntryStubCallDescriptor(
+    Zone* zone, int return_count, int js_parameter_count,
+    const char* debug_name, Operator::Properties properties,
+    CallDescriptor::Flags flags) {
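+  // Shared between runtime call descriptors and the direct C++ builtin
+  // calls constructed in JSTypedLowering.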
   const size_t function_count = 1;
   const size_t num_args_count = 1;
   const size_t context_count = 1;
@@ -215,67 +248,53 @@
                                  static_cast<size_t>(js_parameter_count) +
                                  num_args_count + context_count;
 
-  const Runtime::Function* function = Runtime::FunctionForId(function_id);
-  const size_t return_count = static_cast<size_t>(function->result_size);
-
-  LocationSignature::Builder locations(zone, return_count, parameter_count);
-  MachineSignature::Builder types(zone, return_count, parameter_count);
+  LocationSignature::Builder locations(zone, static_cast<size_t>(return_count),
+                                       static_cast<size_t>(parameter_count));
 
   // Add returns.
   if (locations.return_count_ > 0) {
-    locations.AddReturn(regloc(kReturnRegister0));
+    locations.AddReturn(regloc(kReturnRegister0, MachineType::AnyTagged()));
   }
   if (locations.return_count_ > 1) {
-    locations.AddReturn(regloc(kReturnRegister1));
+    locations.AddReturn(regloc(kReturnRegister1, MachineType::AnyTagged()));
   }
   if (locations.return_count_ > 2) {
-    locations.AddReturn(regloc(kReturnRegister2));
-  }
-  for (size_t i = 0; i < return_count; i++) {
-    types.AddReturn(MachineType::AnyTagged());
+    locations.AddReturn(regloc(kReturnRegister2, MachineType::AnyTagged()));
   }
 
   // All parameters to the runtime call go on the stack.
   for (int i = 0; i < js_parameter_count; i++) {
-    locations.AddParam(
-        LinkageLocation::ForCallerFrameSlot(i - js_parameter_count));
-    types.AddParam(MachineType::AnyTagged());
+    locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+        i - js_parameter_count, MachineType::AnyTagged()));
   }
   // Add runtime function itself.
-  locations.AddParam(regloc(kRuntimeCallFunctionRegister));
-  types.AddParam(MachineType::AnyTagged());
+  locations.AddParam(
+      regloc(kRuntimeCallFunctionRegister, MachineType::Pointer()));
 
   // Add runtime call argument count.
-  locations.AddParam(regloc(kRuntimeCallArgCountRegister));
-  types.AddParam(MachineType::Pointer());
+  locations.AddParam(
+      regloc(kRuntimeCallArgCountRegister, MachineType::Int32()));
 
   // Add context.
-  locations.AddParam(regloc(kContextRegister));
-  types.AddParam(MachineType::AnyTagged());
-
-  if (!Linkage::NeedsFrameStateInput(function_id)) {
-    flags = static_cast<CallDescriptor::Flags>(
-        flags & ~CallDescriptor::kNeedsFrameState);
-  }
+  locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
 
   // The target for runtime calls is a code object.
   MachineType target_type = MachineType::AnyTagged();
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  LinkageLocation target_loc =
+      LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallCodeObject,  // kind
       target_type,                      // target MachineType
       target_loc,                       // target location
-      types.Build(),                    // machine_sig
       locations.Build(),                // location_sig
       js_parameter_count,               // stack_parameter_count
       properties,                       // properties
       kNoCalleeSaved,                   // callee-saved
       kNoCalleeSaved,                   // callee-saved fp
       flags,                            // flags
-      function->name);                  // debug name
+      debug_name);                      // debug name
 }
 
-
 CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
                                              int js_parameter_count,
                                              CallDescriptor::Flags flags) {
@@ -287,43 +306,39 @@
       js_parameter_count + new_target_count + num_args_count + context_count;
 
   LocationSignature::Builder locations(zone, return_count, parameter_count);
-  MachineSignature::Builder types(zone, return_count, parameter_count);
 
   // All JS calls have exactly one return value.
-  locations.AddReturn(regloc(kReturnRegister0));
-  types.AddReturn(MachineType::AnyTagged());
+  locations.AddReturn(regloc(kReturnRegister0, MachineType::AnyTagged()));
 
   // All parameters to JS calls go on the stack.
   for (int i = 0; i < js_parameter_count; i++) {
     int spill_slot_index = i - js_parameter_count;
-    locations.AddParam(LinkageLocation::ForCallerFrameSlot(spill_slot_index));
-    types.AddParam(MachineType::AnyTagged());
+    locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+        spill_slot_index, MachineType::AnyTagged()));
   }
 
   // Add JavaScript call new target value.
-  locations.AddParam(regloc(kJavaScriptCallNewTargetRegister));
-  types.AddParam(MachineType::AnyTagged());
+  locations.AddParam(
+      regloc(kJavaScriptCallNewTargetRegister, MachineType::AnyTagged()));
 
   // Add JavaScript call argument count.
-  locations.AddParam(regloc(kJavaScriptCallArgCountRegister));
-  types.AddParam(MachineType::Int32());
+  locations.AddParam(
+      regloc(kJavaScriptCallArgCountRegister, MachineType::Int32()));
 
   // Add context.
-  locations.AddParam(regloc(kContextRegister));
-  types.AddParam(MachineType::AnyTagged());
+  locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
 
   // The target for JS function calls is the JSFunction object.
   MachineType target_type = MachineType::AnyTagged();
   // When entering into an OSR function from unoptimized code the JSFunction
   // is not in a register, but it is on the stack in the marker spill slot.
-  LinkageLocation target_loc = is_osr
-                                   ? LinkageLocation::ForSavedCallerFunction()
-                                   : regloc(kJSFunctionRegister);
+  LinkageLocation target_loc =
+      is_osr ? LinkageLocation::ForSavedCallerFunction()
+             : regloc(kJSFunctionRegister, MachineType::AnyTagged());
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallJSFunction,  // kind
       target_type,                      // target MachineType
       target_loc,                       // target location
-      types.Build(),                    // machine_sig
       locations.Build(),                // location_sig
       js_parameter_count,               // stack_parameter_count
       Operator::kNoProperties,          // properties
@@ -350,20 +365,16 @@
       static_cast<size_t>(js_parameter_count + context_count);
 
   LocationSignature::Builder locations(zone, return_count, parameter_count);
-  MachineSignature::Builder types(zone, return_count, parameter_count);
 
   // Add returns.
   if (locations.return_count_ > 0) {
-    locations.AddReturn(regloc(kReturnRegister0));
+    locations.AddReturn(regloc(kReturnRegister0, return_type));
   }
   if (locations.return_count_ > 1) {
-    locations.AddReturn(regloc(kReturnRegister1));
+    locations.AddReturn(regloc(kReturnRegister1, return_type));
   }
   if (locations.return_count_ > 2) {
-    locations.AddReturn(regloc(kReturnRegister2));
-  }
-  for (size_t i = 0; i < return_count; i++) {
-    types.AddReturn(return_type);
+    locations.AddReturn(regloc(kReturnRegister2, return_type));
   }
 
   // Add parameters in registers and on the stack.
@@ -371,29 +382,27 @@
     if (i < register_parameter_count) {
       // The first parameters go in registers.
       Register reg = descriptor.GetRegisterParameter(i);
-      Representation rep =
-          RepresentationFromType(descriptor.GetParameterType(i));
-      locations.AddParam(regloc(reg));
-      types.AddParam(reptyp(rep));
+      MachineType type =
+          reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+      locations.AddParam(regloc(reg, type));
     } else {
       // The rest of the parameters go on the stack.
       int stack_slot = i - register_parameter_count - stack_parameter_count;
-      locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
-      types.AddParam(MachineType::AnyTagged());
+      locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+          stack_slot, MachineType::AnyTagged()));
     }
   }
   // Add context.
-  locations.AddParam(regloc(kContextRegister));
-  types.AddParam(MachineType::AnyTagged());
+  locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
 
   // The target for stub calls is a code object.
   MachineType target_type = MachineType::AnyTagged();
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  LinkageLocation target_loc =
+      LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallCodeObject,  // kind
       target_type,                      // target MachineType
       target_loc,                       // target location
-      types.Build(),                    // machine_sig
       locations.Build(),                // location_sig
       stack_parameter_count,            // stack_parameter_count
       properties,                       // properties
@@ -407,22 +416,19 @@
 // static
 CallDescriptor* Linkage::GetAllocateCallDescriptor(Zone* zone) {
   LocationSignature::Builder locations(zone, 1, 1);
-  MachineSignature::Builder types(zone, 1, 1);
 
-  locations.AddParam(regloc(kAllocateSizeRegister));
-  types.AddParam(MachineType::Int32());
+  locations.AddParam(regloc(kAllocateSizeRegister, MachineType::Int32()));
 
-  locations.AddReturn(regloc(kReturnRegister0));
-  types.AddReturn(MachineType::AnyTagged());
+  locations.AddReturn(regloc(kReturnRegister0, MachineType::AnyTagged()));
 
   // The target for allocate calls is a code object.
   MachineType target_type = MachineType::AnyTagged();
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  LinkageLocation target_loc =
+      LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallCodeObject,  // kind
       target_type,                      // target MachineType
       target_loc,                       // target location
-      types.Build(),                    // machine_sig
       locations.Build(),                // location_sig
       0,                                // stack_parameter_count
       Operator::kNoThrow,               // properties
@@ -440,33 +446,30 @@
   const int parameter_count = register_parameter_count + stack_parameter_count;
 
   LocationSignature::Builder locations(zone, 0, parameter_count);
-  MachineSignature::Builder types(zone, 0, parameter_count);
 
   // Add parameters in registers and on the stack.
   for (int i = 0; i < parameter_count; i++) {
     if (i < register_parameter_count) {
       // The first parameters go in registers.
       Register reg = descriptor.GetRegisterParameter(i);
-      Representation rep =
-          RepresentationFromType(descriptor.GetParameterType(i));
-      locations.AddParam(regloc(reg));
-      types.AddParam(reptyp(rep));
+      MachineType type =
+          reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+      locations.AddParam(regloc(reg, type));
     } else {
       // The rest of the parameters go on the stack.
       int stack_slot = i - register_parameter_count - stack_parameter_count;
-      locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
-      types.AddParam(MachineType::AnyTagged());
+      locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+          stack_slot, MachineType::AnyTagged()));
     }
   }
 
   // The target for interpreter dispatches is a code entry address.
   MachineType target_type = MachineType::Pointer();
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
   return new (zone) CallDescriptor(            // --
       CallDescriptor::kCallAddress,            // kind
       target_type,                             // target MachineType
       target_loc,                              // target location
-      types.Build(),                           // machine_sig
       locations.Build(),                       // location_sig
       stack_parameter_count,                   // stack_parameter_count
       Operator::kNoProperties,                 // properties
@@ -492,7 +495,8 @@
     // Local variable stored in this (callee) stack.
     int spill_index =
         index - first_stack_slot + StandardFrameConstants::kFixedSlotCount;
-    return LinkageLocation::ForCalleeFrameSlot(spill_index);
+    return LinkageLocation::ForCalleeFrameSlot(spill_index,
+                                               MachineType::AnyTagged());
   } else {
     // Parameter. Use the assigned location from the incoming call descriptor.
     int parameter_index = 1 + index;  // skip index 0, which is the target.
@@ -504,19 +508,21 @@
 bool Linkage::ParameterHasSecondaryLocation(int index) const {
   if (!incoming_->IsJSFunctionCall()) return false;
   LinkageLocation loc = GetParameterLocation(index);
-  return (loc == regloc(kJSFunctionRegister) ||
-          loc == regloc(kContextRegister));
+  return (loc == regloc(kJSFunctionRegister, MachineType::AnyTagged()) ||
+          loc == regloc(kContextRegister, MachineType::AnyTagged()));
 }
 
 LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
   DCHECK(ParameterHasSecondaryLocation(index));
   LinkageLocation loc = GetParameterLocation(index);
 
-  if (loc == regloc(kJSFunctionRegister)) {
-    return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot);
+  if (loc == regloc(kJSFunctionRegister, MachineType::AnyTagged())) {
+    return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot,
+                                               MachineType::AnyTagged());
   } else {
-    DCHECK(loc == regloc(kContextRegister));
-    return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot);
+    DCHECK(loc == regloc(kContextRegister, MachineType::AnyTagged()));
+    return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot,
+                                               MachineType::AnyTagged());
   }
 }
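Note on the linkage.cc hunks above: every helper that previously filled two parallel builders, a LocationSignature for locations and a MachineSignature for types, now records the MachineType directly on each LinkageLocation, and the CallDescriptor constructor loses its machine_sig argument. A minimal stand-in sketch of the idea (simplified types, not V8's real classes):

    // A location that knows its machine type makes a parallel type
    // signature redundant.
    struct Location {
      int code;          // register code, or a stack-slot index
      MachineType type;  // e.g. MachineType::AnyTagged() or Int32()
    };
    // Before: locations.AddParam(regloc(reg));
    //         types.AddParam(MachineType::Int32());
    // After:  locations.AddParam(regloc(reg, MachineType::Int32()));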
 
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 8596327..1c02508 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -37,56 +37,63 @@
     return !(*this == other);
   }
 
-  static LinkageLocation ForAnyRegister() {
-    return LinkageLocation(REGISTER, ANY_REGISTER);
+  static LinkageLocation ForAnyRegister(
+      MachineType type = MachineType::None()) {
+    return LinkageLocation(REGISTER, ANY_REGISTER, type);
   }
 
-  static LinkageLocation ForRegister(int32_t reg) {
+  static LinkageLocation ForRegister(int32_t reg,
+                                     MachineType type = MachineType::None()) {
     DCHECK(reg >= 0);
-    return LinkageLocation(REGISTER, reg);
+    return LinkageLocation(REGISTER, reg, type);
   }
 
-  static LinkageLocation ForCallerFrameSlot(int32_t slot) {
+  static LinkageLocation ForCallerFrameSlot(int32_t slot, MachineType type) {
     DCHECK(slot < 0);
-    return LinkageLocation(STACK_SLOT, slot);
+    return LinkageLocation(STACK_SLOT, slot, type);
   }
 
-  static LinkageLocation ForCalleeFrameSlot(int32_t slot) {
+  static LinkageLocation ForCalleeFrameSlot(int32_t slot, MachineType type) {
     // TODO(titzer): bailout instead of crashing here.
     DCHECK(slot >= 0 && slot < LinkageLocation::MAX_STACK_SLOT);
-    return LinkageLocation(STACK_SLOT, slot);
+    return LinkageLocation(STACK_SLOT, slot, type);
   }
 
   static LinkageLocation ForSavedCallerReturnAddress() {
     return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
                                StandardFrameConstants::kCallerPCOffset) /
-                              kPointerSize);
+                                  kPointerSize,
+                              MachineType::Pointer());
   }
 
   static LinkageLocation ForSavedCallerFramePtr() {
     return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
                                StandardFrameConstants::kCallerFPOffset) /
-                              kPointerSize);
+                                  kPointerSize,
+                              MachineType::Pointer());
   }
 
   static LinkageLocation ForSavedCallerConstantPool() {
     DCHECK(V8_EMBEDDED_CONSTANT_POOL);
     return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
                                StandardFrameConstants::kConstantPoolOffset) /
-                              kPointerSize);
+                                  kPointerSize,
+                              MachineType::AnyTagged());
   }
 
   static LinkageLocation ForSavedCallerFunction() {
     return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
                                StandardFrameConstants::kFunctionOffset) /
-                              kPointerSize);
+                                  kPointerSize,
+                              MachineType::AnyTagged());
   }
 
   static LinkageLocation ConvertToTailCallerLocation(
       LinkageLocation caller_location, int stack_param_delta) {
     if (!caller_location.IsRegister()) {
       return LinkageLocation(STACK_SLOT,
-                             caller_location.GetLocation() - stack_param_delta);
+                             caller_location.GetLocation() + stack_param_delta,
+                             caller_location.GetType());
     }
     return caller_location;
   }
@@ -103,9 +110,22 @@
   static const int32_t ANY_REGISTER = -1;
   static const int32_t MAX_STACK_SLOT = 32767;
 
-  LinkageLocation(LocationType type, int32_t location) {
+  LinkageLocation(LocationType type, int32_t location,
+                  MachineType machine_type) {
     bit_field_ = TypeField::encode(type) |
                  ((location << LocationField::kShift) & LocationField::kMask);
+    machine_type_ = machine_type;
+  }
+
+  MachineType GetType() const { return machine_type_; }
+
+  int GetSize() const {
+    return 1 << ElementSizeLog2Of(GetType().representation());
+  }
+
+  int GetSizeInPointers() const {
+    // Round up to a whole number of pointer-sized slots.
+    return (GetSize() + kPointerSize - 1) / kPointerSize;
   }
 
   int32_t GetLocation() const {
@@ -134,6 +154,7 @@
   }
 
   int32_t bit_field_;
+  MachineType machine_type_;
 };
 
 typedef Signature<LinkageLocation> LocationSignature;
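The new GetSizeInPointers() rounds a location's byte size up to whole pointer-sized slots. A standalone check of the arithmetic (the pointer sizes below are assumptions about the target, not part of this patch):

    #include <cassert>

    int SizeInPointers(int byte_size, int pointer_size) {
      return (byte_size + pointer_size - 1) / pointer_size;  // round up
    }

    int main() {
      assert(SizeInPointers(8, 4) == 2);  // Float64 needs two 32-bit slots
      assert(SizeInPointers(4, 4) == 1);  // Word32 fits in one slot
      assert(SizeInPointers(8, 8) == 1);  // Float64 on a 64-bit target
    }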
@@ -153,23 +174,21 @@
     kNoFlags = 0u,
     kNeedsFrameState = 1u << 0,
     kHasExceptionHandler = 1u << 1,
-    kHasLocalCatchHandler = 1u << 2,
-    kSupportsTailCalls = 1u << 3,
-    kCanUseRoots = 1u << 4,
+    kSupportsTailCalls = 1u << 2,
+    kCanUseRoots = 1u << 3,
     // (arm64 only) native stack should be used for arguments.
-    kUseNativeStack = 1u << 5,
+    kUseNativeStack = 1u << 4,
     // (arm64 only) call instruction has to restore JSSP or CSP.
-    kRestoreJSSP = 1u << 6,
-    kRestoreCSP = 1u << 7,
+    kRestoreJSSP = 1u << 5,
+    kRestoreCSP = 1u << 6,
     // Causes the code generator to initialize the root register.
-    kInitializeRootRegister = 1u << 8,
+    kInitializeRootRegister = 1u << 7,
     // Does not ever try to allocate space on our heap.
-    kNoAllocate = 1u << 9
+    kNoAllocate = 1u << 8
   };
   typedef base::Flags<Flag> Flags;
 
   CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
-                 const MachineSignature* machine_sig,
                  LocationSignature* location_sig, size_t stack_param_count,
                  Operator::Properties properties,
                  RegList callee_saved_registers,
@@ -178,7 +197,6 @@
       : kind_(kind),
         target_type_(target_type),
         target_loc_(target_loc),
-        machine_sig_(machine_sig),
         location_sig_(location_sig),
         stack_param_count_(stack_param_count),
         properties_(properties),
@@ -186,8 +204,6 @@
         callee_saved_fp_registers_(callee_saved_fp_registers),
         flags_(flags),
         debug_name_(debug_name) {
-    DCHECK(machine_sig->return_count() == location_sig->return_count());
-    DCHECK(machine_sig->parameter_count() == location_sig->parameter_count());
   }
 
   // Returns the kind of this call.
@@ -204,10 +220,10 @@
   }
 
   // The number of return values from this call.
-  size_t ReturnCount() const { return machine_sig_->return_count(); }
+  size_t ReturnCount() const { return location_sig_->return_count(); }
 
   // The number of C parameters to this call.
-  size_t CParameterCount() const { return machine_sig_->parameter_count(); }
+  size_t ParameterCount() const { return location_sig_->parameter_count(); }
 
   // The number of stack parameters to the call.
   size_t StackParameterCount() const { return stack_param_count_; }
@@ -221,7 +237,7 @@
   // The total number of inputs to this call, which includes the target,
   // receiver, context, etc.
   // TODO(titzer): this should input the framestate input too.
-  size_t InputCount() const { return 1 + machine_sig_->parameter_count(); }
+  size_t InputCount() const { return 1 + location_sig_->parameter_count(); }
 
   size_t FrameStateCount() const { return NeedsFrameState() ? 1 : 0; }
 
@@ -243,15 +259,19 @@
     return location_sig_->GetParam(index - 1);
   }
 
-  const MachineSignature* GetMachineSignature() const { return machine_sig_; }
+  MachineSignature* GetMachineSignature(Zone* zone) const;
 
   MachineType GetReturnType(size_t index) const {
-    return machine_sig_->GetReturn(index);
+    return location_sig_->GetReturn(index).GetType();
   }
 
   MachineType GetInputType(size_t index) const {
     if (index == 0) return target_type_;
-    return machine_sig_->GetParam(index - 1);
+    return location_sig_->GetParam(index - 1).GetType();
+  }
+
+  MachineType GetParameterType(size_t index) const {
+    return location_sig_->GetParam(index).GetType();
   }
 
   // Operator properties describe how this call can be optimized, if at all.
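GetMachineSignature() now takes a Zone, which suggests the machine signature is rebuilt on demand from the locations (its definition is not part of this hunk). A hedged sketch of such a reconstruction, using only builder and accessor APIs that appear elsewhere in this patch:

    // Hypothetical reconstruction; the real body lives in linkage.cc.
    MachineSignature* BuildMachineSignature(const LocationSignature* locs,
                                            Zone* zone) {
      MachineSignature::Builder types(zone, locs->return_count(),
                                      locs->parameter_count());
      for (size_t i = 0; i < locs->return_count(); ++i) {
        types.AddReturn(locs->GetReturn(i).GetType());
      }
      for (size_t i = 0; i < locs->parameter_count(); ++i) {
        types.AddParam(locs->GetParam(i).GetType());
      }
      return types.Build();
    }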
@@ -269,7 +289,9 @@
 
   bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
 
-  bool CanTailCall(const Node* call, int* stack_param_delta) const;
+  int GetStackParameterDelta(const CallDescriptor* tail_caller = nullptr) const;
+
+  bool CanTailCall(const Node* call) const;
 
  private:
   friend class Linkage;
@@ -277,7 +299,6 @@
   const Kind kind_;
   const MachineType target_type_;
   const LinkageLocation target_loc_;
-  const MachineSignature* const machine_sig_;
   const LocationSignature* const location_sig_;
   const size_t stack_param_count_;
   const Operator::Properties properties_;
@@ -322,9 +343,14 @@
                                              CallDescriptor::Flags flags);
 
   static CallDescriptor* GetRuntimeCallDescriptor(
-      Zone* zone, Runtime::FunctionId function, int parameter_count,
+      Zone* zone, Runtime::FunctionId function, int js_parameter_count,
       Operator::Properties properties, CallDescriptor::Flags flags);
 
+  static CallDescriptor* GetCEntryStubCallDescriptor(
+      Zone* zone, int return_count, int js_parameter_count,
+      const char* debug_name, Operator::Properties properties,
+      CallDescriptor::Flags flags);
+
   static CallDescriptor* GetStubCallDescriptor(
       Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
       int stack_parameter_count, CallDescriptor::Flags flags,
@@ -399,6 +425,9 @@
   // A special {OsrValue} index to indicate the context spill slot.
   static const int kOsrContextSpillSlotIndex = -1;
 
+  // A special {OsrValue} index to indicate the accumulator register.
+  static const int kOsrAccumulatorRegisterIndex = -1;
+
  private:
   CallDescriptor* const incoming_;
 
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index a451cfc..ad787f8 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -1,104 +1,710 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/compiler/load-elimination.h"
 
-#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-LoadElimination::~LoadElimination() {}
+namespace {
+
+enum Aliasing { kNoAlias, kMayAlias, kMustAlias };
+
+Aliasing QueryAlias(Node* a, Node* b) {
+  if (a == b) return kMustAlias;
+  if (!NodeProperties::GetType(a)->Maybe(NodeProperties::GetType(b))) {
+    return kNoAlias;
+  }
+  if (b->opcode() == IrOpcode::kAllocate) {
+    switch (a->opcode()) {
+      case IrOpcode::kAllocate:
+      case IrOpcode::kHeapConstant:
+      case IrOpcode::kParameter:
+        return kNoAlias;
+      case IrOpcode::kFinishRegion:
+        return QueryAlias(a->InputAt(0), b);
+      default:
+        break;
+    }
+  }
+  if (a->opcode() == IrOpcode::kAllocate) {
+    switch (b->opcode()) {
+      case IrOpcode::kHeapConstant:
+      case IrOpcode::kParameter:
+        return kNoAlias;
+      case IrOpcode::kFinishRegion:
+        return QueryAlias(a, b->InputAt(0));
+      default:
+        break;
+    }
+  }
+  return kMayAlias;
+}
+
+bool MayAlias(Node* a, Node* b) { return QueryAlias(a, b) != kNoAlias; }
+
+bool MustAlias(Node* a, Node* b) { return QueryAlias(a, b) == kMustAlias; }
+
+}  // namespace
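+
+// Illustrative outcomes of QueryAlias (comments only, no new logic):
+//
+//   QueryAlias(x, x)                  -> kMustAlias
+//   QueryAlias(allocation, parameter) -> kNoAlias  (a fresh allocation can
+//                                        never be an incoming object)
+//   disjoint static types             -> kNoAlias
+//   anything else                     -> kMayAlias (assume the worst)
+//
+// MayAlias() is the conservative test used when killing tracked state;
+// MustAlias() is the strict test used before reusing a remembered value.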
 
 Reduction LoadElimination::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kCheckMaps:
+      return ReduceCheckMaps(node);
+    case IrOpcode::kEnsureWritableFastElements:
+      return ReduceEnsureWritableFastElements(node);
+    case IrOpcode::kMaybeGrowFastElements:
+      return ReduceMaybeGrowFastElements(node);
+    case IrOpcode::kTransitionElementsKind:
+      return ReduceTransitionElementsKind(node);
     case IrOpcode::kLoadField:
       return ReduceLoadField(node);
-    default:
+    case IrOpcode::kStoreField:
+      return ReduceStoreField(node);
+    case IrOpcode::kLoadElement:
+      return ReduceLoadElement(node);
+    case IrOpcode::kStoreElement:
+      return ReduceStoreElement(node);
+    case IrOpcode::kStoreTypedElement:
+      return ReduceStoreTypedElement(node);
+    case IrOpcode::kEffectPhi:
+      return ReduceEffectPhi(node);
+    case IrOpcode::kDead:
       break;
+    case IrOpcode::kStart:
+      return ReduceStart(node);
+    default:
+      return ReduceOtherNode(node);
   }
   return NoChange();
 }
 
-Reduction LoadElimination::ReduceLoadField(Node* node) {
-  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
-  FieldAccess const access = FieldAccessOf(node->op());
-  Node* object = NodeProperties::GetValueInput(node, 0);
-  for (Node* effect = NodeProperties::GetEffectInput(node);;
-       effect = NodeProperties::GetEffectInput(effect)) {
-    switch (effect->opcode()) {
-      case IrOpcode::kLoadField: {
-        FieldAccess const effect_access = FieldAccessOf(effect->op());
-        if (object == NodeProperties::GetValueInput(effect, 0) &&
-            access == effect_access && effect_access.type->Is(access.type)) {
-          Node* const value = effect;
-          ReplaceWithValue(node, value);
-          return Replace(value);
+Node* LoadElimination::AbstractElements::Lookup(Node* object,
+                                                Node* index) const {
+  for (Element const element : elements_) {
+    if (element.object == nullptr) continue;
+    DCHECK_NOT_NULL(element.index);
+    DCHECK_NOT_NULL(element.value);
+    if (MustAlias(object, element.object) && MustAlias(index, element.index)) {
+      return element.value;
+    }
+  }
+  return nullptr;
+}
+
+LoadElimination::AbstractElements const*
+LoadElimination::AbstractElements::Kill(Node* object, Node* index,
+                                        Zone* zone) const {
+  for (Element const element : this->elements_) {
+    if (element.object == nullptr) continue;
+    if (MayAlias(object, element.object)) {
+      AbstractElements* that = new (zone) AbstractElements(zone);
+      for (Element const element : this->elements_) {
+        if (element.object == nullptr) continue;
+        DCHECK_NOT_NULL(element.index);
+        DCHECK_NOT_NULL(element.value);
+        if (!MayAlias(object, element.object) ||
+            !MayAlias(index, element.index)) {
+          that->elements_[that->next_index_++] = element;
         }
-        break;
       }
-      case IrOpcode::kStoreField: {
-        if (access == FieldAccessOf(effect->op())) {
-          if (object == NodeProperties::GetValueInput(effect, 0)) {
-            Node* const value = NodeProperties::GetValueInput(effect, 1);
-            Type* stored_value_type = NodeProperties::GetType(value);
-            Type* load_type = NodeProperties::GetType(node);
-            // Make sure the replacement's type is a subtype of the node's
-            // type. Otherwise we could confuse optimizations that were
-            // based on the original type.
-            if (stored_value_type->Is(load_type)) {
-              ReplaceWithValue(node, value);
-              return Replace(value);
-            } else {
-              Node* renamed = graph()->NewNode(
-                  simplified()->TypeGuard(Type::Intersect(
-                      stored_value_type, load_type, graph()->zone())),
-                  value, NodeProperties::GetControlInput(node));
-              ReplaceWithValue(node, renamed);
-              return Replace(renamed);
-            }
-          }
-          // TODO(turbofan): Alias analysis to the rescue?
-          return NoChange();
-        }
-        break;
-      }
-      case IrOpcode::kBeginRegion:
-      case IrOpcode::kStoreBuffer:
-      case IrOpcode::kStoreElement: {
-        // These can never interfere with field loads.
-        break;
-      }
-      case IrOpcode::kFinishRegion: {
-        // "Look through" FinishRegion nodes to make LoadElimination capable
-        // of looking into atomic regions.
-        if (object == effect) object = NodeProperties::GetValueInput(effect, 0);
-        break;
-      }
-      case IrOpcode::kAllocate: {
-        // Allocations don't interfere with field loads. In case we see the
-        // actual allocation for the {object} we can abort.
-        if (object == effect) return NoChange();
-        break;
-      }
-      default: {
-        if (!effect->op()->HasProperty(Operator::kNoWrite) ||
-            effect->op()->EffectInputCount() != 1) {
-          return NoChange();
-        }
+      that->next_index_ %= arraysize(elements_);
+      return that;
+    }
+  }
+  return this;
+}
+
+bool LoadElimination::AbstractElements::Equals(
+    AbstractElements const* that) const {
+  if (this == that) return true;
+  for (size_t i = 0; i < arraysize(elements_); ++i) {
+    Element this_element = this->elements_[i];
+    if (this_element.object == nullptr) continue;
+    for (size_t j = 0;; ++j) {
+      if (j == arraysize(elements_)) return false;
+      Element that_element = that->elements_[j];
+      if (this_element.object == that_element.object &&
+          this_element.index == that_element.index &&
+          this_element.value == that_element.value) {
         break;
       }
     }
   }
-  UNREACHABLE();
+  for (size_t i = 0; i < arraysize(elements_); ++i) {
+    Element that_element = that->elements_[i];
+    if (that_element.object == nullptr) continue;
+    for (size_t j = 0;; ++j) {
+      if (j == arraysize(elements_)) return false;
+      Element this_element = this->elements_[j];
+      if (that_element.object == this_element.object &&
+          that_element.index == this_element.index &&
+          that_element.value == this_element.value) {
+        break;
+      }
+    }
+  }
+  return true;
+}
+
+LoadElimination::AbstractElements const*
+LoadElimination::AbstractElements::Merge(AbstractElements const* that,
+                                         Zone* zone) const {
+  if (this->Equals(that)) return this;
+  AbstractElements* copy = new (zone) AbstractElements(zone);
+  for (Element const this_element : this->elements_) {
+    if (this_element.object == nullptr) continue;
+    for (Element const that_element : that->elements_) {
+      if (this_element.object == that_element.object &&
+          this_element.index == that_element.index &&
+          this_element.value == that_element.value) {
+        copy->elements_[copy->next_index_++] = this_element;
+      }
+    }
+  }
+  copy->next_index_ %= arraysize(elements_);
+  return copy;
+}
+
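+// Note on AbstractElements::Merge: an (object, index, value) triple survives
+// a control-flow merge only if the identical triple occurs in both incoming
+// states, i.e. the merge is a set intersection. For example, merging
+// {(a, i, v)} with {(a, i, v), (b, j, w)} yields {(a, i, v)}, and merging
+// with the empty state yields the empty state. Together with Extend()'s
+// wrap-around write this bounds the state at kMaxTrackedElements entries.
+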
+Node* LoadElimination::AbstractField::Lookup(Node* object) const {
+  for (auto pair : info_for_node_) {
+    if (MustAlias(object, pair.first)) return pair.second;
+  }
+  return nullptr;
+}
+
+LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
+    Node* object, Zone* zone) const {
+  for (auto pair : this->info_for_node_) {
+    if (MayAlias(object, pair.first)) {
+      AbstractField* that = new (zone) AbstractField(zone);
+      for (auto pair : this->info_for_node_) {
+        if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
+      }
+      return that;
+    }
+  }
+  return this;
+}
+
+bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
+  if (this->elements_) {
+    if (!that->elements_ || !that->elements_->Equals(this->elements_)) {
+      return false;
+    }
+  } else if (that->elements_) {
+    return false;
+  }
+  for (size_t i = 0u; i < arraysize(fields_); ++i) {
+    AbstractField const* this_field = this->fields_[i];
+    AbstractField const* that_field = that->fields_[i];
+    if (this_field) {
+      if (!that_field || !that_field->Equals(this_field)) return false;
+    } else if (that_field) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void LoadElimination::AbstractState::Merge(AbstractState const* that,
+                                           Zone* zone) {
+  // Merge the information we have about the elements.
+  if (this->elements_) {
+    this->elements_ = that->elements_
+                          ? that->elements_->Merge(this->elements_, zone)
+                          : that->elements_;
+  } else {
+    this->elements_ = that->elements_;
+  }
+
+  // Merge the information we have about the fields.
+  for (size_t i = 0; i < arraysize(fields_); ++i) {
+    if (this->fields_[i]) {
+      if (that->fields_[i]) {
+        this->fields_[i] = this->fields_[i]->Merge(that->fields_[i], zone);
+      } else {
+        this->fields_[i] = nullptr;
+      }
+    }
+  }
+}
+
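+// Merge behaviour for one tracked field slot (illustrative):
+//   tracked in both states  -> per-object intersection via AbstractField::Merge
+//   tracked in only one     -> nullptr (the fact is dropped)
+//   tracked in neither      -> nullptr (already unknown)
+// Since nullptr uniformly means "no knowledge", a merge can only lose facts,
+// which keeps the fixpoint iteration monotone.
+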
+Node* LoadElimination::AbstractState::LookupElement(Node* object,
+                                                    Node* index) const {
+  if (this->elements_) {
+    return this->elements_->Lookup(object, index);
+  }
+  return nullptr;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::AddElement(Node* object, Node* index,
+                                           Node* value, Zone* zone) const {
+  AbstractState* that = new (zone) AbstractState(*this);
+  if (that->elements_) {
+    that->elements_ = that->elements_->Extend(object, index, value, zone);
+  } else {
+    that->elements_ = new (zone) AbstractElements(object, index, value, zone);
+  }
+  return that;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::KillElement(Node* object, Node* index,
+                                            Zone* zone) const {
+  if (this->elements_) {
+    AbstractElements const* that_elements =
+        this->elements_->Kill(object, index, zone);
+    if (this->elements_ != that_elements) {
+      AbstractState* that = new (zone) AbstractState(*this);
+      that->elements_ = that_elements;
+      return that;
+    }
+  }
+  return this;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddField(
+    Node* object, size_t index, Node* value, Zone* zone) const {
+  AbstractState* that = new (zone) AbstractState(*this);
+  if (that->fields_[index]) {
+    that->fields_[index] = that->fields_[index]->Extend(object, value, zone);
+  } else {
+    that->fields_[index] = new (zone) AbstractField(object, value, zone);
+  }
+  return that;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
+    Node* object, size_t index, Zone* zone) const {
+  if (AbstractField const* this_field = this->fields_[index]) {
+    this_field = this_field->Kill(object, zone);
+    if (this->fields_[index] != this_field) {
+      AbstractState* that = new (zone) AbstractState(*this);
+      that->fields_[index] = this_field;
+      return that;
+    }
+  }
+  return this;
+}
+
+Node* LoadElimination::AbstractState::LookupField(Node* object,
+                                                  size_t index) const {
+  if (AbstractField const* this_field = this->fields_[index]) {
+    return this_field->Lookup(object);
+  }
+  return nullptr;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractStateForEffectNodes::Get(Node* node) const {
+  size_t const id = node->id();
+  if (id < info_for_node_.size()) return info_for_node_[id];
+  return nullptr;
+}
+
+void LoadElimination::AbstractStateForEffectNodes::Set(
+    Node* node, AbstractState const* state) {
+  size_t const id = node->id();
+  if (id >= info_for_node_.size()) info_for_node_.resize(id + 1, nullptr);
+  info_for_node_[id] = state;
+}
+
+Reduction LoadElimination::ReduceCheckMaps(Node* node) {
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  int const map_input_count = node->op()->ValueInputCount() - 1;
+  if (Node* const object_map = state->LookupField(object, 0)) {
+    for (int i = 0; i < map_input_count; ++i) {
+      Node* map = NodeProperties::GetValueInput(node, 1 + i);
+      if (map == object_map) return Replace(effect);
+    }
+  }
+  if (map_input_count == 1) {
+    Node* const map0 = NodeProperties::GetValueInput(node, 1);
+    state = state->AddField(object, 0, map0, zone());
+  }
+  return UpdateState(node, state);
+}
+
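+// Example of this reduction on an effect chain (pseudocode):
+//
+//   e1 = CheckMaps(o, M; e0)   // state afterwards records map(o) == M
+//   e2 = CheckMaps(o, M; e1)   // LookupField(o, 0) yields M
+//
+// The second check is redundant, so its uses are rewired to e1 via
+// Replace(effect). Field slot 0 corresponds to offset 0, the object's map.
+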
+Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const elements = NodeProperties::GetValueInput(node, 1);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
+  if (Node* const elements_map = state->LookupField(elements, 0)) {
+    // Check if the {elements} already have the fixed array map.
+    if (elements_map == fixed_array_map) {
+      ReplaceWithValue(node, elements, effect);
+      return Replace(elements);
+    }
+  }
+  // We know that the resulting elements have the fixed array map.
+  state = state->AddField(node, 0, fixed_array_map, zone());
+  // Kill the previous elements on {object}.
+  state = state->KillField(object, 2, zone());
+  // Add the new elements on {object}.
+  state = state->AddField(object, 2, node, zone());
+  return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
+  GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  if (flags & GrowFastElementsFlag::kDoubleElements) {
+    // We know that the resulting elements have the fixed double array map.
+    Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
+    state = state->AddField(node, 0, fixed_double_array_map, zone());
+  } else {
+    // We know that the resulting elements have the fixed array map.
+    Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
+    state = state->AddField(node, 0, fixed_array_map, zone());
+  }
+  if (flags & GrowFastElementsFlag::kArrayObject) {
+    // Kill the previous Array::length on {object}.
+    state = state->KillField(object, 3, zone());
+  }
+  // Kill the previous elements on {object}.
+  state = state->KillField(object, 2, zone());
+  // Add the new elements on {object}.
+  state = state->AddField(object, 2, node, zone());
+  return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const source_map = NodeProperties::GetValueInput(node, 1);
+  Node* const target_map = NodeProperties::GetValueInput(node, 2);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  if (Node* const object_map = state->LookupField(object, 0)) {
+    if (target_map == object_map) {
+      // The {object} already has the {target_map}, so this TransitionElements
+      // {node} is fully redundant (independent of what {source_map} is).
+      return Replace(effect);
+    }
+    state = state->KillField(object, 0, zone());
+    if (source_map == object_map) {
+      state = state->AddField(object, 0, target_map, zone());
+    }
+  } else {
+    state = state->KillField(object, 0, zone());
+  }
+  ElementsTransition transition = ElementsTransitionOf(node->op());
+  switch (transition) {
+    case ElementsTransition::kFastTransition:
+      break;
+    case ElementsTransition::kSlowTransition:
+      // Kill the elements as well.
+      state = state->KillField(object, 2, zone());
+      break;
+  }
+  return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceLoadField(Node* node) {
+  FieldAccess const& access = FieldAccessOf(node->op());
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  int field_index = FieldIndexOf(access);
+  if (field_index >= 0) {
+    if (Node* const replacement = state->LookupField(object, field_index)) {
+      // Make sure the {replacement} has at least as good type
+      // as the original {node}.
+      if (!replacement->IsDead() &&
+          NodeProperties::GetType(replacement)
+              ->Is(NodeProperties::GetType(node))) {
+        ReplaceWithValue(node, replacement, effect);
+        return Replace(replacement);
+      }
+    }
+    state = state->AddField(object, field_index, node, zone());
+  }
+  return UpdateState(node, state);
+}
+
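+// Example: a load that follows a store to the same field of the same object
+// is replaced by the stored value (pseudocode, assuming 8-byte pointers so
+// offset 24 maps to tracked slot 3):
+//
+//   e1 = StoreField[+24](o, v; e0)   // ReduceStoreField records slot3(o) == v
+//   x  = LoadField[+24](o; e1)       // LookupField(o, 3) yields v
+//
+// x is replaced by v, provided v's type is at least as precise as x's.
+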
+Reduction LoadElimination::ReduceStoreField(Node* node) {
+  FieldAccess const& access = FieldAccessOf(node->op());
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const new_value = NodeProperties::GetValueInput(node, 1);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  int field_index = FieldIndexOf(access);
+  if (field_index >= 0) {
+    Node* const old_value = state->LookupField(object, field_index);
+    if (old_value == new_value) {
+      // This store is fully redundant.
+      return Replace(effect);
+    }
+    // Kill all potentially aliasing fields and record the new value.
+    state = state->KillField(object, field_index, zone());
+    state = state->AddField(object, field_index, new_value, zone());
+  } else {
+    // Unsupported StoreField operator.
+    state = empty_state();
+  }
+  return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceLoadElement(Node* node) {
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const index = NodeProperties::GetValueInput(node, 1);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  if (Node* const replacement = state->LookupElement(object, index)) {
+    // Make sure the {replacement} has at least as good type
+    // as the original {node}.
+    if (!replacement->IsDead() &&
+        NodeProperties::GetType(replacement)
+            ->Is(NodeProperties::GetType(node))) {
+      ReplaceWithValue(node, replacement, effect);
+      return Replace(replacement);
+    }
+  }
+  state = state->AddElement(object, index, node, zone());
+  return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceStoreElement(Node* node) {
+  ElementAccess const& access = ElementAccessOf(node->op());
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  Node* const index = NodeProperties::GetValueInput(node, 1);
+  Node* const new_value = NodeProperties::GetValueInput(node, 2);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  Node* const old_value = state->LookupElement(object, index);
+  if (old_value == new_value) {
+    // This store is fully redundant.
+    return Replace(effect);
+  }
+  // Kill all potentially aliasing elements.
+  state = state->KillElement(object, index, zone());
+  // Only record the new value if the store doesn't have an implicit
+  // truncation.
+  switch (access.machine_type.representation()) {
+    case MachineRepresentation::kNone:
+    case MachineRepresentation::kBit:
+      UNREACHABLE();
+      break;
+    case MachineRepresentation::kWord8:
+    case MachineRepresentation::kWord16:
+    case MachineRepresentation::kWord32:
+    case MachineRepresentation::kWord64:
+    case MachineRepresentation::kFloat32:
+      // TODO(turbofan): Add support for doing the truncations.
+      break;
+    case MachineRepresentation::kFloat64:
+    case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kTaggedSigned:
+    case MachineRepresentation::kTaggedPointer:
+    case MachineRepresentation::kTagged:
+      state = state->AddElement(object, index, new_value, zone());
+      break;
+  }
+  return UpdateState(node, state);
+}
+
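+// Why the representation switch above records nothing for the narrow cases:
+// storing a value into e.g. a Word8 or Float32 backed location implicitly
+// truncates it, so a subsequent load would not observe the stored node
+// itself. Until such truncations are modelled, those stores only kill the
+// aliasing entries and record no new value.
+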
+Reduction LoadElimination::ReduceStoreTypedElement(Node* node) {
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+  return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceEffectPhi(Node* node) {
+  Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
+  Node* const control = NodeProperties::GetControlInput(node);
+  AbstractState const* state0 = node_states_.Get(effect0);
+  if (state0 == nullptr) return NoChange();
+  if (control->opcode() == IrOpcode::kLoop) {
+    // Here we rely on having only reducible loops:
+    // The loop entry edge always dominates the header, so we can just take
+    // the state from the first input, and compute the loop state based on it.
+    AbstractState const* state = ComputeLoopState(node, state0);
+    return UpdateState(node, state);
+  }
+  DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+  // Shortcut for the case when we do not know anything about some input.
+  int const input_count = node->op()->EffectInputCount();
+  for (int i = 1; i < input_count; ++i) {
+    Node* const effect = NodeProperties::GetEffectInput(node, i);
+    if (node_states_.Get(effect) == nullptr) return NoChange();
+  }
+
+  // Make a copy of the first input's state and merge with the state
+  // from other inputs.
+  AbstractState* state = new (zone()) AbstractState(*state0);
+  for (int i = 1; i < input_count; ++i) {
+    Node* const input = NodeProperties::GetEffectInput(node, i);
+    state->Merge(node_states_.Get(input), zone());
+  }
+  return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceStart(Node* node) {
+  return UpdateState(node, empty_state());
+}
+
+Reduction LoadElimination::ReduceOtherNode(Node* node) {
+  if (node->op()->EffectInputCount() == 1) {
+    if (node->op()->EffectOutputCount() == 1) {
+      Node* const effect = NodeProperties::GetEffectInput(node);
+      AbstractState const* state = node_states_.Get(effect);
+      // If we do not know anything about the predecessor yet, do not
+      // propagate anything; this node will be revisited once the
+      // predecessor's state has been computed.
+      if (state == nullptr) return NoChange();
+      // Check if this {node} has some uncontrolled side effects.
+      if (!node->op()->HasProperty(Operator::kNoWrite)) {
+        state = empty_state();
+      }
+      return UpdateState(node, state);
+    } else {
+      // Effect terminators should be handled specially.
+      return NoChange();
+    }
+  }
+  DCHECK_EQ(0, node->op()->EffectInputCount());
+  DCHECK_EQ(0, node->op()->EffectOutputCount());
   return NoChange();
 }
 
+Reduction LoadElimination::UpdateState(Node* node, AbstractState const* state) {
+  AbstractState const* original = node_states_.Get(node);
+  // Only signal that the {node} has changed if the information in {state}
+  // actually differs from that in the {original}.
+  if (state != original) {
+    if (original == nullptr || !state->Equals(original)) {
+      node_states_.Set(node, state);
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
+
+LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
+    Node* node, AbstractState const* state) const {
+  Node* const control = NodeProperties::GetControlInput(node);
+  ZoneQueue<Node*> queue(zone());
+  ZoneSet<Node*> visited(zone());
+  visited.insert(node);
+  for (int i = 1; i < control->InputCount(); ++i) {
+    queue.push(node->InputAt(i));
+  }
+  while (!queue.empty()) {
+    Node* const current = queue.front();
+    queue.pop();
+    if (visited.find(current) == visited.end()) {
+      visited.insert(current);
+      if (!current->op()->HasProperty(Operator::kNoWrite)) {
+        switch (current->opcode()) {
+          case IrOpcode::kEnsureWritableFastElements: {
+            Node* const object = NodeProperties::GetValueInput(current, 0);
+            state = state->KillField(object, 2, zone());
+            break;
+          }
+          case IrOpcode::kMaybeGrowFastElements: {
+            GrowFastElementsFlags flags =
+                GrowFastElementsFlagsOf(current->op());
+            Node* const object = NodeProperties::GetValueInput(current, 0);
+            state = state->KillField(object, 2, zone());
+            if (flags & GrowFastElementsFlag::kArrayObject) {
+              state = state->KillField(object, 3, zone());
+            }
+            break;
+          }
+          case IrOpcode::kTransitionElementsKind: {
+            Node* const object = NodeProperties::GetValueInput(current, 0);
+            state = state->KillField(object, 0, zone());
+            state = state->KillField(object, 2, zone());
+            break;
+          }
+          case IrOpcode::kStoreField: {
+            FieldAccess const& access = FieldAccessOf(current->op());
+            Node* const object = NodeProperties::GetValueInput(current, 0);
+            int field_index = FieldIndexOf(access);
+            if (field_index < 0) return empty_state();
+            state = state->KillField(object, field_index, zone());
+            break;
+          }
+          case IrOpcode::kStoreElement: {
+            Node* const object = NodeProperties::GetValueInput(current, 0);
+            Node* const index = NodeProperties::GetValueInput(current, 1);
+            state = state->KillElement(object, index, zone());
+            break;
+          }
+          case IrOpcode::kStoreBuffer:
+          case IrOpcode::kStoreTypedElement: {
+            // Doesn't affect anything we track with the state currently.
+            break;
+          }
+          default:
+            return empty_state();
+        }
+      }
+      for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
+        queue.push(NodeProperties::GetEffectInput(current, i));
+      }
+    }
+  }
+  return state;
+}
+
+// static
+int LoadElimination::FieldIndexOf(FieldAccess const& access) {
+  MachineRepresentation rep = access.machine_type.representation();
+  switch (rep) {
+    case MachineRepresentation::kNone:
+    case MachineRepresentation::kBit:
+      UNREACHABLE();
+      break;
+    case MachineRepresentation::kWord32:
+    case MachineRepresentation::kWord64:
+      if (rep != MachineType::PointerRepresentation()) {
+        return -1;  // We currently only track pointer size fields.
+      }
+      break;
+    case MachineRepresentation::kWord8:
+    case MachineRepresentation::kWord16:
+    case MachineRepresentation::kFloat32:
+      return -1;  // Currently untracked.
+    case MachineRepresentation::kFloat64:
+    case MachineRepresentation::kSimd128:
+      return -1;  // Currently untracked.
+    case MachineRepresentation::kTaggedSigned:
+    case MachineRepresentation::kTaggedPointer:
+    case MachineRepresentation::kTagged:
+      // TODO(bmeurer): Check that we never do overlapping load/stores of
+      // individual parts of Float64/Simd128 values.
+      break;
+  }
+  DCHECK_EQ(kTaggedBase, access.base_is_tagged);
+  DCHECK_EQ(0, access.offset % kPointerSize);
+  int field_index = access.offset / kPointerSize;
+  if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
+  return field_index;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
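FieldIndexOf() maps a field's byte offset to one of kMaxTrackedFields pointer-sized slots. A standalone model of that mapping (8-byte pointers are an assumption about the target; kMaxTrackedFields is 32 in this patch):

    #include <cassert>

    int FieldIndexOfModel(int offset) {
      assert(offset % 8 == 0);          // tracked fields are pointer-aligned
      int index = offset / 8;
      return index >= 32 ? -1 : index;  // fields past slot 31 are untracked
    }

    int main() {
      assert(FieldIndexOfModel(0) == 0);     // the map field, at offset 0
      assert(FieldIndexOfModel(16) == 2);
      assert(FieldIndexOfModel(512) == -1);  // beyond the 32 tracked slots
    }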
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 4a1323b..2a4ee40 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -1,4 +1,4 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -11,26 +11,174 @@
 namespace internal {
 namespace compiler {
 
-class Graph;
-class SimplifiedOperatorBuilder;
+// Forward declarations.
+struct FieldAccess;
+class JSGraph;
 
 class LoadElimination final : public AdvancedReducer {
  public:
-  explicit LoadElimination(Editor* editor, Graph* graph,
-                           SimplifiedOperatorBuilder* simplified)
-      : AdvancedReducer(editor), graph_(graph), simplified_(simplified) {}
-  ~LoadElimination() final;
+  LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
+      : AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
+  ~LoadElimination() final {}
 
   Reduction Reduce(Node* node) final;
 
  private:
-  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
-  Graph* graph() const { return graph_; }
+  static const size_t kMaxTrackedElements = 8;
 
+  // Abstract state to approximate the current state of an element along the
+  // effect paths through the graph.
+  class AbstractElements final : public ZoneObject {
+   public:
+    explicit AbstractElements(Zone* zone) {
+      for (size_t i = 0; i < arraysize(elements_); ++i) {
+        elements_[i] = Element();
+      }
+    }
+    AbstractElements(Node* object, Node* index, Node* value, Zone* zone)
+        : AbstractElements(zone) {
+      elements_[next_index_++] = Element(object, index, value);
+    }
+
+    AbstractElements const* Extend(Node* object, Node* index, Node* value,
+                                   Zone* zone) const {
+      AbstractElements* that = new (zone) AbstractElements(*this);
+      that->elements_[that->next_index_] = Element(object, index, value);
+      that->next_index_ = (that->next_index_ + 1) % arraysize(elements_);
+      return that;
+    }
+    Node* Lookup(Node* object, Node* index) const;
+    AbstractElements const* Kill(Node* object, Node* index, Zone* zone) const;
+    bool Equals(AbstractElements const* that) const;
+    AbstractElements const* Merge(AbstractElements const* that,
+                                  Zone* zone) const;
+
+   private:
+    struct Element {
+      Element() {}
+      Element(Node* object, Node* index, Node* value)
+          : object(object), index(index), value(value) {}
+
+      Node* object = nullptr;
+      Node* index = nullptr;
+      Node* value = nullptr;
+    };
+
+    Element elements_[kMaxTrackedElements];
+    size_t next_index_ = 0;
+  };
+
+  // Abstract state to approximate the current state of a certain field along
+  // the effect paths through the graph.
+  class AbstractField final : public ZoneObject {
+   public:
+    explicit AbstractField(Zone* zone) : info_for_node_(zone) {}
+    AbstractField(Node* object, Node* value, Zone* zone)
+        : info_for_node_(zone) {
+      info_for_node_.insert(std::make_pair(object, value));
+    }
+
+    AbstractField const* Extend(Node* object, Node* value, Zone* zone) const {
+      AbstractField* that = new (zone) AbstractField(zone);
+      that->info_for_node_ = this->info_for_node_;
+      that->info_for_node_.insert(std::make_pair(object, value));
+      return that;
+    }
+    Node* Lookup(Node* object) const;
+    AbstractField const* Kill(Node* object, Zone* zone) const;
+    bool Equals(AbstractField const* that) const {
+      return this == that || this->info_for_node_ == that->info_for_node_;
+    }
+    AbstractField const* Merge(AbstractField const* that, Zone* zone) const {
+      if (this->Equals(that)) return this;
+      AbstractField* copy = new (zone) AbstractField(zone);
+      for (auto this_it : this->info_for_node_) {
+        Node* this_object = this_it.first;
+        Node* this_value = this_it.second;
+        auto that_it = that->info_for_node_.find(this_object);
+        if (that_it != that->info_for_node_.end() &&
+            that_it->second == this_value) {
+          copy->info_for_node_.insert(this_it);
+        }
+      }
+      return copy;
+    }
+
+   private:
+    ZoneMap<Node*, Node*> info_for_node_;
+  };
+
+  static size_t const kMaxTrackedFields = 32;
+
+  class AbstractState final : public ZoneObject {
+   public:
+    AbstractState() {
+      for (size_t i = 0; i < arraysize(fields_); ++i) {
+        fields_[i] = nullptr;
+      }
+    }
+
+    bool Equals(AbstractState const* that) const;
+    void Merge(AbstractState const* that, Zone* zone);
+
+    AbstractState const* AddField(Node* object, size_t index, Node* value,
+                                  Zone* zone) const;
+    AbstractState const* KillField(Node* object, size_t index,
+                                   Zone* zone) const;
+    Node* LookupField(Node* object, size_t index) const;
+
+    AbstractState const* AddElement(Node* object, Node* index, Node* value,
+                                    Zone* zone) const;
+    AbstractState const* KillElement(Node* object, Node* index,
+                                     Zone* zone) const;
+    Node* LookupElement(Node* object, Node* index) const;
+
+   private:
+    AbstractElements const* elements_ = nullptr;
+    AbstractField const* fields_[kMaxTrackedFields];
+  };
+
+  class AbstractStateForEffectNodes final : public ZoneObject {
+   public:
+    explicit AbstractStateForEffectNodes(Zone* zone) : info_for_node_(zone) {}
+    AbstractState const* Get(Node* node) const;
+    void Set(Node* node, AbstractState const* state);
+
+    Zone* zone() const { return info_for_node_.get_allocator().zone(); }
+
+   private:
+    ZoneVector<AbstractState const*> info_for_node_;
+  };
+
+  Reduction ReduceCheckMaps(Node* node);
+  Reduction ReduceEnsureWritableFastElements(Node* node);
+  Reduction ReduceMaybeGrowFastElements(Node* node);
+  Reduction ReduceTransitionElementsKind(Node* node);
   Reduction ReduceLoadField(Node* node);
+  Reduction ReduceStoreField(Node* node);
+  Reduction ReduceLoadElement(Node* node);
+  Reduction ReduceStoreElement(Node* node);
+  Reduction ReduceStoreTypedElement(Node* node);
+  Reduction ReduceEffectPhi(Node* node);
+  Reduction ReduceStart(Node* node);
+  Reduction ReduceOtherNode(Node* node);
 
-  Graph* const graph_;
-  SimplifiedOperatorBuilder* const simplified_;
+  Reduction UpdateState(Node* node, AbstractState const* state);
+
+  AbstractState const* ComputeLoopState(Node* node,
+                                        AbstractState const* state) const;
+
+  static int FieldIndexOf(FieldAccess const& access);
+
+  AbstractState const* empty_state() const { return &empty_state_; }
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Zone* zone() const { return node_states_.zone(); }
+
+  AbstractState const empty_state_;
+  AbstractStateForEffectNodes node_states_;
+  JSGraph* const jsgraph_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadElimination);
 };
 
 }  // namespace compiler
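With the new constructor, call sites hand the reducer a JSGraph and a Zone instead of a Graph plus SimplifiedOperatorBuilder. A hypothetical wiring sketch (the actual pipeline registration is elsewhere in this CL and not shown here):

    GraphReducer graph_reducer(temp_zone, jsgraph->graph());
    LoadElimination load_elimination(&graph_reducer, jsgraph, temp_zone);
    graph_reducer.AddReducer(&load_elimination);
    graph_reducer.ReduceGraph();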
diff --git a/src/compiler/loop-analysis.cc b/src/compiler/loop-analysis.cc
index d52c7c7..2a81aee 100644
--- a/src/compiler/loop-analysis.cc
+++ b/src/compiler/loop-analysis.cc
@@ -29,6 +29,7 @@
 struct LoopInfo {
   Node* header;
   NodeInfo* header_list;
+  NodeInfo* exit_list;
   NodeInfo* body_list;
   LoopTree::Loop* loop;
 };
@@ -81,9 +82,9 @@
         if (marked_forward && marked_backward) {
           PrintF("X");
         } else if (marked_forward) {
-          PrintF("/");
+          PrintF(">");
         } else if (marked_backward) {
-          PrintF("\\");
+          PrintF("<");
         } else {
           PrintF(" ");
         }
@@ -198,12 +199,22 @@
         if (merge->opcode() == IrOpcode::kLoop) {
           loop_num = CreateLoopInfo(merge);
         }
+      } else if (node->opcode() == IrOpcode::kLoopExit) {
+        // Intentionally ignore return value. Loop exit node marks
+        // are propagated normally.
+        CreateLoopInfo(node->InputAt(1));
+      } else if (node->opcode() == IrOpcode::kLoopExitValue ||
+                 node->opcode() == IrOpcode::kLoopExitEffect) {
+        Node* loop_exit = NodeProperties::GetControlInput(node);
+        // Intentionally ignore return value. Loop exit node marks
+        // are propagated normally.
+        CreateLoopInfo(loop_exit->InputAt(1));
       }
 
       // Propagate marks backwards from this node.
       for (int i = 0; i < node->InputCount(); i++) {
         Node* input = node->InputAt(i);
-        if (loop_num > 0 && i != kAssumedLoopEntryIndex) {
+        if (IsBackedge(node, i)) {
           // Only propagate the loop mark on backedges.
           if (SetBackwardMark(input, loop_num)) Queue(input);
         } else {
@@ -216,6 +227,7 @@
 
   // Make a new loop if necessary for the given node.
   int CreateLoopInfo(Node* node) {
+    DCHECK_EQ(IrOpcode::kLoop, node->opcode());
     int loop_num = LoopNum(node);
     if (loop_num > 0) return loop_num;
 
@@ -223,21 +235,39 @@
     if (INDEX(loop_num) >= width_) ResizeBackwardMarks();
 
     // Create a new loop.
-    loops_.push_back({node, nullptr, nullptr, nullptr});
+    loops_.push_back({node, nullptr, nullptr, nullptr, nullptr});
     loop_tree_->NewLoop();
+    SetLoopMarkForLoopHeader(node, loop_num);
+    return loop_num;
+  }
+
+  void SetLoopMark(Node* node, int loop_num) {
+    info(node);  // create the NodeInfo
     SetBackwardMark(node, loop_num);
     loop_tree_->node_to_loop_num_[node->id()] = loop_num;
+  }
 
-    // Setup loop mark for phis attached to loop header.
+  void SetLoopMarkForLoopHeader(Node* node, int loop_num) {
+    DCHECK_EQ(IrOpcode::kLoop, node->opcode());
+    SetLoopMark(node, loop_num);
     for (Node* use : node->uses()) {
       if (NodeProperties::IsPhi(use)) {
-        info(use);  // create the NodeInfo
-        SetBackwardMark(use, loop_num);
-        loop_tree_->node_to_loop_num_[use->id()] = loop_num;
+        SetLoopMark(use, loop_num);
+      }
+
+      // Do not keep the loop alive if it does not have any backedges.
+      if (node->InputCount() <= 1) continue;
+
+      if (use->opcode() == IrOpcode::kLoopExit) {
+        SetLoopMark(use, loop_num);
+        for (Node* exit_use : use->uses()) {
+          if (exit_use->opcode() == IrOpcode::kLoopExitValue ||
+              exit_use->opcode() == IrOpcode::kLoopExitEffect) {
+            SetLoopMark(exit_use, loop_num);
+          }
+        }
       }
     }
-
-    return loop_num;
   }
 
   void ResizeBackwardMarks() {
@@ -276,20 +306,33 @@
       queued_.Set(node, false);
       for (Edge edge : node->use_edges()) {
         Node* use = edge.from();
-        if (!IsBackedge(use, edge)) {
+        if (!IsBackedge(use, edge.index())) {
           if (PropagateForwardMarks(node, use)) Queue(use);
         }
       }
     }
   }
 
-  bool IsBackedge(Node* use, Edge& edge) {
+  bool IsLoopHeaderNode(Node* node) {
+    return node->opcode() == IrOpcode::kLoop || NodeProperties::IsPhi(node);
+  }
+
+  bool IsLoopExitNode(Node* node) {
+    return node->opcode() == IrOpcode::kLoopExit ||
+           node->opcode() == IrOpcode::kLoopExitValue ||
+           node->opcode() == IrOpcode::kLoopExitEffect;
+  }
+
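+  // Returns true if input {index} of {use} is a loop backedge. For a phi,
+  // every input except the control input and the assumed entry input (0)
+  // is a backedge; for a loop node, every control input except input 0.
+  // Loop exit nodes never have backedges.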
+  bool IsBackedge(Node* use, int index) {
     if (LoopNum(use) <= 0) return false;
-    if (edge.index() == kAssumedLoopEntryIndex) return false;
     if (NodeProperties::IsPhi(use)) {
-      return !NodeProperties::IsControlEdge(edge);
+      return index != NodeProperties::FirstControlIndex(use) &&
+             index != kAssumedLoopEntryIndex;
+    } else if (use->opcode() == IrOpcode::kLoop) {
+      return index != kAssumedLoopEntryIndex;
     }
-    return true;
+    DCHECK(IsLoopExitNode(use));
+    return false;
   }
 
   int LoopNum(Node* node) { return loop_tree_->node_to_loop_num_[node->id()]; }
@@ -307,6 +350,22 @@
     }
   }
 
+  void AddNodeToLoop(NodeInfo* node_info, LoopInfo* loop, int loop_num) {
+    if (LoopNum(node_info->node) == loop_num) {
+      if (IsLoopHeaderNode(node_info->node)) {
+        node_info->next = loop->header_list;
+        loop->header_list = node_info;
+      } else {
+        DCHECK(IsLoopExitNode(node_info->node));
+        node_info->next = loop->exit_list;
+        loop->exit_list = node_info;
+      }
+    } else {
+      node_info->next = loop->body_list;
+      loop->body_list = node_info;
+    }
+  }
+
   void FinishLoopTree() {
     DCHECK(loops_found_ == static_cast<int>(loops_.size()));
     DCHECK(loops_found_ == static_cast<int>(loop_tree_->all_loops_.size()));
@@ -342,13 +401,7 @@
         }
       }
       if (innermost == nullptr) continue;
-      if (LoopNum(ni.node) == innermost_index) {
-        ni.next = innermost->header_list;
-        innermost->header_list = &ni;
-      } else {
-        ni.next = innermost->body_list;
-        innermost->body_list = &ni;
-      }
+      AddNodeToLoop(&ni, innermost, innermost_index);
       count++;
     }
 
@@ -368,13 +421,7 @@
     size_t count = 0;
     for (NodeInfo& ni : info_) {
       if (ni.node == nullptr || !IsInLoop(ni.node, 1)) continue;
-      if (LoopNum(ni.node) == 1) {
-        ni.next = li->header_list;
-        li->header_list = &ni;
-      } else {
-        ni.next = li->body_list;
-        li->body_list = &ni;
-      }
+      AddNodeToLoop(&ni, li, 1);
       count++;
     }
 
@@ -406,7 +453,14 @@
     // Serialize nested loops.
     for (LoopTree::Loop* child : loop->children_) SerializeLoop(child);
 
-    loop->body_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+    // Serialize the exits.
+    loop->exits_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+    for (NodeInfo* ni = li.exit_list; ni != nullptr; ni = ni->next) {
+      loop_tree_->loop_nodes_.push_back(ni->node);
+      loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
+    }
+
+    loop->exits_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
   }
 
   // Connect the LoopTree loops to their parents recursively.
@@ -438,9 +492,12 @@
     while (i < loop->body_start_) {
       PrintF(" H#%d", loop_tree_->loop_nodes_[i++]->id());
     }
-    while (i < loop->body_end_) {
+    while (i < loop->exits_start_) {
       PrintF(" B#%d", loop_tree_->loop_nodes_[i++]->id());
     }
+    while (i < loop->exits_end_) {
+      PrintF(" E#%d", loop_tree_->loop_nodes_[i++]->id());
+    }
     PrintF("\n");
     for (LoopTree::Loop* child : loop->children_) PrintLoop(child);
   }
@@ -452,7 +509,7 @@
       new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone());
   LoopFinderImpl finder(graph, loop_tree, zone);
   finder.Run();
-  if (FLAG_trace_turbo_graph) {
+  if (FLAG_trace_turbo_loop) {
     finder.Print();
   }
   return loop_tree;
diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h
index b8bc395..a8c3bca 100644
--- a/src/compiler/loop-analysis.h
+++ b/src/compiler/loop-analysis.h
@@ -38,8 +38,9 @@
     Loop* parent() const { return parent_; }
     const ZoneVector<Loop*>& children() const { return children_; }
     size_t HeaderSize() const { return body_start_ - header_start_; }
-    size_t BodySize() const { return body_end_ - body_start_; }
-    size_t TotalSize() const { return body_end_ - header_start_; }
+    size_t BodySize() const { return exits_start_ - body_start_; }
+    size_t ExitsSize() const { return exits_end_ - exits_start_; }
+    size_t TotalSize() const { return exits_end_ - header_start_; }
     size_t depth() const { return static_cast<size_t>(depth_); }
 
    private:
@@ -52,13 +53,15 @@
           children_(zone),
           header_start_(-1),
           body_start_(-1),
-          body_end_(-1) {}
+          exits_start_(-1),
+          exits_end_(-1) {}
     Loop* parent_;
     int depth_;
     ZoneVector<Loop*> children_;
     int header_start_;
     int body_start_;
-    int body_end_;
+    int exits_start_;
+    int exits_end_;
   };
 
   // Return the innermost nested loop, if any, that contains {node}.
@@ -97,13 +100,19 @@
   // Return a range which can iterate over the body nodes of {loop}.
   NodeRange BodyNodes(Loop* loop) {
     return NodeRange(&loop_nodes_[0] + loop->body_start_,
-                     &loop_nodes_[0] + loop->body_end_);
+                     &loop_nodes_[0] + loop->exits_start_);
+  }
+
+  // Return a range which can iterate over the exit nodes of {loop}.
+  NodeRange ExitNodes(Loop* loop) {
+    return NodeRange(&loop_nodes_[0] + loop->exits_start_,
+                     &loop_nodes_[0] + loop->exits_end_);
   }
 
   // Return a range which can iterate over the nodes of {loop}.
   NodeRange LoopNodes(Loop* loop) {
     return NodeRange(&loop_nodes_[0] + loop->header_start_,
-                     &loop_nodes_[0] + loop->body_end_);
+                     &loop_nodes_[0] + loop->exits_end_);
   }
 
   // Return the node that represents the control, i.e. the loop node itself.
diff --git a/src/compiler/loop-peeling.cc b/src/compiler/loop-peeling.cc
index 5379596..9535df5 100644
--- a/src/compiler/loop-peeling.cc
+++ b/src/compiler/loop-peeling.cc
@@ -126,8 +126,14 @@
     // Copy all the nodes first.
     for (Node* node : nodes) {
       inputs.clear();
-      for (Node* input : node->inputs()) inputs.push_back(map(input));
-      Insert(node, graph->NewNode(node->op(), node->InputCount(), &inputs[0]));
+      for (Node* input : node->inputs()) {
+        inputs.push_back(map(input));
+      }
+      Node* copy = graph->NewNode(node->op(), node->InputCount(), &inputs[0]);
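+      // Preserve the type of the original node on its copy, if it had one.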
+      if (NodeProperties::IsTyped(node)) {
+        NodeProperties::SetType(copy, NodeProperties::GetType(node));
+      }
+      Insert(node, copy);
     }
 
     // Fix remaining inputs of the copies.
@@ -160,56 +166,54 @@
   return node;
 }
 
-
-static void FindLoopExits(LoopTree* loop_tree, LoopTree::Loop* loop,
-                          NodeVector& exits, NodeVector& rets) {
+bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
   // Look for returns and if projections that are outside the loop but whose
   // control input is inside the loop.
+  Node* loop_node = loop_tree->GetLoopControl(loop);
   for (Node* node : loop_tree->LoopNodes(loop)) {
     for (Node* use : node->uses()) {
       if (!loop_tree->Contains(loop, use)) {
-        if (IrOpcode::IsIfProjectionOpcode(use->opcode())) {
-          // This is a branch from inside the loop to outside the loop.
-          exits.push_back(use);
-        } else if (use->opcode() == IrOpcode::kReturn &&
-                   loop_tree->Contains(loop,
-                                       NodeProperties::GetControlInput(use))) {
-          // This is a return from inside the loop.
-          rets.push_back(use);
+        bool unmarked_exit;
+        switch (node->opcode()) {
+          case IrOpcode::kLoopExit:
+            unmarked_exit = (node->InputAt(1) != loop_node);
+            break;
+          case IrOpcode::kLoopExitValue:
+          case IrOpcode::kLoopExitEffect:
+            unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
+            break;
+          default:
+            unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
+        }
+        if (unmarked_exit) {
+          if (FLAG_trace_turbo_loop) {
+            PrintF(
+                "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
+                "(%s) is inside the loop, but its use %i (%s) is outside.\n",
+                loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
+          }
+          return false;
         }
       }
     }
   }
-}
-
-
-bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
-  Zone zone(loop_tree->zone()->allocator());
-  NodeVector exits(&zone);
-  NodeVector rets(&zone);
-  FindLoopExits(loop_tree, loop, exits, rets);
-  return exits.size() <= 1u;
+  return true;
 }
 
 
 PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
                                   LoopTree* loop_tree, LoopTree::Loop* loop,
                                   Zone* tmp_zone) {
-  //============================================================================
-  // Find the loop exit region to determine if this loop can be peeled.
-  //============================================================================
-  NodeVector exits(tmp_zone);
-  NodeVector rets(tmp_zone);
-  FindLoopExits(loop_tree, loop, exits, rets);
-
-  if (exits.size() != 1) return nullptr;  // not peelable currently.
+  if (!CanPeel(loop_tree, loop)) return nullptr;
 
   //============================================================================
   // Construct the peeled iteration.
   //============================================================================
   PeeledIterationImpl* iter = new (tmp_zone) PeeledIterationImpl(tmp_zone);
-  size_t estimated_peeled_size =
-      5 + (loop->TotalSize() + exits.size() + rets.size()) * 2;
+  size_t estimated_peeled_size = 5 + (loop->TotalSize()) * 2;
   Peeling peeling(graph, tmp_zone, estimated_peeled_size, &iter->node_pairs_);
 
   Node* dead = graph->NewNode(common->Dead());
@@ -260,73 +264,126 @@
     // Only one backedge, simply replace the input to loop with output of
     // peeling.
     for (Node* node : loop_tree->HeaderNodes(loop)) {
-      node->ReplaceInput(0, peeling.map(node->InputAt(0)));
+      node->ReplaceInput(0, peeling.map(node->InputAt(1)));
     }
     new_entry = peeling.map(loop_node->InputAt(1));
   }
   loop_node->ReplaceInput(0, new_entry);
 
   //============================================================================
-  // Duplicate the loop exit region and add a merge.
+  // Change the exit and exit markers to merge/phi/effect-phi.
   //============================================================================
+  for (Node* exit : loop_tree->ExitNodes(loop)) {
+    switch (exit->opcode()) {
+      case IrOpcode::kLoopExit:
+        // Change the loop exit node to a merge node.
+        exit->ReplaceInput(1, peeling.map(exit->InputAt(0)));
+        NodeProperties::ChangeOp(exit, common->Merge(2));
+        break;
+      case IrOpcode::kLoopExitValue:
+        // Change exit marker to phi.
+        exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
+        NodeProperties::ChangeOp(
+            exit, common->Phi(MachineRepresentation::kTagged, 2));
+        break;
+      case IrOpcode::kLoopExitEffect:
+        // Change effect exit marker to effect phi.
+        exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
+        NodeProperties::ChangeOp(exit, common->EffectPhi(2));
+        break;
+      default:
+        break;
+    }
+  }
+  return iter;
+}
 
-  // Currently we are limited to peeling loops with a single exit. The exit is
-  // the postdominator of the loop (ignoring returns).
-  Node* postdom = exits[0];
-  for (Node* node : rets) exits.push_back(node);
-  for (Node* use : postdom->uses()) {
-    if (NodeProperties::IsPhi(use)) exits.push_back(use);
+namespace {
+
+void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
+                    LoopTree* loop_tree, LoopTree::Loop* loop,
+                    Zone* temp_zone) {
+  // If the loop has nested loops, peel inside those.
+  if (!loop->children().empty()) {
+    for (LoopTree::Loop* inner_loop : loop->children()) {
+      PeelInnerLoops(graph, common, loop_tree, inner_loop, temp_zone);
+    }
+    return;
+  }
+  // Only peel small-enough loops.
+  if (loop->TotalSize() > LoopPeeler::kMaxPeeledNodes) return;
+  if (FLAG_trace_turbo_loop) {
+    PrintF("Peeling loop with header: ");
+    for (Node* node : loop_tree->HeaderNodes(loop)) {
+      PrintF("%i ", node->id());
+    }
+    PrintF("\n");
   }
 
-  NodeRange exit_range(&exits[0], &exits[0] + exits.size());
-  peeling.CopyNodes(graph, tmp_zone, dead, exit_range);
+  LoopPeeler::Peel(graph, common, loop_tree, loop, temp_zone);
+}
 
-  Node* merge = graph->NewNode(common->Merge(2), postdom, peeling.map(postdom));
-  postdom->ReplaceUses(merge);
-  merge->ReplaceInput(0, postdom);  // input 0 overwritten by above line.
-
-  // Find and update all the edges into either the loop or exit region.
-  for (int i = 0; i < 2; i++) {
-    NodeRange range = i == 0 ? loop_tree->LoopNodes(loop) : exit_range;
-    ZoneVector<Edge> value_edges(tmp_zone);
-    ZoneVector<Edge> effect_edges(tmp_zone);
-
-    for (Node* node : range) {
-      // Gather value and effect edges from outside the region.
-      for (Edge edge : node->use_edges()) {
-        if (!peeling.Marked(edge.from())) {
-          // Edge from outside the loop into the region.
-          if (NodeProperties::IsValueEdge(edge) ||
-              NodeProperties::IsContextEdge(edge)) {
-            value_edges.push_back(edge);
-          } else if (NodeProperties::IsEffectEdge(edge)) {
-            effect_edges.push_back(edge);
-          } else {
-            // don't do anything for control edges.
-            // TODO(titzer): should update control edges to peeled?
-          }
-        }
-      }
-
-      // Update all the value and effect edges at once.
-      if (!value_edges.empty()) {
-        // TODO(titzer): machine type is wrong here.
-        Node* phi =
-            graph->NewNode(common->Phi(MachineRepresentation::kTagged, 2), node,
-                           peeling.map(node), merge);
-        for (Edge edge : value_edges) edge.UpdateTo(phi);
-        value_edges.clear();
-      }
-      if (!effect_edges.empty()) {
-        Node* effect_phi = graph->NewNode(common->EffectPhi(2), node,
-                                          peeling.map(node), merge);
-        for (Edge edge : effect_edges) edge.UpdateTo(effect_phi);
-        effect_edges.clear();
+void EliminateLoopExit(Node* node) {
+  DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
+  // The exit markers take the loop exit as input. We iterate over uses
+  // and remove all the markers from the graph.
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge)) {
+      Node* marker = edge.from();
+      if (marker->opcode() == IrOpcode::kLoopExitValue) {
+        NodeProperties::ReplaceUses(marker, marker->InputAt(0));
+        marker->Kill();
+      } else if (marker->opcode() == IrOpcode::kLoopExitEffect) {
+        NodeProperties::ReplaceUses(marker, nullptr,
+                                    NodeProperties::GetEffectInput(marker));
+        marker->Kill();
       }
     }
   }
+  NodeProperties::ReplaceUses(node, nullptr, nullptr,
+                              NodeProperties::GetControlInput(node, 0));
+  node->Kill();
+}
 
-  return iter;
+}  // namespace
+
+// static
+void LoopPeeler::PeelInnerLoopsOfTree(Graph* graph,
+                                      CommonOperatorBuilder* common,
+                                      LoopTree* loop_tree, Zone* temp_zone) {
+  for (LoopTree::Loop* loop : loop_tree->outer_loops()) {
+    PeelInnerLoops(graph, common, loop_tree, loop, temp_zone);
+  }
+
+  EliminateLoopExits(graph, temp_zone);
+}
+
+// static
+void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* temp_zone) {
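+  // Walk the control flow backwards from the end node; every LoopExit found
+  // is removed together with its value/effect markers, and the walk then
+  // continues from the exit's control input.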
+  ZoneQueue<Node*> queue(temp_zone);
+  ZoneVector<bool> visited(graph->NodeCount(), false, temp_zone);
+  queue.push(graph->end());
+  while (!queue.empty()) {
+    Node* node = queue.front();
+    queue.pop();
+
+    if (node->opcode() == IrOpcode::kLoopExit) {
+      Node* control = NodeProperties::GetControlInput(node);
+      EliminateLoopExit(node);
+      if (!visited[control->id()]) {
+        visited[control->id()] = true;
+        queue.push(control);
+      }
+    } else {
+      for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+        Node* control = NodeProperties::GetControlInput(node, i);
+        if (!visited[control->id()]) {
+          visited[control->id()] = true;
+          queue.push(control);
+        }
+      }
+    }
+  }
 }
 
 }  // namespace compiler
diff --git a/src/compiler/loop-peeling.h b/src/compiler/loop-peeling.h
index ea963b0..8b38e25 100644
--- a/src/compiler/loop-peeling.h
+++ b/src/compiler/loop-peeling.h
@@ -33,6 +33,11 @@
   static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
                                LoopTree* loop_tree, LoopTree::Loop* loop,
                                Zone* tmp_zone);
+  static void PeelInnerLoopsOfTree(Graph* graph, CommonOperatorBuilder* common,
+                                   LoopTree* loop_tree, Zone* tmp_zone);
+
+  static void EliminateLoopExits(Graph* graph, Zone* temp_zone);
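+  // Upper limit on the node count of a loop that is still peeled.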
+  static const size_t kMaxPeeledNodes = 1000;
 };
 
 
diff --git a/src/compiler/loop-variable-optimizer.cc b/src/compiler/loop-variable-optimizer.cc
new file mode 100644
index 0000000..8331963
--- /dev/null
+++ b/src/compiler/loop-variable-optimizer.cc
@@ -0,0 +1,406 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/loop-variable-optimizer.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Macro for outputting trace information from the loop variable optimizer.
+#define TRACE(...)                                  \
+  do {                                              \
+    if (FLAG_trace_turbo_loop) PrintF(__VA_ARGS__); \
+  } while (false)
+
+LoopVariableOptimizer::LoopVariableOptimizer(Graph* graph,
+                                             CommonOperatorBuilder* common,
+                                             Zone* zone)
+    : graph_(graph),
+      common_(common),
+      zone_(zone),
+      limits_(zone),
+      induction_vars_(zone) {}
+
+void LoopVariableOptimizer::Run() {
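+  // Propagate comparison-derived limits forward through the control flow
+  // graph; a node is only visited once all of its non-backedge control
+  // inputs have been assigned limits.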
+  ZoneQueue<Node*> queue(zone());
+  queue.push(graph()->start());
+  NodeMarker<bool> queued(graph(), 2);
+  while (!queue.empty()) {
+    Node* node = queue.front();
+    queue.pop();
+    queued.Set(node, false);
+
+    DCHECK(limits_.find(node->id()) == limits_.end());
+    bool all_inputs_visited = true;
+    int inputs_end = (node->opcode() == IrOpcode::kLoop)
+                         ? kFirstBackedge
+                         : node->op()->ControlInputCount();
+    for (int i = 0; i < inputs_end; i++) {
+      auto input = limits_.find(NodeProperties::GetControlInput(node, i)->id());
+      if (input == limits_.end()) {
+        all_inputs_visited = false;
+        break;
+      }
+    }
+    if (!all_inputs_visited) continue;
+
+    VisitNode(node);
+    DCHECK(limits_.find(node->id()) != limits_.end());
+
+    // Queue control outputs.
+    for (Edge edge : node->use_edges()) {
+      if (NodeProperties::IsControlEdge(edge) &&
+          edge.from()->op()->ControlOutputCount() > 0) {
+        Node* use = edge.from();
+        if (use->opcode() == IrOpcode::kLoop &&
+            edge.index() != kAssumedLoopEntryIndex) {
+          VisitBackedge(node, use);
+        } else if (!queued.Get(use)) {
+          queue.push(use);
+          queued.Set(use, true);
+        }
+      }
+    }
+  }
+}
+
+class LoopVariableOptimizer::Constraint : public ZoneObject {
+ public:
+  InductionVariable::ConstraintKind kind() const { return kind_; }
+  Node* left() const { return left_; }
+  Node* right() const { return right_; }
+
+  const Constraint* next() const { return next_; }
+
+  Constraint(Node* left, InductionVariable::ConstraintKind kind, Node* right,
+             const Constraint* next)
+      : left_(left), right_(right), kind_(kind), next_(next) {}
+
+ private:
+  Node* left_;
+  Node* right_;
+  InductionVariable::ConstraintKind kind_;
+  const Constraint* next_;
+};
+
+class LoopVariableOptimizer::VariableLimits : public ZoneObject {
+ public:
+  static VariableLimits* Empty(Zone* zone) {
+    return new (zone) VariableLimits();
+  }
+
+  VariableLimits* Copy(Zone* zone) const {
+    return new (zone) VariableLimits(this);
+  }
+
+  void Add(Node* left, InductionVariable::ConstraintKind kind, Node* right,
+           Zone* zone) {
+    head_ = new (zone) Constraint(left, kind, right, head_);
+    limit_count_++;
+  }
+
+  void Merge(const VariableLimits* other) {
+    // Change the current condition list to the longest common tail of this
+    // condition list and the other list. (The common tail should correspond
+    // to the list from the common dominator.)
+
+    // First, we throw away the prefix of the longer list, so that
+    // we have lists of the same length.
+    size_t other_size = other->limit_count_;
+    const Constraint* other_limit = other->head_;
+    while (other_size > limit_count_) {
+      other_limit = other_limit->next();
+      other_size--;
+    }
+    while (limit_count_ > other_size) {
+      head_ = head_->next();
+      limit_count_--;
+    }
+
+    // Then we go through both lists in lock-step until we find
+    // the common tail.
+    while (head_ != other_limit) {
+      DCHECK(limit_count_ > 0);
+      limit_count_--;
+      other_limit = other_limit->next();
+      head_ = head_->next();
+    }
+  }
+
+  const Constraint* head() const { return head_; }
+
+ private:
+  VariableLimits() {}
+  explicit VariableLimits(const VariableLimits* other)
+      : head_(other->head_), limit_count_(other->limit_count_) {}
+
+  const Constraint* head_ = nullptr;
+  size_t limit_count_ = 0;
+};
+
+void InductionVariable::AddUpperBound(Node* bound,
+                                      InductionVariable::ConstraintKind kind) {
+  if (FLAG_trace_turbo_loop) {
+    OFStream os(stdout);
+    os << "New upper bound for " << phi()->id() << " (loop "
+       << NodeProperties::GetControlInput(phi())->id() << "): " << *bound
+       << std::endl;
+  }
+  upper_bounds_.push_back(Bound(bound, kind));
+}
+
+void InductionVariable::AddLowerBound(Node* bound,
+                                      InductionVariable::ConstraintKind kind) {
+  if (FLAG_trace_turbo_loop) {
+    OFStream os(stdout);
+    os << "New lower bound for " << phi()->id() << " (loop "
+       << NodeProperties::GetControlInput(phi())->id() << "): " << *bound;
+  }
+  lower_bounds_.push_back(Bound(bound, kind));
+}
+
+void LoopVariableOptimizer::VisitBackedge(Node* from, Node* loop) {
+  if (loop->op()->ControlInputCount() != 2) return;
+
+  // Go through the constraints, and update the induction variables in
+  // this loop if they are involved in the constraint.
+  const VariableLimits* limits = limits_[from->id()];
+  for (const Constraint* constraint = limits->head(); constraint != nullptr;
+       constraint = constraint->next()) {
+    if (constraint->left()->opcode() == IrOpcode::kPhi &&
+        NodeProperties::GetControlInput(constraint->left()) == loop) {
+      auto var = induction_vars_.find(constraint->left()->id());
+      if (var != induction_vars_.end()) {
+        var->second->AddUpperBound(constraint->right(), constraint->kind());
+      }
+    }
+    if (constraint->right()->opcode() == IrOpcode::kPhi &&
+        NodeProperties::GetControlInput(constraint->right()) == loop) {
+      auto var = induction_vars_.find(constraint->right()->id());
+      if (var != induction_vars_.end()) {
+        var->second->AddLowerBound(constraint->left(), constraint->kind());
+      }
+    }
+  }
+}
+
+void LoopVariableOptimizer::VisitNode(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kMerge:
+      return VisitMerge(node);
+    case IrOpcode::kLoop:
+      return VisitLoop(node);
+    case IrOpcode::kIfFalse:
+      return VisitIf(node, false);
+    case IrOpcode::kIfTrue:
+      return VisitIf(node, true);
+    case IrOpcode::kStart:
+      return VisitStart(node);
+    case IrOpcode::kLoopExit:
+      return VisitLoopExit(node);
+    default:
+      return VisitOtherControl(node);
+  }
+}
+
+void LoopVariableOptimizer::VisitMerge(Node* node) {
+  // Merge the limits of all incoming edges.
+  VariableLimits* merged = limits_[node->InputAt(0)->id()]->Copy(zone());
+  for (int i = 1; i < node->InputCount(); i++) {
+    merged->Merge(limits_[node->InputAt(i)->id()]);
+  }
+  limits_[node->id()] = merged;
+}
+
+void LoopVariableOptimizer::VisitLoop(Node* node) {
+  DetectInductionVariables(node);
+  // Conservatively take the limits from the loop entry here.
+  return TakeConditionsFromFirstControl(node);
+}
+
+void LoopVariableOptimizer::VisitIf(Node* node, bool polarity) {
+  Node* branch = node->InputAt(0);
+  Node* cond = branch->InputAt(0);
+  VariableLimits* limits = limits_[branch->id()]->Copy(zone());
+  // Normalize to less than comparison.
+  switch (cond->opcode()) {
+    case IrOpcode::kJSLessThan:
+      AddCmpToLimits(limits, cond, InductionVariable::kStrict, polarity);
+      break;
+    case IrOpcode::kJSGreaterThan:
+      AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, !polarity);
+      break;
+    case IrOpcode::kJSLessThanOrEqual:
+      AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, polarity);
+      break;
+    case IrOpcode::kJSGreaterThanOrEqual:
+      AddCmpToLimits(limits, cond, InductionVariable::kStrict, !polarity);
+      break;
+    default:
+      break;
+  }
+  limits_[node->id()] = limits;
+}
+
+void LoopVariableOptimizer::AddCmpToLimits(
+    VariableLimits* limits, Node* node, InductionVariable::ConstraintKind kind,
+    bool polarity) {
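+  // Record the comparison as a limit only if one of its operands is a known
+  // induction variable. On the false branch the comparison is negated, e.g.
+  // !(a < b) is recorded as b <= a.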
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (FindInductionVariable(left) || FindInductionVariable(right)) {
+    if (polarity) {
+      limits->Add(left, kind, right, zone());
+    } else {
+      kind = (kind == InductionVariable::kStrict)
+                 ? InductionVariable::kNonStrict
+                 : InductionVariable::kStrict;
+      limits->Add(right, kind, left, zone());
+    }
+  }
+}
+
+void LoopVariableOptimizer::VisitStart(Node* node) {
+  limits_[node->id()] = VariableLimits::Empty(zone());
+}
+
+void LoopVariableOptimizer::VisitLoopExit(Node* node) {
+  return TakeConditionsFromFirstControl(node);
+}
+
+void LoopVariableOptimizer::VisitOtherControl(Node* node) {
+  DCHECK_EQ(1, node->op()->ControlInputCount());
+  return TakeConditionsFromFirstControl(node);
+}
+
+void LoopVariableOptimizer::TakeConditionsFromFirstControl(Node* node) {
+  const VariableLimits* limits =
+      limits_[NodeProperties::GetControlInput(node, 0)->id()];
+  DCHECK_NOT_NULL(limits);
+  limits_[node->id()] = limits;
+}
+
+const InductionVariable* LoopVariableOptimizer::FindInductionVariable(
+    Node* node) {
+  auto var = induction_vars_.find(node->id());
+  if (var != induction_vars_.end()) {
+    return var->second;
+  }
+  return nullptr;
+}
+
+InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
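+  // Pattern match a candidate induction variable: a loop phi whose backedge
+  // value is a JSAdd/JSSubtract of the phi itself (possibly via JSToNumber)
+  // and an increment.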
+  DCHECK_EQ(2, phi->op()->ValueInputCount());
+  DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(phi)->opcode());
+  Node* initial = phi->InputAt(0);
+  Node* arith = phi->InputAt(1);
+  InductionVariable::ArithmeticType arithmeticType;
+  if (arith->opcode() == IrOpcode::kJSAdd) {
+    arithmeticType = InductionVariable::ArithmeticType::kAddition;
+  } else if (arith->opcode() == IrOpcode::kJSSubtract) {
+    arithmeticType = InductionVariable::ArithmeticType::kSubtraction;
+  } else {
+    return nullptr;
+  }
+
+  // TODO(jarin) Support both sides.
+  if (arith->InputAt(0) != phi) {
+    if (arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber ||
+        arith->InputAt(0)->InputAt(0) != phi) {
+      return nullptr;
+    }
+  }
+  Node* incr = arith->InputAt(1);
+  return new (zone())
+      InductionVariable(phi, arith, incr, initial, zone(), arithmeticType);
+}
+
+void LoopVariableOptimizer::DetectInductionVariables(Node* loop) {
+  if (loop->op()->ControlInputCount() != 2) return;
+  TRACE("Loop variables for loop %i:", loop->id());
+  for (Edge edge : loop->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge) &&
+        edge.from()->opcode() == IrOpcode::kPhi) {
+      Node* phi = edge.from();
+      InductionVariable* induction_var = TryGetInductionVariable(phi);
+      if (induction_var) {
+        induction_vars_[phi->id()] = induction_var;
+        TRACE(" %i", induction_var->phi()->id());
+      }
+    }
+  }
+  TRACE("\n");
+}
+
+void LoopVariableOptimizer::ChangeToInductionVariablePhis() {
+  for (auto entry : induction_vars_) {
+    // It only makes sense to analyze an induction variable if
+    // it has at least one bound.
+    InductionVariable* induction_var = entry.second;
+    DCHECK_EQ(MachineRepresentation::kTagged,
+              PhiRepresentationOf(induction_var->phi()->op()));
+    if (induction_var->upper_bounds().size() == 0 &&
+        induction_var->lower_bounds().size() == 0) {
+      continue;
+    }
+    // Insert the increment into the value inputs.
+    induction_var->phi()->InsertInput(graph()->zone(),
+                                      induction_var->phi()->InputCount() - 1,
+                                      induction_var->increment());
+    // Insert the bounds into the value inputs.
+    for (auto bound : induction_var->lower_bounds()) {
+      induction_var->phi()->InsertInput(
+          graph()->zone(), induction_var->phi()->InputCount() - 1, bound.bound);
+    }
+    for (auto bound : induction_var->upper_bounds()) {
+      induction_var->phi()->InsertInput(
+          graph()->zone(), induction_var->phi()->InputCount() - 1, bound.bound);
+    }
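+    // The phi's inputs are now: initial value, backedge value, increment,
+    // lower bounds, upper bounds, and finally the control input.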
+    NodeProperties::ChangeOp(
+        induction_var->phi(),
+        common()->InductionVariablePhi(induction_var->phi()->InputCount() - 1));
+  }
+}
+
+void LoopVariableOptimizer::ChangeToPhisAndInsertGuards() {
+  for (auto entry : induction_vars_) {
+    InductionVariable* induction_var = entry.second;
+    if (induction_var->phi()->opcode() == IrOpcode::kInductionVariablePhi) {
+      // Turn the induction variable phi back to normal phi.
+      int value_count = 2;
+      Node* control = NodeProperties::GetControlInput(induction_var->phi());
+      DCHECK_EQ(value_count, control->op()->ControlInputCount());
+      induction_var->phi()->TrimInputCount(value_count + 1);
+      induction_var->phi()->ReplaceInput(value_count, control);
+      NodeProperties::ChangeOp(
+          induction_var->phi(),
+          common()->Phi(MachineRepresentation::kTagged, value_count));
+
+      // If the backedge value's type is not a subtype of the phi's type, we
+      // insert a TypeGuard (a sigma node) to get the typing right.
+      Node* backedge_value = induction_var->phi()->InputAt(1);
+      Type* backedge_type = NodeProperties::GetType(backedge_value);
+      Type* phi_type = NodeProperties::GetType(induction_var->phi());
+      if (!backedge_type->Is(phi_type)) {
+        Node* backedge_control =
+            NodeProperties::GetControlInput(induction_var->phi())->InputAt(1);
+        Node* rename = graph()->NewNode(common()->TypeGuard(phi_type),
+                                        backedge_value, backedge_control);
+        induction_var->phi()->ReplaceInput(1, rename);
+      }
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/loop-variable-optimizer.h b/src/compiler/loop-variable-optimizer.h
new file mode 100644
index 0000000..a5c1ad4
--- /dev/null
+++ b/src/compiler/loop-variable-optimizer.h
@@ -0,0 +1,117 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
+#define V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CommonOperatorBuilder;
+class Graph;
+class Node;
+
+class InductionVariable : public ZoneObject {
+ public:
+  Node* phi() const { return phi_; }
+  Node* arith() const { return arith_; }
+  Node* increment() const { return increment_; }
+  Node* init_value() const { return init_value_; }
+
+  enum ConstraintKind { kStrict, kNonStrict };
+  enum ArithmeticType { kAddition, kSubtraction };
+  struct Bound {
+    Bound(Node* bound, ConstraintKind kind) : bound(bound), kind(kind) {}
+
+    Node* bound;
+    ConstraintKind kind;
+  };
+
+  const ZoneVector<Bound>& lower_bounds() { return lower_bounds_; }
+  const ZoneVector<Bound>& upper_bounds() { return upper_bounds_; }
+
+  ArithmeticType Type() { return arithmeticType_; }
+
+ private:
+  friend class LoopVariableOptimizer;
+
+  InductionVariable(Node* phi, Node* arith, Node* increment, Node* init_value,
+                    Zone* zone, ArithmeticType arithmeticType)
+      : phi_(phi),
+        arith_(arith),
+        increment_(increment),
+        init_value_(init_value),
+        lower_bounds_(zone),
+        upper_bounds_(zone),
+        arithmeticType_(arithmeticType) {}
+
+  void AddUpperBound(Node* bound, ConstraintKind kind);
+  void AddLowerBound(Node* bound, ConstraintKind kind);
+
+  Node* phi_;
+  Node* arith_;
+  Node* increment_;
+  Node* init_value_;
+  ZoneVector<Bound> lower_bounds_;
+  ZoneVector<Bound> upper_bounds_;
+  ArithmeticType arithmeticType_;
+};
+
+class LoopVariableOptimizer {
+ public:
+  void Run();
+
+  LoopVariableOptimizer(Graph* graph, CommonOperatorBuilder* common,
+                        Zone* zone);
+
+  const ZoneMap<int, InductionVariable*>& induction_variables() {
+    return induction_vars_;
+  }
+
+  void ChangeToInductionVariablePhis();
+  void ChangeToPhisAndInsertGuards();
+
+ private:
+  const int kAssumedLoopEntryIndex = 0;
+  const int kFirstBackedge = 1;
+
+  class Constraint;
+  class VariableLimits;
+
+  void VisitBackedge(Node* from, Node* loop);
+  void VisitNode(Node* node);
+  void VisitMerge(Node* node);
+  void VisitLoop(Node* node);
+  void VisitIf(Node* node, bool polarity);
+  void VisitStart(Node* node);
+  void VisitLoopExit(Node* node);
+  void VisitOtherControl(Node* node);
+
+  void AddCmpToLimits(VariableLimits* limits, Node* node,
+                      InductionVariable::ConstraintKind kind, bool polarity);
+
+  void TakeConditionsFromFirstControl(Node* node);
+  const InductionVariable* FindInductionVariable(Node* node);
+  InductionVariable* TryGetInductionVariable(Node* phi);
+  void DetectInductionVariables(Node* loop);
+
+  Graph* graph() { return graph_; }
+  CommonOperatorBuilder* common() { return common_; }
+  Zone* zone() { return zone_; }
+
+  Graph* graph_;
+  CommonOperatorBuilder* common_;
+  Zone* zone_;
+  ZoneMap<int, const VariableLimits*> limits_;
+  ZoneMap<int, InductionVariable*> induction_vars_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index b566f48..99044aa 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -43,6 +43,20 @@
   return graph()->NewNode(common()->Int64Constant(value));
 }
 
+Node* MachineOperatorReducer::Float64Mul(Node* lhs, Node* rhs) {
+  return graph()->NewNode(machine()->Float64Mul(), lhs, rhs);
+}
+
+Node* MachineOperatorReducer::Float64PowHalf(Node* value) {
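+  // Compute sqrt(0.0 + value), except that -Infinity maps to +Infinity as
+  // required for x ** 0.5; adding 0.0 first canonicalizes -0 to +0.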
+  value =
+      graph()->NewNode(machine()->Float64Add(), Float64Constant(0.0), value);
+  return graph()->NewNode(
+      common()->Select(MachineRepresentation::kFloat64, BranchHint::kFalse),
+      graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
+                       Float64Constant(-V8_INFINITY)),
+      Float64Constant(V8_INFINITY),
+      graph()->NewNode(machine()->Float64Sqrt(), value));
+}
 
 Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
   Node* const node = graph()->NewNode(machine()->Word32And(), lhs, rhs);
@@ -153,10 +167,16 @@
     }
     case IrOpcode::kWord32Shl:
       return ReduceWord32Shl(node);
+    case IrOpcode::kWord64Shl:
+      return ReduceWord64Shl(node);
     case IrOpcode::kWord32Shr:
       return ReduceWord32Shr(node);
+    case IrOpcode::kWord64Shr:
+      return ReduceWord64Shr(node);
     case IrOpcode::kWord32Sar:
       return ReduceWord32Sar(node);
+    case IrOpcode::kWord64Sar:
+      return ReduceWord64Sar(node);
     case IrOpcode::kWord32Ror: {
       Int32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.left().node());  // x ror 0 => x
@@ -198,8 +218,12 @@
     }
     case IrOpcode::kInt32Add:
       return ReduceInt32Add(node);
+    case IrOpcode::kInt64Add:
+      return ReduceInt64Add(node);
     case IrOpcode::kInt32Sub:
       return ReduceInt32Sub(node);
+    case IrOpcode::kInt64Sub:
+      return ReduceInt64Sub(node);
     case IrOpcode::kInt32Mul: {
       Int32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.right().node());  // x * 0 => 0
@@ -221,6 +245,21 @@
       }
       break;
     }
+    case IrOpcode::kInt32MulWithOverflow: {
+      Int32BinopMatcher m(node);
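+      // x * 2 => x + x, using the add-with-overflow operator.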
+      if (m.right().Is(2)) {
+        node->ReplaceInput(1, m.left().node());
+        NodeProperties::ChangeOp(node, machine()->Int32AddWithOverflow());
+        return Changed(node);
+      }
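+      // x * -1 => 0 - x, using the sub-with-overflow operator.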
+      if (m.right().Is(-1)) {
+        node->ReplaceInput(0, Int32Constant(0));
+        node->ReplaceInput(1, m.left().node());
+        NodeProperties::ChangeOp(node, machine()->Int32SubWithOverflow());
+        return Changed(node);
+      }
+      break;
+    }
     case IrOpcode::kInt32Div:
       return ReduceInt32Div(node);
     case IrOpcode::kUint32Div:
@@ -235,6 +274,14 @@
         return ReplaceBool(m.left().Value() < m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      if (m.left().IsWord32Or() && m.right().Is(0)) {
+        // (x | K) < 0 => true or (K | x) < 0 => true iff K < 0
+        Int32BinopMatcher mleftmatcher(m.left().node());
+        if (mleftmatcher.left().IsNegative() ||
+            mleftmatcher.right().IsNegative()) {
+          return ReplaceBool(true);
+        }
+      }
       break;
     }
     case IrOpcode::kInt32LessThanOrEqual: {
@@ -280,6 +327,39 @@
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
       break;
     }
+    case IrOpcode::kFloat32Sub: {
+      Float32BinopMatcher m(node);
+      if (m.right().Is(0) && (copysign(1.0, m.right().Value()) > 0)) {
+        return Replace(m.left().node());  // x - 0 => x
+      }
+      if (m.right().IsNaN()) {  // x - NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN - x => NaN
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {  // L - R => (L - R)
+        return ReplaceFloat32(m.left().Value() - m.right().Value());
+      }
+      if (m.left().IsMinusZero()) {
+        // -0.0 - round_down(-0.0 - R) => round_up(R)
+        if (machine()->Float32RoundUp().IsSupported() &&
+            m.right().IsFloat32RoundDown()) {
+          if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat32Sub) {
+            Float32BinopMatcher mright0(m.right().InputAt(0));
+            if (mright0.left().IsMinusZero()) {
+              return Replace(graph()->NewNode(machine()->Float32RoundUp().op(),
+                                              mright0.right().node()));
+            }
+          }
+        }
+        // -0.0 - R => -R
+        node->RemoveInput(0);
+        NodeProperties::ChangeOp(node, machine()->Float32Neg());
+        return Changed(node);
+      }
+      break;
+    }
     case IrOpcode::kFloat64Add: {
       Float64BinopMatcher m(node);
       if (m.right().IsNaN()) {  // x + NaN => NaN
@@ -301,9 +381,26 @@
       if (m.left().IsNaN()) {  // NaN - x => NaN
         return Replace(m.left().node());
       }
-      if (m.IsFoldable()) {  // K - K => K
+      if (m.IsFoldable()) {  // L - R => (L - R)
         return ReplaceFloat64(m.left().Value() - m.right().Value());
       }
+      if (m.left().IsMinusZero()) {
+        // -0.0 - round_down(-0.0 - R) => round_up(R)
+        if (machine()->Float64RoundUp().IsSupported() &&
+            m.right().IsFloat64RoundDown()) {
+          if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub) {
+            Float64BinopMatcher mright0(m.right().InputAt(0));
+            if (mright0.left().IsMinusZero()) {
+              return Replace(graph()->NewNode(machine()->Float64RoundUp().op(),
+                                              mright0.right().node()));
+            }
+          }
+        }
+        // -0.0 - R => -R
+        node->RemoveInput(0);
+        NodeProperties::ChangeOp(node, machine()->Float64Neg());
+        return Changed(node);
+      }
       break;
     }
     case IrOpcode::kFloat64Mul: {
@@ -353,11 +450,36 @@
       }
       break;
     }
+    case IrOpcode::kFloat64Acos: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::acos(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Acosh: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::acosh(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Asin: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::asin(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Asinh: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::asinh(m.Value()));
+      break;
+    }
     case IrOpcode::kFloat64Atan: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(base::ieee754::atan(m.Value()));
       break;
     }
+    case IrOpcode::kFloat64Atanh: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::atanh(m.Value()));
+      break;
+    }
     case IrOpcode::kFloat64Atan2: {
       Float64BinopMatcher m(node);
       if (m.right().IsNaN()) {
@@ -372,9 +494,9 @@
       }
       break;
     }
-    case IrOpcode::kFloat64Atanh: {
+    case IrOpcode::kFloat64Cbrt: {
       Float64Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat64(base::ieee754::atanh(m.Value()));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::cbrt(m.Value()));
       break;
     }
     case IrOpcode::kFloat64Cos: {
@@ -382,6 +504,11 @@
       if (m.HasValue()) return ReplaceFloat64(base::ieee754::cos(m.Value()));
       break;
     }
+    case IrOpcode::kFloat64Cosh: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::cosh(m.Value()));
+      break;
+    }
     case IrOpcode::kFloat64Exp: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(base::ieee754::exp(m.Value()));
@@ -402,19 +529,40 @@
       if (m.HasValue()) return ReplaceFloat64(base::ieee754::log1p(m.Value()));
       break;
     }
-    case IrOpcode::kFloat64Log2: {
-      Float64Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat64(base::ieee754::log2(m.Value()));
-      break;
-    }
     case IrOpcode::kFloat64Log10: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(base::ieee754::log10(m.Value()));
       break;
     }
-    case IrOpcode::kFloat64Cbrt: {
+    case IrOpcode::kFloat64Log2: {
       Float64Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat64(base::ieee754::cbrt(m.Value()));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::log2(m.Value()));
+      break;
+    }
+    case IrOpcode::kFloat64Pow: {
+      Float64BinopMatcher m(node);
+      // TODO(bmeurer): Constant fold once we have a unified pow implementation.
+      if (m.right().Is(0.0)) {  // x ** +-0.0 => 1.0
+        return ReplaceFloat64(1.0);
+      } else if (m.right().Is(-2.0)) {  // x ** -2.0 => 1 / (x * x)
+        node->ReplaceInput(0, Float64Constant(1.0));
+        node->ReplaceInput(1, Float64Mul(m.left().node(), m.left().node()));
+        NodeProperties::ChangeOp(node, machine()->Float64Div());
+        return Changed(node);
+      } else if (m.right().Is(2.0)) {  // x ** 2.0 => x * x
+        node->ReplaceInput(1, m.left().node());
+        NodeProperties::ChangeOp(node, machine()->Float64Mul());
+        return Changed(node);
+      } else if (m.right().Is(-0.5)) {
+        // x ** -0.5 => 1 / (if x <= -Infinity then Infinity else sqrt(0.0 + x))
+        node->ReplaceInput(0, Float64Constant(1.0));
+        node->ReplaceInput(1, Float64PowHalf(m.left().node()));
+        NodeProperties::ChangeOp(node, machine()->Float64Div());
+        return Changed(node);
+      } else if (m.right().Is(0.5)) {
+        // x ** 0.5 => if x <= -Infinity then Infinity else sqrt(0.0 + x)
+        return Replace(Float64PowHalf(m.left().node()));
+      }
       break;
     }
     case IrOpcode::kFloat64Sin: {
@@ -422,11 +570,21 @@
       if (m.HasValue()) return ReplaceFloat64(base::ieee754::sin(m.Value()));
       break;
     }
+    case IrOpcode::kFloat64Sinh: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::sinh(m.Value()));
+      break;
+    }
     case IrOpcode::kFloat64Tan: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(base::ieee754::tan(m.Value()));
       break;
     }
+    case IrOpcode::kFloat64Tanh: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(base::ieee754::tanh(m.Value()));
+      break;
+    }
     case IrOpcode::kChangeFloat32ToFloat64: {
       Float32Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(m.Value());
@@ -493,6 +651,7 @@
     case IrOpcode::kFloat64InsertHighWord32:
       return ReduceFloat64InsertHighWord32(node);
     case IrOpcode::kStore:
+    case IrOpcode::kUnalignedStore:
     case IrOpcode::kCheckedStore:
       return ReduceStore(node);
     case IrOpcode::kFloat64Equal:
@@ -505,7 +664,6 @@
   return NoChange();
 }
 
-
 Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
   DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
   Int32BinopMatcher m(node);
@@ -536,6 +694,16 @@
   return NoChange();
 }
 
+Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
+  DCHECK_EQ(IrOpcode::kInt64Add, node->opcode());
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+  if (m.IsFoldable()) {
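+    // Fold using unsigned arithmetic to avoid undefined signed overflow.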
+    return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) +
+                                  bit_cast<uint64_t>(m.right().Value())));
+  }
+  return NoChange();
+}
 
 Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
   DCHECK_EQ(IrOpcode::kInt32Sub, node->opcode());
@@ -555,6 +723,23 @@
   return NoChange();
 }
 
+Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
+  DCHECK_EQ(IrOpcode::kInt64Sub, node->opcode());
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x - 0 => x
+  if (m.IsFoldable()) {                                  // K - K => K
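+    // As for Int64Add, fold using unsigned arithmetic to avoid signed
+    // overflow.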
+    return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) -
+                                  bit_cast<uint64_t>(m.right().Value())));
+  }
+  if (m.LeftEqualsRight()) return Replace(Int64Constant(0));  // x - x => 0
+  if (m.right().HasValue()) {                                 // x - K => x + -K
+    node->ReplaceInput(1, Int64Constant(-m.right().Value()));
+    NodeProperties::ChangeOp(node, machine()->Int64Add());
+    Reduction const reduction = ReduceInt64Add(node);
+    return reduction.Changed() ? reduction : Changed(node);
+  }
+  return NoChange();
+}
 
 Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
   Int32BinopMatcher m(node);
@@ -708,9 +893,13 @@
   if (nm.IsCheckedStore()) {
     rep = CheckedStoreRepresentationOf(node->op());
     value_input = 3;
-  } else {
+  } else if (nm.IsStore()) {
     rep = StoreRepresentationOf(node->op()).representation();
     value_input = 2;
+  } else {
+    DCHECK(nm.IsUnalignedStore());
+    rep = UnalignedStoreRepresentationOf(node->op());
+    value_input = 2;
   }
 
   Node* const value = node->InputAt(value_input);
@@ -757,10 +946,10 @@
         int32_t val;
         bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
                                                    m.right().Value(), &val);
-        return ReplaceInt32((index == 0) ? val : ovf);
+        return ReplaceInt32(index == 0 ? val : ovf);
       }
       if (m.right().Is(0)) {
-        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
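+        // x + 0 never overflows, so reuse the zero constant (the right
+        // input) as the overflow projection.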
+        return Replace(index == 0 ? m.left().node() : m.right().node());
       }
       break;
     }
@@ -771,10 +960,27 @@
         int32_t val;
         bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
                                                    m.right().Value(), &val);
-        return ReplaceInt32((index == 0) ? val : ovf);
+        return ReplaceInt32(index == 0 ? val : ovf);
       }
       if (m.right().Is(0)) {
-        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+        return Replace(index == 0 ? m.left().node() : m.right().node());
+      }
+      break;
+    }
+    case IrOpcode::kInt32MulWithOverflow: {
+      DCHECK(index == 0 || index == 1);
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {
+        int32_t val;
+        bool ovf = base::bits::SignedMulOverflow32(m.left().Value(),
+                                                   m.right().Value(), &val);
+        return ReplaceInt32(index == 0 ? val : ovf);
+      }
+      if (m.right().Is(0)) {
+        return Replace(m.right().node());
+      }
+      if (m.right().Is(1)) {
+        return index == 0 ? Replace(m.left().node()) : ReplaceInt32(0);
       }
       break;
     }
@@ -830,6 +1036,16 @@
   return ReduceWord32Shifts(node);
 }
 
+Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord64Shl, node->opcode());
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
+  if (m.IsFoldable()) {                                  // K << K => K
+    return ReplaceInt64(m.left().Value() << m.right().Value());
+  }
+  return NoChange();
+}
+
 Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
   Uint32BinopMatcher m(node);
   if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
@@ -850,6 +1066,16 @@
   return ReduceWord32Shifts(node);
 }
 
+Reduction MachineOperatorReducer::ReduceWord64Shr(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord64Shr, node->opcode());
+  Uint64BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
+  if (m.IsFoldable()) {                                  // K >>> K => K
+    return ReplaceInt64(m.left().Value() >> m.right().Value());
+  }
+  return NoChange();
+}
+
 Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) return Replace(m.left().node());  // x >> 0 => x
@@ -885,6 +1111,14 @@
   return ReduceWord32Shifts(node);
 }
 
+Reduction MachineOperatorReducer::ReduceWord64Sar(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord64Sar, node->opcode());
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x >> 0 => x
+  if (m.IsFoldable()) {
+    return ReplaceInt64(m.left().Value() >> m.right().Value());
+  }
+  return NoChange();
+}
 
 Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
   DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index e44521e..167bf7e 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -32,8 +32,13 @@
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
   Node* Uint32Constant(uint32_t value) {
-    return Int32Constant(bit_cast<uint32_t>(value));
+    return Int32Constant(bit_cast<int32_t>(value));
   }
+  Node* Uint64Constant(uint64_t value) {
+    return Int64Constant(bit_cast<int64_t>(value));
+  }
+  Node* Float64Mul(Node* lhs, Node* rhs);
+  Node* Float64PowHalf(Node* value);
   Node* Word32And(Node* lhs, Node* rhs);
   Node* Word32And(Node* lhs, uint32_t rhs) {
     return Word32And(lhs, Uint32Constant(rhs));
@@ -65,7 +70,9 @@
   }
 
   Reduction ReduceInt32Add(Node* node);
+  Reduction ReduceInt64Add(Node* node);
   Reduction ReduceInt32Sub(Node* node);
+  Reduction ReduceInt64Sub(Node* node);
   Reduction ReduceInt32Div(Node* node);
   Reduction ReduceUint32Div(Node* node);
   Reduction ReduceInt32Mod(Node* node);
@@ -74,8 +81,11 @@
   Reduction ReduceProjection(size_t index, Node* node);
   Reduction ReduceWord32Shifts(Node* node);
   Reduction ReduceWord32Shl(Node* node);
+  Reduction ReduceWord64Shl(Node* node);
   Reduction ReduceWord32Shr(Node* node);
+  Reduction ReduceWord64Shr(Node* node);
   Reduction ReduceWord32Sar(Node* node);
+  Reduction ReduceWord64Sar(Node* node);
   Reduction ReduceWord32And(Node* node);
   Reduction ReduceWord32Or(Node* node);
   Reduction ReduceFloat64InsertLowWord32(Node* node);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 3662d0a..43c6202 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -46,6 +46,16 @@
   return OpParameter<StoreRepresentation>(op);
 }
 
+UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kUnalignedLoad, op->opcode());
+  return OpParameter<UnalignedLoadRepresentation>(op);
+}
+
+UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
+  return OpParameter<UnalignedStoreRepresentation>(op);
+}
 
 CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
   DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
@@ -68,297 +78,315 @@
   return OpParameter<MachineRepresentation>(op);
 }
 
-#define PURE_OP_LIST(V)                                                       \
-  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Word32Shl, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word32Shr, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word32Sar, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word32Ror, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word32Equal, Operator::kCommutative, 2, 0, 1)                             \
-  V(Word32Clz, Operator::kNoProperties, 1, 0, 1)                              \
-  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Word64Shl, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word64Shr, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word64Sar, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word64Ror, Operator::kNoProperties, 2, 0, 1)                              \
-  V(Word64Clz, Operator::kNoProperties, 1, 0, 1)                              \
-  V(Word64Equal, Operator::kCommutative, 2, 0, 1)                             \
-  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Int32Sub, Operator::kNoProperties, 2, 0, 1)                               \
-  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
-  V(Int32Div, Operator::kNoProperties, 2, 1, 1)                               \
-  V(Int32Mod, Operator::kNoProperties, 2, 1, 1)                               \
-  V(Int32LessThan, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Uint32Div, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1)                         \
-  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
-  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Int64Sub, Operator::kNoProperties, 2, 0, 1)                               \
-  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
-  V(Int64Div, Operator::kNoProperties, 2, 1, 1)                               \
-  V(Int64Mod, Operator::kNoProperties, 2, 1, 1)                               \
-  V(Int64LessThan, Operator::kNoProperties, 2, 0, 1)                          \
-  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Uint64Div, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Uint64Mod, Operator::kNoProperties, 2, 1, 1)                              \
-  V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1)                         \
-  V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
-  V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1)                    \
-  V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)                \
-  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
-  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                  \
-  V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                \
-  V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
-  V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)                \
-  V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)              \
-  V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2)              \
-  V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)             \
-  V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)             \
-  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
-  V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1)                      \
-  V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                    \
-  V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
-  V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
-  V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                    \
-  V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
-  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1)                     \
-  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1)                  \
-  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1)                   \
-  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1)               \
-  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
-  V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                  \
-  V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1)                  \
-  V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                  \
-  V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                  \
-  V(Float32Abs, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Float32Add, Operator::kCommutative, 2, 0, 1)                              \
-  V(Float32Sub, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Float32SubPreserveNan, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Float32Mul, Operator::kCommutative, 2, 0, 1)                              \
-  V(Float32Div, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Abs, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Float64Atan, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Atan2, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Float64Atanh, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Cos, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Float64Exp, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Float64Expm1, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Log, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Float64Log1p, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Log2, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Log10, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float64Add, Operator::kCommutative, 2, 0, 1)                              \
-  V(Float64Sub, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Float64SubPreserveNan, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Float64Mul, Operator::kCommutative, 2, 0, 1)                              \
-  V(Float64Div, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Float64Mod, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Float64Sin, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Float64Tan, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Float32Equal, Operator::kCommutative, 2, 0, 1)                            \
-  V(Float32LessThan, Operator::kNoProperties, 2, 0, 1)                        \
-  V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Float64Equal, Operator::kCommutative, 2, 0, 1)                            \
-  V(Float64LessThan, Operator::kNoProperties, 2, 0, 1)                        \
-  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1)                \
-  V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1)               \
-  V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)                \
-  V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                       \
-  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                       \
-  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)                 \
-  V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2)                           \
-  V(Int32PairSub, Operator::kNoProperties, 4, 0, 2)                           \
-  V(Int32PairMul, Operator::kNoProperties, 4, 0, 2)                           \
-  V(Word32PairShl, Operator::kNoProperties, 3, 0, 2)                          \
-  V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                          \
-  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)                          \
-  V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1)                        \
-  V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
-  V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1)                           \
-  V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1)                          \
-  V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1)                   \
-  V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1)               \
-  V(Float32x4Add, Operator::kCommutative, 2, 0, 1)                            \
-  V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Float32x4Mul, Operator::kCommutative, 2, 0, 1)                            \
-  V(Float32x4Div, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Float32x4Min, Operator::kCommutative, 2, 0, 1)                            \
-  V(Float32x4Max, Operator::kCommutative, 2, 0, 1)                            \
-  V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1)                         \
-  V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1)                         \
-  V(Float32x4Equal, Operator::kCommutative, 2, 0, 1)                          \
-  V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1)                       \
-  V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1)                      \
-  V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
-  V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
-  V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
-  V(Float32x4Select, Operator::kNoProperties, 3, 0, 1)                        \
-  V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                       \
-  V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                       \
-  V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1)                   \
-  V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1)                  \
-  V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1)                          \
-  V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                     \
-  V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Int32x4Add, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Int32x4Mul, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int32x4Min, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int32x4Max, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)               \
-  V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int32x4Equal, Operator::kCommutative, 2, 0, 1)                            \
-  V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1)                         \
-  V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1)                        \
-  V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int32x4Select, Operator::kNoProperties, 3, 0, 1)                          \
-  V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                         \
-  V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                         \
-  V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                   \
-  V(Uint32x4Min, Operator::kCommutative, 2, 0, 1)                             \
-  V(Uint32x4Max, Operator::kCommutative, 2, 0, 1)                             \
-  V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
-  V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                  \
-  V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1)                         \
-  V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
-  V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1)                        \
-  V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1)                        \
-  V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                        \
-  V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                        \
-  V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1)                        \
-  V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1)                          \
-  V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                     \
-  V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Int16x8Add, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                      \
-  V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int16x8Mul, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int16x8Min, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int16x8Max, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)               \
-  V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int16x8Equal, Operator::kCommutative, 2, 0, 1)                            \
-  V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1)                         \
-  V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1)                        \
-  V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int16x8Select, Operator::kNoProperties, 3, 0, 1)                          \
-  V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                         \
-  V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                        \
-  V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
-  V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Uint16x8Min, Operator::kCommutative, 2, 0, 1)                             \
-  V(Uint16x8Max, Operator::kCommutative, 2, 0, 1)                             \
-  V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
-  V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1)                         \
-  V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
-  V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                        \
-  V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1)                        \
-  V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                        \
-  V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                       \
-  V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1)                        \
-  V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1)                         \
-  V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                     \
-  V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1)                             \
-  V(Int8x16Add, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                      \
-  V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1)                             \
-  V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int8x16Mul, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int8x16Min, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int8x16Max, Operator::kCommutative, 2, 0, 1)                              \
-  V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)               \
-  V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int8x16Equal, Operator::kCommutative, 2, 0, 1)                            \
-  V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1)                         \
-  V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1)                        \
-  V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                     \
-  V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
-  V(Int8x16Select, Operator::kNoProperties, 3, 0, 1)                          \
-  V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                        \
-  V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                        \
-  V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
-  V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Uint8x16Min, Operator::kCommutative, 2, 0, 1)                             \
-  V(Uint8x16Max, Operator::kCommutative, 2, 0, 1)                             \
-  V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
-  V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
-  V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1)                       \
-  V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
-  V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
-  V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1)                        \
-  V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
-  V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
-  V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
-  V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1)                            \
-  V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                        \
-  V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1)                        \
-  V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                       \
-  V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                       \
-  V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1)                           \
-  V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1)                        \
-  V(Simd128Load, Operator::kNoProperties, 2, 0, 1)                            \
-  V(Simd128Load1, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Simd128Load2, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Simd128Load3, Operator::kNoProperties, 2, 0, 1)                           \
-  V(Simd128Store, Operator::kNoProperties, 3, 0, 1)                           \
-  V(Simd128Store1, Operator::kNoProperties, 3, 0, 1)                          \
-  V(Simd128Store2, Operator::kNoProperties, 3, 0, 1)                          \
-  V(Simd128Store3, Operator::kNoProperties, 3, 0, 1)                          \
-  V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
-  V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
-  V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+#define PURE_OP_LIST(V)                                                      \
+  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Word32Shl, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word32Shr, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word32Sar, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word32Ror, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word32Equal, Operator::kCommutative, 2, 0, 1)                            \
+  V(Word32Clz, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Word64Shl, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word64Shr, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word64Sar, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word64Ror, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Word64Clz, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Word64Equal, Operator::kCommutative, 2, 0, 1)                            \
+  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Int32Sub, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Int32Div, Operator::kNoProperties, 2, 1, 1)                              \
+  V(Int32Mod, Operator::kNoProperties, 2, 1, 1)                              \
+  V(Int32LessThan, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Uint32Div, Operator::kNoProperties, 2, 1, 1)                             \
+  V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                             \
+  V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Int64Sub, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Int64Div, Operator::kNoProperties, 2, 1, 1)                              \
+  V(Int64Mod, Operator::kNoProperties, 2, 1, 1)                              \
+  V(Int64LessThan, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Uint64Div, Operator::kNoProperties, 2, 1, 1)                             \
+  V(Uint64Mod, Operator::kNoProperties, 2, 1, 1)                             \
+  V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
+  V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1)                   \
+  V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)               \
+  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                \
+  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)               \
+  V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                \
+  V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)               \
+  V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)             \
+  V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2)             \
+  V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)            \
+  V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)            \
+  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                  \
+  V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1)                     \
+  V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1)                  \
+  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1)                    \
+  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
+  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1)                  \
+  V(ImpossibleToWord32, Operator::kNoProperties, 1, 0, 1)                    \
+  V(ImpossibleToWord64, Operator::kNoProperties, 1, 0, 1)                    \
+  V(ImpossibleToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
+  V(ImpossibleToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(ImpossibleToTagged, Operator::kNoProperties, 1, 0, 1)                    \
+  V(ImpossibleToBit, Operator::kNoProperties, 1, 0, 1)                       \
+  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1)              \
+  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1)                 \
+  V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
+  V(Float32Abs, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float32Add, Operator::kCommutative, 2, 0, 1)                             \
+  V(Float32Sub, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Float32Mul, Operator::kCommutative, 2, 0, 1)                             \
+  V(Float32Div, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Float32Neg, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Float64Abs, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Acos, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Acosh, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Asin, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Asinh, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Atan, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Atan2, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float64Atanh, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Cos, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Cosh, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Exp, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Expm1, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Log, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Log1p, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Log2, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Log10, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Float64Neg, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Add, Operator::kCommutative, 2, 0, 1)                             \
+  V(Float64Sub, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Float64Mul, Operator::kCommutative, 2, 0, 1)                             \
+  V(Float64Div, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Float64Mod, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Float64Pow, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Float64Sin, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Sinh, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64Tan, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Tanh, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float32Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float32LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Float64Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float64LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1)               \
+  V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1)              \
+  V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1)                \
+  V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)               \
+  V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                      \
+  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                      \
+  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)                \
+  V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2)                          \
+  V(Int32PairSub, Operator::kNoProperties, 4, 0, 2)                          \
+  V(Int32PairMul, Operator::kNoProperties, 4, 0, 2)                          \
+  V(Word32PairShl, Operator::kNoProperties, 3, 0, 2)                         \
+  V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                         \
+  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)                         \
+  V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1)                       \
+  V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
+  V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1)                         \
+  V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1)                  \
+  V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1)              \
+  V(Float32x4Add, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float32x4Mul, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float32x4Div, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Float32x4Min, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float32x4Max, Operator::kCommutative, 2, 0, 1)                           \
+  V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1)                        \
+  V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1)                        \
+  V(Float32x4Equal, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1)                      \
+  V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
+  V(Float32x4Select, Operator::kNoProperties, 3, 0, 1)                       \
+  V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                      \
+  V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                      \
+  V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1)                  \
+  V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1)                 \
+  V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1)                         \
+  V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
+  V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Int32x4Add, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Int32x4Mul, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int32x4Min, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int32x4Max, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Int32x4Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1)                        \
+  V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(Int32x4Select, Operator::kNoProperties, 3, 0, 1)                         \
+  V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                        \
+  V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                        \
+  V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                  \
+  V(Uint32x4Min, Operator::kCommutative, 2, 0, 1)                            \
+  V(Uint32x4Max, Operator::kCommutative, 2, 0, 1)                            \
+  V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)            \
+  V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1)                      \
+  V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
+  V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
+  V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                 \
+  V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1)                        \
+  V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
+  V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1)                       \
+  V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1)                       \
+  V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                       \
+  V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                       \
+  V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1)                          \
+  V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1)                       \
+  V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1)                         \
+  V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
+  V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Int16x8Add, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
+  V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int16x8Mul, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int16x8Min, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int16x8Max, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Int16x8Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1)                        \
+  V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(Int16x8Select, Operator::kNoProperties, 3, 0, 1)                         \
+  V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                        \
+  V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                       \
+  V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                    \
+  V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Uint16x8Min, Operator::kCommutative, 2, 0, 1)                            \
+  V(Uint16x8Max, Operator::kCommutative, 2, 0, 1)                            \
+  V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)            \
+  V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1)                      \
+  V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
+  V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
+  V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1)                        \
+  V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
+  V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                       \
+  V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1)                       \
+  V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                       \
+  V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                      \
+  V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1)                          \
+  V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1)                       \
+  V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1)                        \
+  V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
+  V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Int8x16Add, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
+  V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int8x16Mul, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int8x16Min, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int8x16Max, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Int8x16Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1)                        \
+  V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(Int8x16Select, Operator::kNoProperties, 3, 0, 1)                         \
+  V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                       \
+  V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                       \
+  V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                    \
+  V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Uint8x16Min, Operator::kCommutative, 2, 0, 1)                            \
+  V(Uint8x16Max, Operator::kCommutative, 2, 0, 1)                            \
+  V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)            \
+  V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1)                      \
+  V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
+  V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
+  V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1)                       \
+  V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
+  V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                       \
+  V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1)                       \
+  V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                      \
+  V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                      \
+  V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1)                          \
+  V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1)                       \
+  V(Simd128Load, Operator::kNoProperties, 2, 0, 1)                           \
+  V(Simd128Load1, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Simd128Load2, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Simd128Load3, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Simd128Store, Operator::kNoProperties, 3, 0, 1)                          \
+  V(Simd128Store1, Operator::kNoProperties, 3, 0, 1)                         \
+  V(Simd128Store2, Operator::kNoProperties, 3, 0, 1)                         \
+  V(Simd128Store3, Operator::kNoProperties, 3, 0, 1)                         \
+  V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
   V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
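
PURE_OP_LIST is an X-macro: each V(...) row is expanded later in this file
into a cached operator instance and a builder accessor. A toy, compilable
version of the same pattern (names invented for the sketch):

    #include <cstdio>

    #define TOY_OP_LIST(V) \
      V(Add, 2)            \
      V(Neg, 1)

    // First expansion: declare one struct per row, recording the input
    // count the row specifies.
    #define DECLARE(Name, inputs) \
      struct Name##Operator { static constexpr int value_inputs = inputs; };
    TOY_OP_LIST(DECLARE)
    #undef DECLARE

    int main() {
      // Second expansion: consume the same rows again.
    #define PRINT(Name, inputs) \
      std::printf(#Name ": %d value inputs\n", Name##Operator::value_inputs);
      TOY_OP_LIST(PRINT)
    #undef PRINT
    }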
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
@@ -366,12 +394,10 @@
   V(Word64Ctz, Operator::kNoProperties, 1, 0, 1)            \
   V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1)    \
   V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1)    \
+  V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1)   \
+  V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1)   \
   V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1)         \
   V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1)         \
-  V(Float32Max, Operator::kNoProperties, 2, 0, 1)           \
-  V(Float32Min, Operator::kNoProperties, 2, 0, 1)           \
-  V(Float64Max, Operator::kNoProperties, 2, 0, 1)           \
-  V(Float64Min, Operator::kNoProperties, 2, 0, 1)           \
   V(Float32RoundDown, Operator::kNoProperties, 1, 0, 1)     \
   V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1)     \
   V(Float32RoundUp, Operator::kNoProperties, 1, 0, 1)       \
@@ -380,13 +406,12 @@
   V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
   V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
   V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
-  V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
-  V(Float32Neg, Operator::kNoProperties, 1, 0, 1)           \
-  V(Float64Neg, Operator::kNoProperties, 1, 0, 1)
+  V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
 
 #define OVERFLOW_OP_LIST(V)                                                \
   V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
   V(Int32SubWithOverflow, Operator::kNoProperties)                         \
+  V(Int32MulWithOverflow, Operator::kAssociative | Operator::kCommutative) \
   V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
   V(Int64SubWithOverflow, Operator::kNoProperties)
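
Each overflow operator produces two outputs that the graph reads back through
Projection(0) (the arithmetic result) and Projection(1) (the overflow bit),
which is why ReduceProjection appears among the reducer hooks above. A
standalone sketch of what the new Int32MulWithOverflow computes (semantics
assumed from the usual widening-multiply idiom):

    #include <cstdint>
    #include <utility>

    // Returns the truncated 32-bit product plus a flag saying whether the
    // mathematical product fell outside the int32 range.
    std::pair<int32_t, bool> Int32MulWithOverflow(int32_t a, int32_t b) {
      int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      bool overflow =
          wide != static_cast<int64_t>(static_cast<int32_t>(wide));
      return {static_cast<int32_t>(wide), overflow};
    }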
 
@@ -413,6 +438,8 @@
   V(kWord16)                           \
   V(kWord32)                           \
   V(kWord64)                           \
+  V(kTaggedSigned)                     \
+  V(kTaggedPointer)                    \
   V(kTagged)
 
 #define ATOMIC_TYPE_LIST(V) \
@@ -461,6 +488,14 @@
               Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
               "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}              \
   };                                                                         \
+  struct UnalignedLoad##Type##Operator final                                 \
+      : public Operator1<UnalignedLoadRepresentation> {                      \
+    UnalignedLoad##Type##Operator()                                          \
+        : Operator1<UnalignedLoadRepresentation>(                            \
+              IrOpcode::kUnalignedLoad,                                      \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
+              "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}     \
+  };                                                                         \
   struct CheckedLoad##Type##Operator final                                   \
       : public Operator1<CheckedLoadRepresentation> {                        \
     CheckedLoad##Type##Operator()                                            \
@@ -470,6 +505,7 @@
               "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}       \
   };                                                                         \
   Load##Type##Operator kLoad##Type;                                          \
+  UnalignedLoad##Type##Operator kUnalignedLoad##Type;                        \
   CheckedLoad##Type##Operator kCheckedLoad##Type;
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
@@ -517,6 +553,15 @@
     Store##Type##FullWriteBarrier##Operator()                                  \
         : Store##Type##Operator(kFullWriteBarrier) {}                          \
   };                                                                           \
+  struct UnalignedStore##Type##Operator final                                  \
+      : public Operator1<UnalignedStoreRepresentation> {                       \
+    UnalignedStore##Type##Operator()                                           \
+        : Operator1<UnalignedStoreRepresentation>(                             \
+              IrOpcode::kUnalignedStore,                                       \
+              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,     \
+              "UnalignedStore", 3, 1, 1, 0, 1, 0,                              \
+              MachineRepresentation::Type) {}                                  \
+  };                                                                           \
   struct CheckedStore##Type##Operator final                                    \
       : public Operator1<CheckedStoreRepresentation> {                         \
     CheckedStore##Type##Operator()                                             \
@@ -531,6 +576,7 @@
   Store##Type##PointerWriteBarrier##Operator                                   \
       kStore##Type##PointerWriteBarrier;                                       \
   Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier;      \
+  UnalignedStore##Type##Operator kUnalignedStore##Type;                        \
   CheckedStore##Type##Operator kCheckedStore##Type;
   MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
@@ -567,6 +613,13 @@
                    0, 0, 0, 0, 0) {}
   };
   DebugBreakOperator kDebugBreak;
+
+  struct UnsafePointerAddOperator final : public Operator {
+    UnsafePointerAddOperator()
+        : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
+                   "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
+  };
+  UnsafePointerAddOperator kUnsafePointerAdd;
 };
 
 struct CommentOperator : public Operator1<const char*> {
@@ -590,6 +643,33 @@
          word == MachineRepresentation::kWord64);
 }
 
+const Operator* MachineOperatorBuilder::UnalignedLoad(
+    UnalignedLoadRepresentation rep) {
+#define LOAD(Type)                       \
+  if (rep == MachineType::Type()) {      \
+    return &cache_.kUnalignedLoad##Type; \
+  }
+  MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::UnalignedStore(
+    UnalignedStoreRepresentation rep) {
+  switch (rep) {
+#define STORE(kRep)                 \
+  case MachineRepresentation::kRep: \
+    return &cache_.kUnalignedStore##kRep;
+    MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kNone:
+      break;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
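
The builder hands out the statically cached instances stamped out by the
LOAD/STORE macros above; kBit and kNone fall through to UNREACHABLE() because
there is no unaligned form for them. A self-contained model of the
switch-over-a-list dispatch (toy types, two representations only):

    #include <cassert>

    enum class MachineRepresentation { kWord8, kWord32, kBit, kNone };

    #define REP_LIST(V) V(kWord8) V(kWord32)

    struct Operator {};

    struct Cache {
    #define STORE(kRep) Operator kUnalignedStore##kRep;
      REP_LIST(STORE)
    #undef STORE
    } cache_;

    const Operator* UnalignedStore(MachineRepresentation rep) {
      switch (rep) {
    #define STORE(kRep)                 \
      case MachineRepresentation::kRep: \
        return &cache_.kUnalignedStore##kRep;
        REP_LIST(STORE)
    #undef STORE
        case MachineRepresentation::kBit:
        case MachineRepresentation::kNone:
          break;  // no unaligned store for these
      }
      assert(false);  // models UNREACHABLE()
      return nullptr;
    }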
 
 #define PURE(Name, properties, value_input_count, control_input_count, \
              output_count)                                             \
@@ -597,10 +677,10 @@
 PURE_OP_LIST(PURE)
 #undef PURE
 
-#define PURE(Name, properties, value_input_count, control_input_count,     \
-             output_count)                                                 \
-  const OptionalOperator MachineOperatorBuilder::Name() {                  \
-    return OptionalOperator(flags_ & k##Name ? &cache_.k##Name : nullptr); \
+#define PURE(Name, properties, value_input_count, control_input_count, \
+             output_count)                                             \
+  const OptionalOperator MachineOperatorBuilder::Name() {              \
+    return OptionalOperator(flags_ & k##Name, &cache_.k##Name);        \
   }
 PURE_OPTIONAL_OP_LIST(PURE)
 #undef PURE
@@ -657,6 +737,10 @@
   return nullptr;
 }
 
+const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
+  return &cache_.kUnsafePointerAdd;
+}
+
 const Operator* MachineOperatorBuilder::DebugBreak() {
   return &cache_.kDebugBreak;
 }
@@ -694,20 +778,6 @@
   return nullptr;
 }
 
-// On 32 bit platforms we need to get a reference to optional operators of
-// 64-bit instructions for later Int64Lowering, even though 32 bit platforms
-// don't support the original 64-bit instruction.
-const Operator* MachineOperatorBuilder::Word64PopcntPlaceholder() {
-  return &cache_.kWord64Popcnt;
-}
-
-// On 32 bit platforms we need to get a reference to optional operators of
-// 64-bit instructions for later Int64Lowering, even though 32 bit platforms
-// don't support the original 64-bit instruction.
-const Operator* MachineOperatorBuilder::Word64CtzPlaceholder() {
-  return &cache_.kWord64Ctz;
-}
-
 const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
 #define LOAD(Type)                    \
   if (rep == MachineType::Type()) {   \
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 7c443f4..611846a 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -20,15 +20,21 @@
 // For operators that are not supported on all platforms.
 class OptionalOperator final {
  public:
-  explicit OptionalOperator(const Operator* op) : op_(op) {}
+  OptionalOperator(bool supported, const Operator* op)
+      : supported_(supported), op_(op) {}
 
-  bool IsSupported() const { return op_ != nullptr; }
+  bool IsSupported() const { return supported_; }
+  // Gets the operator only if it is supported.
   const Operator* op() const {
-    DCHECK_NOT_NULL(op_);
+    DCHECK(supported_);
     return op_;
   }
+  // Always gets the operator, even when it is not supported. This is useful
+  // when the operator only serves as a placeholder in a graph, for instance.
+  const Operator* placeholder() const { return op_; }
 
  private:
+  bool supported_;
   const Operator* const op_;
 };
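
Carrying the pointer even when unsupported is what lets the dedicated
Word64PopcntPlaceholder/Word64CtzPlaceholder accessors be deleted above:
32-bit platforms can now plant the operator via placeholder() and let
Int64Lowering rewrite it later. A standalone restatement of the class with
one illustrative call site:

    #include <cassert>
    #include <cstdio>

    struct Operator { const char* name; };

    class OptionalOperator {
     public:
      OptionalOperator(bool supported, const Operator* op)
          : supported_(supported), op_(op) {}
      bool IsSupported() const { return supported_; }
      const Operator* op() const {
        assert(supported_);  // models the DCHECK
        return op_;
      }
      const Operator* placeholder() const { return op_; }

     private:
      bool supported_;
      const Operator* const op_;
    };

    int main() {
      static const Operator kWord64Popcnt{"Word64Popcnt"};
      // On a 32-bit target the feature flag is absent, yet the operator
      // can still be used as a graph placeholder.
      OptionalOperator popcnt(/*supported=*/false, &kWord64Popcnt);
      const Operator* op =
          popcnt.IsSupported() ? popcnt.op() : popcnt.placeholder();
      std::printf("%s\n", op->name);
    }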
 
@@ -64,6 +70,15 @@
 
 StoreRepresentation const& StoreRepresentationOf(Operator const*);
 
+// An UnalignedLoad needs a MachineType.
+typedef MachineType UnalignedLoadRepresentation;
+
+UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const*);
+
+// An UnalignedStore needs a MachineRepresentation.
+typedef MachineRepresentation UnalignedStoreRepresentation;
+
+UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
+    Operator const*);
 
 // A CheckedLoad needs a MachineType.
 typedef MachineType CheckedLoadRepresentation;
@@ -87,41 +102,35 @@
  public:
   // Flags that specify which operations are available. This is useful
   // for operations that are unsupported by some back-ends.
-  enum Flag {
+  enum Flag : unsigned {
     kNoFlags = 0u,
-    // Note that Float*Max behaves like `(b < a) ? a : b`, not like Math.max().
-    // Note that Float*Min behaves like `(a < b) ? a : b`, not like Math.min().
-    kFloat32Max = 1u << 0,
-    kFloat32Min = 1u << 1,
-    kFloat64Max = 1u << 2,
-    kFloat64Min = 1u << 3,
-    kFloat32RoundDown = 1u << 4,
-    kFloat64RoundDown = 1u << 5,
-    kFloat32RoundUp = 1u << 6,
-    kFloat64RoundUp = 1u << 7,
-    kFloat32RoundTruncate = 1u << 8,
-    kFloat64RoundTruncate = 1u << 9,
-    kFloat32RoundTiesEven = 1u << 10,
-    kFloat64RoundTiesEven = 1u << 11,
-    kFloat64RoundTiesAway = 1u << 12,
-    kInt32DivIsSafe = 1u << 13,
-    kUint32DivIsSafe = 1u << 14,
-    kWord32ShiftIsSafe = 1u << 15,
-    kWord32Ctz = 1u << 16,
-    kWord64Ctz = 1u << 17,
-    kWord32Popcnt = 1u << 18,
-    kWord64Popcnt = 1u << 19,
-    kWord32ReverseBits = 1u << 20,
-    kWord64ReverseBits = 1u << 21,
-    kFloat32Neg = 1u << 22,
-    kFloat64Neg = 1u << 23,
-    kAllOptionalOps =
-        kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
-        kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
-        kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
-        kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
-        kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
-        kWord32ReverseBits | kWord64ReverseBits | kFloat32Neg | kFloat64Neg
+    kFloat32RoundDown = 1u << 0,
+    kFloat64RoundDown = 1u << 1,
+    kFloat32RoundUp = 1u << 2,
+    kFloat64RoundUp = 1u << 3,
+    kFloat32RoundTruncate = 1u << 4,
+    kFloat64RoundTruncate = 1u << 5,
+    kFloat32RoundTiesEven = 1u << 6,
+    kFloat64RoundTiesEven = 1u << 7,
+    kFloat64RoundTiesAway = 1u << 8,
+    kInt32DivIsSafe = 1u << 9,
+    kUint32DivIsSafe = 1u << 10,
+    kWord32ShiftIsSafe = 1u << 11,
+    kWord32Ctz = 1u << 12,
+    kWord64Ctz = 1u << 13,
+    kWord32Popcnt = 1u << 14,
+    kWord64Popcnt = 1u << 15,
+    kWord32ReverseBits = 1u << 16,
+    kWord64ReverseBits = 1u << 17,
+    kWord32ReverseBytes = 1u << 18,
+    kWord64ReverseBytes = 1u << 19,
+    kAllOptionalOps = kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+                      kFloat64RoundUp | kFloat32RoundTruncate |
+                      kFloat64RoundTruncate | kFloat64RoundTiesAway |
+                      kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+                      kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+                      kWord32ReverseBits | kWord64ReverseBits |
+                      kWord32ReverseBytes | kWord64ReverseBytes
   };
   typedef base::Flags<Flag, unsigned> Flags;
 
@@ -131,13 +140,13 @@
 
     bool IsUnalignedLoadSupported(const MachineType& machineType,
                                   uint8_t alignment) const {
-      return IsUnalignedSupported(unalignedLoadSupportedTypes_, machineType,
+      return IsUnalignedSupported(unalignedLoadUnsupportedTypes_, machineType,
                                   alignment);
     }
 
     bool IsUnalignedStoreSupported(const MachineType& machineType,
                                    uint8_t alignment) const {
-      return IsUnalignedSupported(unalignedStoreSupportedTypes_, machineType,
+      return IsUnalignedSupported(unalignedStoreUnsupportedTypes_, machineType,
                                   alignment);
     }
 
@@ -147,25 +156,25 @@
     static AlignmentRequirements NoUnalignedAccessSupport() {
       return AlignmentRequirements(kNoSupport);
     }
-    static AlignmentRequirements SomeUnalignedAccessSupport(
-        const Vector<MachineType>& unalignedLoadSupportedTypes,
-        const Vector<MachineType>& unalignedStoreSupportedTypes) {
-      return AlignmentRequirements(kSomeSupport, unalignedLoadSupportedTypes,
-                                   unalignedStoreSupportedTypes);
+    static AlignmentRequirements SomeUnalignedAccessUnsupported(
+        const Vector<MachineType>& unalignedLoadUnsupportedTypes,
+        const Vector<MachineType>& unalignedStoreUnsupportedTypes) {
+      return AlignmentRequirements(kSomeSupport, unalignedLoadUnsupportedTypes,
+                                   unalignedStoreUnsupportedTypes);
     }
 
    private:
     explicit AlignmentRequirements(
         AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport,
-        Vector<MachineType> unalignedLoadSupportedTypes =
+        Vector<MachineType> unalignedLoadUnsupportedTypes =
             Vector<MachineType>(NULL, 0),
-        Vector<MachineType> unalignedStoreSupportedTypes =
+        Vector<MachineType> unalignedStoreUnsupportedTypes =
             Vector<MachineType>(NULL, 0))
         : unalignedSupport_(unalignedAccessSupport),
-          unalignedLoadSupportedTypes_(unalignedLoadSupportedTypes),
-          unalignedStoreSupportedTypes_(unalignedStoreSupportedTypes) {}
+          unalignedLoadUnsupportedTypes_(unalignedLoadUnsupportedTypes),
+          unalignedStoreUnsupportedTypes_(unalignedStoreUnsupportedTypes) {}
 
-    bool IsUnalignedSupported(const Vector<MachineType>& supported,
+    bool IsUnalignedSupported(const Vector<MachineType>& unsupported,
                               const MachineType& machineType,
                               uint8_t alignment) const {
       if (unalignedSupport_ == kFullSupport) {
@@ -173,18 +182,18 @@
       } else if (unalignedSupport_ == kNoSupport) {
         return false;
       } else {
-        for (MachineType m : supported) {
+        for (MachineType m : unsupported) {
           if (m == machineType) {
-            return true;
+            return false;
           }
         }
-        return false;
+        return true;
       }
     }
 
     const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_;
-    const Vector<MachineType> unalignedLoadSupportedTypes_;
-    const Vector<MachineType> unalignedStoreSupportedTypes_;
+    const Vector<MachineType> unalignedLoadUnsupportedTypes_;
+    const Vector<MachineType> unalignedStoreUnsupportedTypes_;
   };
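
Note the inverted contract here: backends now enumerate the machine types for which unaligned access is not supported, and everything else is assumed to be fine. A hedged sketch with illustrative values:

    // Assume only Float64 accesses must stay aligned on this target.
    static MachineType kUnsupported[] = {MachineType::Float64()};
    AlignmentRequirements req =
        AlignmentRequirements::SomeUnalignedAccessUnsupported(
            Vector<MachineType>(kUnsupported, arraysize(kUnsupported)),
            Vector<MachineType>(kUnsupported, arraysize(kUnsupported)));
    req.IsUnalignedLoadSupported(MachineType::Int32(), 1);    // true
    req.IsUnalignedLoadSupported(MachineType::Float64(), 1);  // false
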
 
   explicit MachineOperatorBuilder(
@@ -192,10 +201,11 @@
       MachineRepresentation word = MachineType::PointerRepresentation(),
       Flags supportedOperators = kNoFlags,
       AlignmentRequirements alignmentRequirements =
-          AlignmentRequirements::NoUnalignedAccessSupport());
+          AlignmentRequirements::FullUnalignedAccessSupport());
 
   const Operator* Comment(const char* msg);
   const Operator* DebugBreak();
+  const Operator* UnsafePointerAdd();
 
   const Operator* Word32And();
   const Operator* Word32Or();
@@ -209,9 +219,10 @@
   const OptionalOperator Word32Ctz();
   const OptionalOperator Word32Popcnt();
   const OptionalOperator Word64Popcnt();
-  const Operator* Word64PopcntPlaceholder();
   const OptionalOperator Word32ReverseBits();
   const OptionalOperator Word64ReverseBits();
+  const OptionalOperator Word32ReverseBytes();
+  const OptionalOperator Word64ReverseBytes();
   bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
 
   const Operator* Word64And();
@@ -223,7 +234,6 @@
   const Operator* Word64Ror();
   const Operator* Word64Clz();
   const OptionalOperator Word64Ctz();
-  const Operator* Word64CtzPlaceholder();
   const Operator* Word64Equal();
 
   const Operator* Int32PairAdd();
@@ -238,6 +248,7 @@
   const Operator* Int32Sub();
   const Operator* Int32SubWithOverflow();
   const Operator* Int32Mul();
+  const Operator* Int32MulWithOverflow();
   const Operator* Int32MulHigh();
   const Operator* Int32Div();
   const Operator* Int32Mod();
@@ -291,6 +302,16 @@
   const Operator* ChangeUint32ToFloat64();
   const Operator* ChangeUint32ToUint64();
 
+  // These are changes from impossible values (for example a string that
+  // passed a Smi check). They can safely emit an abort instruction, which
+  // should never be reached.
+  const Operator* ImpossibleToWord32();
+  const Operator* ImpossibleToWord64();
+  const Operator* ImpossibleToFloat32();
+  const Operator* ImpossibleToFloat64();
+  const Operator* ImpossibleToTagged();
+  const Operator* ImpossibleToBit();
+
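
These pair with the kArchImpossible opcode handled in the MIPS code generator below. A sketch of the intended use, assuming a Graph* graph, a MachineOperatorBuilder* machine and a Node* value (illustrative only):

    // A representation change that is provably dead still gets a well-typed
    // node; if it were ever executed, codegen would hit the abort.
    Node* dead = graph->NewNode(machine->ImpossibleToWord32(), value);
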
   // These operators truncate or round numbers, both changing the representation
   // of the number and mapping multiple input values onto the same output value.
   const Operator* TruncateFloat64ToFloat32();
@@ -314,7 +335,6 @@
   // (single-precision).
   const Operator* Float32Add();
   const Operator* Float32Sub();
-  const Operator* Float32SubPreserveNan();
   const Operator* Float32Mul();
   const Operator* Float32Div();
   const Operator* Float32Sqrt();
@@ -323,7 +343,6 @@
   // (double-precision).
   const Operator* Float64Add();
   const Operator* Float64Sub();
-  const Operator* Float64SubPreserveNan();
   const Operator* Float64Mul();
   const Operator* Float64Div();
   const Operator* Float64Mod();
@@ -339,13 +358,12 @@
   const Operator* Float64LessThan();
   const Operator* Float64LessThanOrEqual();
 
-  // Floating point min/max complying to IEEE 754 (single-precision).
-  const OptionalOperator Float32Max();
-  const OptionalOperator Float32Min();
-
-  // Floating point min/max complying to IEEE 754 (double-precision).
-  const OptionalOperator Float64Max();
-  const OptionalOperator Float64Min();
+  // Floating point min/max complying with ECMAScript 6 (double-precision).
+  const Operator* Float64Max();
+  const Operator* Float64Min();
+  // Floating point min/max complying with WebAssembly (single-precision).
+  const Operator* Float32Max();
+  const Operator* Float32Min();
 
   // Floating point abs complying to IEEE 754 (single-precision).
   const Operator* Float32Abs();
@@ -365,21 +383,28 @@
   const OptionalOperator Float64RoundTiesEven();
 
   // Floating point neg.
-  const OptionalOperator Float32Neg();
-  const OptionalOperator Float64Neg();
+  const Operator* Float32Neg();
+  const Operator* Float64Neg();
 
   // Floating point trigonometric functions (double-precision).
+  const Operator* Float64Acos();
+  const Operator* Float64Acosh();
+  const Operator* Float64Asin();
+  const Operator* Float64Asinh();
   const Operator* Float64Atan();
   const Operator* Float64Atan2();
   const Operator* Float64Atanh();
-
-  // Floating point trigonometric functions (double-precision).
   const Operator* Float64Cos();
+  const Operator* Float64Cosh();
   const Operator* Float64Sin();
+  const Operator* Float64Sinh();
   const Operator* Float64Tan();
+  const Operator* Float64Tanh();
 
   // Floating point exponential functions (double-precision).
   const Operator* Float64Exp();
+  const Operator* Float64Expm1();
+  const Operator* Float64Pow();
 
   // Floating point logarithm (double-precision).
   const Operator* Float64Log();
@@ -387,8 +412,8 @@
   const Operator* Float64Log2();
   const Operator* Float64Log10();
 
+  // Floating point cube root (double-precision).
   const Operator* Float64Cbrt();
-  const Operator* Float64Expm1();
 
   // Floating point bit representation.
   const Operator* Float64ExtractLowWord32();
@@ -590,6 +615,12 @@
   // store [base + index], value
   const Operator* Store(StoreRepresentation rep);
 
+  // unaligned load [base + index]
+  const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
+
+  // unaligned store [base + index], value
+  const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
+
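
A sketch of emitting an unaligned access through the new operators; the node shape (base, index, value, effect, control) is an assumption mirroring the aligned Load/Store operators:

    Node* load =
        graph->NewNode(machine->UnalignedLoad(MachineType::Float64()), base,
                       index, effect, control);
    Node* store = graph->NewNode(
        machine->UnalignedStore(MachineRepresentation::kFloat64), base, index,
        value, effect, control);
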
   const Operator* StackSlot(MachineRepresentation rep);
 
   // Access to the machine stack.
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
index 8c66347..97c4362 100644
--- a/src/compiler/memory-optimizer.cc
+++ b/src/compiler/memory-optimizer.cc
@@ -92,6 +92,8 @@
     case IrOpcode::kIfException:
     case IrOpcode::kLoad:
     case IrOpcode::kStore:
+    case IrOpcode::kRetain:
+    case IrOpcode::kUnsafePointerAdd:
       return VisitOtherEffect(node, state);
     default:
       break;
@@ -370,23 +372,28 @@
 }
 
 Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
-  Node* index = key;
-  int element_size_shift =
+  Node* index;
+  if (machine()->Is64()) {
+    // On 64-bit platforms, we need to feed a Word64 index to the Load and
+    // Store operators. Since neither LoadElement nor StoreElement does any
+    // bounds checking itself, we can be sure that the {key} was already
+    // checked and is in range, so the further address computation can be
+    // done on Word64 below, which ideally lets us fuse the address
+    // computation with the actual memory access on Intel platforms.
+    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), key);
+  } else {
+    index = key;
+  }
+  int const element_size_shift =
       ElementSizeLog2Of(access.machine_type.representation());
   if (element_size_shift) {
-    index = graph()->NewNode(machine()->Word32Shl(), index,
-                             jsgraph()->Int32Constant(element_size_shift));
+    index = graph()->NewNode(machine()->WordShl(), index,
+                             jsgraph()->IntPtrConstant(element_size_shift));
   }
-  const int fixed_offset = access.header_size - access.tag();
+  int const fixed_offset = access.header_size - access.tag();
   if (fixed_offset) {
-    index = graph()->NewNode(machine()->Int32Add(), index,
-                             jsgraph()->Int32Constant(fixed_offset));
-  }
-  if (machine()->Is64()) {
-    // TODO(turbofan): This is probably only correct for typed arrays, and only
-    // if the typed arrays are at most 2GiB in size, which happens to match
-    // exactly our current situation.
-    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+    index = graph()->NewNode(machine()->IntAdd(), index,
+                             jsgraph()->IntPtrConstant(fixed_offset));
   }
   return index;
 }
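
A worked example of the rewritten computation, with illustrative numbers: a Float64 element access has element_size_shift == 3, and with header_size == 16 and a heap-object tag of 1, fixed_offset == 15. On a 64-bit target the emitted sequence is:

    index = ChangeUint32ToUint64(key)
    index = WordShl(index, IntPtrConstant(3))    // key * 8
    index = IntAdd(index, IntPtrConstant(15))    // + header_size - tag

On 32-bit targets the same shape is produced, with WordShl/IntAdd resolving to their Word32 counterparts.
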
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 5e30e34..d06bc30 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -513,21 +513,7 @@
   __ Pop(ra, fp);
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ addiu(sp, sp, sp_slot_delta * kPointerSize);
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -560,6 +546,38 @@
   __ bind(&done);
 }
 
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+                                   FrameAccessState* state,
+                                   int new_slot_above_sp,
+                                   bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    masm->Subu(sp, sp, stack_slot_delta * kPointerSize);
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    masm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
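
The before/after-gap pair replaces the one-shot AssembleDeconstructActivationRecord above: the stack may only grow before gap moves are resolved and may shrink afterwards. A worked example with illustrative numbers:

    // GetSPToFPSlotCount() == 4, kFixedSlotCountAboveFp == 2
    //   => current_sp_offset == 6
    // first_unused_stack_slot == 8:
    //   before gap: delta == +2 -> Subu(sp, sp, 2 * kPointerSize)  (grow)
    //   after gap:  delta ==  0 -> no adjustment
    // first_unused_stack_slot == 4:
    //   before gap: delta == -2, shrinkage disallowed -> no-op
    //   after gap:  delta == -2 -> Addu(sp, sp, 2 * kPointerSize)  (shrink)
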
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -582,8 +600,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -597,14 +613,14 @@
         __ Jump(at);
       }
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!instr->InputAt(0)->IsImmediate());
       __ Jump(i.InputRegister(0));
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -620,6 +636,7 @@
       __ Call(at);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallJSFunctionFromJSFunction:
@@ -631,8 +648,6 @@
         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
       }
 
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -651,7 +666,7 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
@@ -678,6 +693,9 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -741,15 +759,33 @@
               Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
     case kIeee754Float64Atan2:
       ASSEMBLE_IEEE754_BINOP(atan2);
       break;
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Cbrt:
       ASSEMBLE_IEEE754_UNOP(cbrt);
       break;
@@ -759,9 +795,6 @@
     case kIeee754Float64Expm1:
       ASSEMBLE_IEEE754_UNOP(expm1);
       break;
-    case kIeee754Float64Atanh:
-      ASSEMBLE_IEEE754_UNOP(atanh);
-      break;
     case kIeee754Float64Log:
       ASSEMBLE_IEEE754_UNOP(log);
       break;
@@ -774,12 +807,23 @@
     case kIeee754Float64Log2:
       ASSEMBLE_IEEE754_UNOP(log2);
       break;
+    case kIeee754Float64Pow: {
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      break;
+    }
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kMipsAdd:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -795,6 +839,9 @@
     case kMipsMul:
       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
+    case kMipsMulOvf:
+      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+      break;
     case kMipsMulHigh:
       __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -1008,11 +1055,6 @@
       __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
-    case kMipsSubPreserveNanS:
-      __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
-                                        i.InputDoubleRegister(0),
-                                        i.InputDoubleRegister(1));
-      break;
     case kMipsMulS:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1079,11 +1121,6 @@
       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
-    case kMipsSubPreserveNanD:
-      __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
-                                        i.InputDoubleRegister(0),
-                                        i.InputDoubleRegister(1));
-      break;
     case kMipsMulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1109,6 +1146,12 @@
     case kMipsAbsD:
       __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
+    case kMipsNegS:
+      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+      break;
+    case kMipsNegD:
+      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kMipsSqrtD: {
       __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
@@ -1153,60 +1196,48 @@
       ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
       break;
     }
-    case kMipsFloat64Max: {
-      // (b < a) ? a : b
-      if (IsMipsArchVariant(kMips32r6)) {
-        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
-        // Left operand is result, passthrough if false.
-        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
-      break;
-    }
-    case kMipsFloat64Min: {
-      // (a < b) ? a : b
-      if (IsMipsArchVariant(kMips32r6)) {
-        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                 i.InputDoubleRegister(1));
-        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
-        // Right operand is result, passthrough if false.
-        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
-      break;
-    }
     case kMipsFloat32Max: {
-      // (b < a) ? a : b
-      if (IsMipsArchVariant(kMips32r6)) {
-        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
-        // Left operand is result, passthrough if false.
-        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
+      Label compare_nan, done_compare;
+      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+                       i.InputSingleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputSingleRegister(),
+              std::numeric_limits<float>::quiet_NaN());
+      __ bind(&done_compare);
+      break;
+    }
+    case kMipsFloat64Max: {
+      Label compare_nan, done_compare;
+      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                       i.InputDoubleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputDoubleRegister(),
+              std::numeric_limits<double>::quiet_NaN());
+      __ bind(&done_compare);
       break;
     }
     case kMipsFloat32Min: {
-      // (a < b) ? a : b
-      if (IsMipsArchVariant(kMips32r6)) {
-        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                 i.InputDoubleRegister(1));
-        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
-        // Right operand is result, passthrough if false.
-        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
+      Label compare_nan, done_compare;
+      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+                       i.InputSingleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputSingleRegister(),
+              std::numeric_limits<float>::quiet_NaN());
+      __ bind(&done_compare);
+      break;
+    }
+    case kMipsFloat64Min: {
+      Label compare_nan, done_compare;
+      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                       i.InputDoubleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputDoubleRegister(),
+              std::numeric_limits<double>::quiet_NaN());
+      __ bind(&done_compare);
       break;
     }
     case kMipsCvtSD: {
@@ -1287,6 +1318,11 @@
       FPURegister scratch = kScratchDoubleReg;
       __ trunc_w_s(scratch, i.InputDoubleRegister(0));
       __ mfc1(i.OutputRegister(), scratch);
+      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+      // because INT32_MIN allows easier out-of-bounds detection.
+      __ addiu(kScratchReg, i.OutputRegister(), 1);
+      __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
+      __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
       break;
     }
     case kMipsTruncUwD: {
@@ -1299,6 +1335,10 @@
       FPURegister scratch = kScratchDoubleReg;
       // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
       __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+      // because 0 allows easier out-of-bounds detection.
+      __ addiu(kScratchReg, i.OutputRegister(), 1);
+      __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
       break;
     }
     case kMipsFloat64ExtractLowWord32:
@@ -1313,19 +1353,9 @@
     case kMipsFloat64InsertHighWord32:
       __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
       break;
-    case kMipsFloat64SilenceNaN: {
-      FPURegister value = i.InputDoubleRegister(0);
-      FPURegister result = i.OutputDoubleRegister();
-      Register scratch0 = i.TempRegister(0);
-      Label is_nan, not_nan;
-      __ BranchF(NULL, &is_nan, eq, value, value);
-      __ Branch(&not_nan);
-      __ bind(&is_nan);
-      __ LoadRoot(scratch0, Heap::kNanValueRootIndex);
-      __ ldc1(result, FieldMemOperand(scratch0, HeapNumber::kValueOffset));
-      __ bind(&not_nan);
+    case kMipsFloat64SilenceNaN:
+      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    }
 
     // ... more basic instructions ...
 
@@ -1341,34 +1371,65 @@
     case kMipsLhu:
       __ lhu(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMipsUlhu:
+      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMipsLh:
       __ lh(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMipsUlh:
+      __ Ulh(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMipsSh:
       __ sh(i.InputRegister(2), i.MemoryOperand());
       break;
+    case kMipsUsh:
+      __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+      break;
     case kMipsLw:
       __ lw(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMipsUlw:
+      __ Ulw(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMipsSw:
       __ sw(i.InputRegister(2), i.MemoryOperand());
       break;
+    case kMipsUsw:
+      __ Usw(i.InputRegister(2), i.MemoryOperand());
+      break;
     case kMipsLwc1: {
       __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
       break;
     }
+    case kMipsUlwc1: {
+      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
+      break;
+    }
     case kMipsSwc1: {
       size_t index = 0;
       MemOperand operand = i.MemoryOperand(&index);
       __ swc1(i.InputSingleRegister(index), operand);
       break;
     }
+    case kMipsUswc1: {
+      size_t index = 0;
+      MemOperand operand = i.MemoryOperand(&index);
+      __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+      break;
+    }
     case kMipsLdc1:
       __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
+    case kMipsUldc1:
+      __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
+      break;
     case kMipsSdc1:
       __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
       break;
+    case kMipsUsdc1:
+      __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+      break;
     case kMipsPush:
       if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1398,6 +1459,10 @@
       }
       break;
     }
+    case kMipsByteSwap32: {
+      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
       break;
@@ -1544,6 +1609,20 @@
         UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
         break;
     }
+  } else if (instr->arch_opcode() == kMipsMulOvf) {
+    switch (branch->condition) {
+      case kOverflow:
+        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+                        i.InputOperand(1), tlabel, flabel);
+        break;
+      case kNotOverflow:
+        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+                        i.InputOperand(1), flabel, tlabel);
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
+        break;
+    }
   } else if (instr->arch_opcode() == kMipsCmp) {
     cc = FlagsConditionToConditionCmp(branch->condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1609,7 +1688,8 @@
     }
     return;
   } else if (instr->arch_opcode() == kMipsAddOvf ||
-             instr->arch_opcode() == kMipsSubOvf) {
+             instr->arch_opcode() == kMipsSubOvf ||
+             instr->arch_opcode() == kMipsMulOvf) {
     Label flabel, tlabel;
     switch (instr->arch_opcode()) {
       case kMipsAddOvf:
@@ -1621,6 +1701,10 @@
         __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                           i.InputOperand(1), &flabel);
         break;
+      case kMipsMulOvf:
+        __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+                          i.InputOperand(1), &flabel);
+        break;
       default:
         UNREACHABLE();
         break;
@@ -1763,6 +1847,9 @@
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -1926,10 +2013,7 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int slot;
-          if (IsMaterializableFromFrame(src_object, &slot)) {
-            __ lw(dst, g.SlotToMemOperand(slot));
-          } else if (IsMaterializableFromRoot(src_object, &index)) {
+          if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
             __ li(dst, src_object);
@@ -1967,23 +2051,42 @@
       __ Move(dst, src);
     } else {
       DCHECK(destination->IsFPStackSlot());
-      __ sdc1(src, g.ToMemOperand(destination));
+      MachineRepresentation rep =
+          LocationOperand::cast(source)->representation();
+      if (rep == MachineRepresentation::kFloat64) {
+        __ sdc1(src, g.ToMemOperand(destination));
+      } else if (rep == MachineRepresentation::kFloat32) {
+        __ swc1(src, g.ToMemOperand(destination));
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        UNREACHABLE();
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
     if (destination->IsFPRegister()) {
-      LocationOperand* op = LocationOperand::cast(source);
-      if (op->representation() == MachineRepresentation::kFloat64) {
+      if (rep == MachineRepresentation::kFloat64) {
         __ ldc1(g.ToDoubleRegister(destination), src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+      } else if (rep == MachineRepresentation::kFloat32) {
         __ lwc1(g.ToDoubleRegister(destination), src);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        UNREACHABLE();
       }
     } else {
       FPURegister temp = kScratchDoubleReg;
-      __ ldc1(temp, src);
-      __ sdc1(temp, g.ToMemOperand(destination));
+      if (rep == MachineRepresentation::kFloat64) {
+        __ ldc1(temp, src);
+        __ sdc1(temp, g.ToMemOperand(destination));
+      } else if (rep == MachineRepresentation::kFloat32) {
+        __ lwc1(temp, src);
+        __ swc1(temp, g.ToMemOperand(destination));
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        UNREACHABLE();
+      }
     }
   } else {
     UNREACHABLE();
@@ -2033,24 +2136,46 @@
     } else {
       DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
-      __ Move(temp, src);
-      __ ldc1(src, dst);
-      __ sdc1(temp, dst);
+      MachineRepresentation rep =
+          LocationOperand::cast(source)->representation();
+      if (rep == MachineRepresentation::kFloat64) {
+        __ Move(temp, src);
+        __ ldc1(src, dst);
+        __ sdc1(temp, dst);
+      } else if (rep == MachineRepresentation::kFloat32) {
+        __ Move(temp, src);
+        __ lwc1(src, dst);
+        __ swc1(temp, dst);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        UNREACHABLE();
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
     FPURegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
-    MemOperand src1(src0.rm(), src0.offset() + kIntSize);
     MemOperand dst0 = g.ToMemOperand(destination);
-    MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
-    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
-    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
-    __ sw(temp_0, dst0);
-    __ lw(temp_0, src1);
-    __ sw(temp_0, dst1);
-    __ sdc1(temp_1, src0);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      MemOperand src1(src0.rm(), src0.offset() + kIntSize);
+      MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
+      __ ldc1(temp_1, dst0);  // Save destination in temp_1.
+      __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
+      __ sw(temp_0, dst0);
+      __ lw(temp_0, src1);
+      __ sw(temp_0, dst1);
+      __ sdc1(temp_1, src0);
+    } else if (rep == MachineRepresentation::kFloat32) {
+      __ lwc1(temp_1, dst0);  // Save destination in temp_1.
+      __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
+      __ sw(temp_0, dst0);
+      __ swc1(temp_1, src0);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      UNREACHABLE();
+    }
   } else {
     // No other combinations are possible.
     UNREACHABLE();
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 766a5b1..269ac0f 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -17,6 +17,7 @@
   V(MipsSub)                       \
   V(MipsSubOvf)                    \
   V(MipsMul)                       \
+  V(MipsMulOvf)                    \
   V(MipsMulHigh)                   \
   V(MipsMulHighU)                  \
   V(MipsDiv)                       \
@@ -46,7 +47,6 @@
   V(MipsCmpS)                      \
   V(MipsAddS)                      \
   V(MipsSubS)                      \
-  V(MipsSubPreserveNanS)           \
   V(MipsMulS)                      \
   V(MipsDivS)                      \
   V(MipsModS)                      \
@@ -57,7 +57,6 @@
   V(MipsCmpD)                      \
   V(MipsAddD)                      \
   V(MipsSubD)                      \
-  V(MipsSubPreserveNanD)           \
   V(MipsMulD)                      \
   V(MipsDivD)                      \
   V(MipsModD)                      \
@@ -65,6 +64,8 @@
   V(MipsSqrtD)                     \
   V(MipsMaxD)                      \
   V(MipsMinD)                      \
+  V(MipsNegS)                      \
+  V(MipsNegD)                      \
   V(MipsAddPair)                   \
   V(MipsSubPair)                   \
   V(MipsMulPair)                   \
@@ -96,25 +97,35 @@
   V(MipsLbu)                       \
   V(MipsSb)                        \
   V(MipsLh)                        \
+  V(MipsUlh)                       \
   V(MipsLhu)                       \
+  V(MipsUlhu)                      \
   V(MipsSh)                        \
+  V(MipsUsh)                       \
   V(MipsLw)                        \
+  V(MipsUlw)                       \
   V(MipsSw)                        \
+  V(MipsUsw)                       \
   V(MipsLwc1)                      \
+  V(MipsUlwc1)                     \
   V(MipsSwc1)                      \
+  V(MipsUswc1)                     \
   V(MipsLdc1)                      \
+  V(MipsUldc1)                     \
   V(MipsSdc1)                      \
+  V(MipsUsdc1)                     \
   V(MipsFloat64ExtractLowWord32)   \
   V(MipsFloat64ExtractHighWord32)  \
   V(MipsFloat64InsertLowWord32)    \
   V(MipsFloat64InsertHighWord32)   \
   V(MipsFloat64SilenceNaN)         \
-  V(MipsFloat64Max)                \
-  V(MipsFloat64Min)                \
   V(MipsFloat32Max)                \
+  V(MipsFloat64Max)                \
   V(MipsFloat32Min)                \
+  V(MipsFloat64Min)                \
   V(MipsPush)                      \
   V(MipsStoreToStackSlot)          \
+  V(MipsByteSwap32)                \
   V(MipsStackClaim)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index c95613e..4c35369 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -104,7 +104,14 @@
     inputs[input_count++] = g.Label(cont->false_block());
   }
 
-  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsDeoptimize()) {
+    // If we can deoptimize as a result of the binop, we need to make sure that
+    // the deopt inputs are not overwritten by the binop result. One way
+    // to achieve that is to declare the output register as same-as-first.
+    outputs[output_count++] = g.DefineSameAsFirst(node);
+  } else {
+    outputs[output_count++] = g.DefineAsRegister(node);
+  }
   if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
@@ -117,7 +124,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -152,6 +159,8 @@
     case MachineRepresentation::kWord16:
       opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
       break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord32:
       opcode = kMipsLw;
@@ -231,6 +240,8 @@
       case MachineRepresentation::kWord16:
         opcode = kMipsSh;
         break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
       case MachineRepresentation::kWord32:
         opcode = kMipsSw;
@@ -473,6 +484,13 @@
 
 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsByteSwap32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
 
 void InstructionSelector::VisitWord32Ctz(Node* node) {
   MipsOperandGenerator g(this);
@@ -754,32 +772,10 @@
   VisitRRR(this, kMipsSubS, node);
 }
 
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  VisitRRR(this, kMipsSubPreserveNanS, node);
-}
-
 void InstructionSelector::VisitFloat64Sub(Node* node) {
-  MipsOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
-      CanCover(m.node(), m.right().node())) {
-    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-        CanCover(m.right().node(), m.right().InputAt(0))) {
-      Float64BinopMatcher mright0(m.right().InputAt(0));
-      if (mright0.left().IsMinusZero()) {
-        Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
-             g.UseRegister(mright0.right().node()));
-        return;
-      }
-    }
-  }
   VisitRRR(this, kMipsSubD, node);
 }
 
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
-  VisitRRR(this, kMipsSubPreserveNanD, node);
-}
-
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kMipsMulS, node);
 }
@@ -806,64 +802,28 @@
        g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
 }
 
-
 void InstructionSelector::VisitFloat32Max(Node* node) {
   MipsOperandGenerator g(this);
-  if (IsMipsArchVariant(kMips32r6)) {
-    Emit(kMipsFloat32Max, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMipsFloat32Max, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
-
 void InstructionSelector::VisitFloat64Max(Node* node) {
   MipsOperandGenerator g(this);
-  if (IsMipsArchVariant(kMips32r6)) {
-    Emit(kMipsFloat64Max, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMipsFloat64Max, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
-
 void InstructionSelector::VisitFloat32Min(Node* node) {
   MipsOperandGenerator g(this);
-  if (IsMipsArchVariant(kMips32r6)) {
-    Emit(kMipsFloat32Min, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMipsFloat32Min, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
-
 void InstructionSelector::VisitFloat64Min(Node* node) {
   MipsOperandGenerator g(this);
-  if (IsMipsArchVariant(kMips32r6)) {
-    Emit(kMipsFloat64Min, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMipsFloat64Min, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
 
@@ -930,15 +890,19 @@
   VisitRR(this, kMipsFloat64RoundTiesEven, node);
 }
 
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitRR(this, kMipsNegS, node);
+}
 
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitRR(this, kMipsNegD, node);
+}
 
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   MipsOperandGenerator g(this);
-  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
-       g.UseFixed(node->InputAt(1), f14))
+  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
+       g.UseFixed(node->InputAt(1), f4))
       ->MarkAsCall();
 }
 
@@ -957,7 +921,7 @@
   // Prepare for C function call.
   if (descriptor->IsCFunctionCall()) {
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr);
 
     // Poke any stack arguments.
@@ -991,6 +955,104 @@
 
 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+  UnalignedLoadRepresentation load_rep =
+      UnalignedLoadRepresentationOf(node->op());
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      UNREACHABLE();
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
+      break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord32:
+      opcode = kMipsUlw;
+      break;
+    case MachineRepresentation::kFloat32:
+      opcode = kMipsUlwc1;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kMipsUldc1;
+      break;
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+
+  // TODO(mips): I guess this could be done in a better way.
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kFloat32:
+      opcode = kMipsUswc1;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kMipsUsdc1;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      UNREACHABLE();
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kMipsUsh;
+      break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord32:
+      opcode = kMipsUsw;
+      break;
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+  }
+}
+
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
   MipsOperandGenerator g(this);
@@ -1015,6 +1077,8 @@
       opcode = kCheckedLoadFloat64;
       break;
     case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
@@ -1093,7 +1157,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1267,6 +1331,9 @@
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kMipsSubOvf, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMipsMulOvf, cont);
               default:
                 break;
             }
@@ -1290,7 +1357,8 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
-                             g.TempImmediate(0), cont->frame_state());
+                             g.TempImmediate(0), cont->reason(),
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@@ -1307,14 +1375,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1401,6 +1469,14 @@
   VisitBinop(this, node, kMipsSubOvf, &cont);
 }
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kMipsMulOvf, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMipsMulOvf, &cont);
+}
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -1558,19 +1634,18 @@
              MachineOperatorBuilder::kFloat64RoundTruncate |
              MachineOperatorBuilder::kFloat64RoundTiesEven;
   }
+
   return flags | MachineOperatorBuilder::kWord32Ctz |
          MachineOperatorBuilder::kWord32Popcnt |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
-         MachineOperatorBuilder::kFloat64Min |
-         MachineOperatorBuilder::kFloat64Max |
-         MachineOperatorBuilder::kFloat32Min |
-         MachineOperatorBuilder::kFloat32Max |
          MachineOperatorBuilder::kFloat32RoundDown |
          MachineOperatorBuilder::kFloat32RoundUp |
          MachineOperatorBuilder::kFloat32RoundTruncate |
-         MachineOperatorBuilder::kFloat32RoundTiesEven;
+         MachineOperatorBuilder::kFloat32RoundTiesEven |
+         MachineOperatorBuilder::kWord32ReverseBytes |
+         MachineOperatorBuilder::kWord64ReverseBytes;
 }
 
 // static
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 9d4201f..3e2e8e2 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -524,21 +524,7 @@
   __ Pop(ra, fp);
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ daddiu(sp, sp, sp_slot_delta * kPointerSize);
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -571,6 +557,38 @@
   __ bind(&done);
 }
 
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+                                   FrameAccessState* state,
+                                   int new_slot_above_sp,
+                                   bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    masm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    masm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
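
Note: tail-call stack adjustment is now split around the gap's parallel moves. Before the gap the stack may only grow (allow_shrinkage is passed as false from AssembleTailCallBeforeGap), so spill slots the moves still read remain addressable; after the gap the stack pointer is snapped exactly to first_unused_stack_slot. The delta computation, isolated as a sketch:

    // Mirrors AdjustStackPointerForTailCall's arithmetic.
    int StackSlotDelta(int first_unused_stack_slot, int sp_to_fp_slots,
                       int fixed_slots_above_fp) {
      int current_sp_offset = sp_to_fp_slots + fixed_slots_above_fp;
      return first_unused_stack_slot - current_sp_offset;
    }
    // e.g. 3 SP-to-FP slots + 2 fixed slots gives a current offset of 5; a
    // target of 7 grows the stack by 2 slots before the gap, while a target
    // of 4 (delta -1) is only applied in the after-gap pass.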
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -593,8 +611,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -608,14 +624,14 @@
         __ Jump(at);
       }
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!instr->InputAt(0)->IsImmediate());
       __ Jump(i.InputRegister(0));
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -640,8 +656,6 @@
         __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
       }
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -650,6 +664,7 @@
       __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction: {
@@ -660,7 +675,7 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
@@ -687,6 +702,9 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchComment: {
       Address comment_string = i.InputExternalReference(0).address();
       __ RecordComment(reinterpret_cast<const char*>(comment_string));
@@ -750,18 +768,33 @@
                Operand(offset.offset()));
       break;
     }
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
-    case kIeee754Float64Atan2:
-      ASSEMBLE_IEEE754_BINOP(atan2);
-      break;
     case kIeee754Float64Atanh:
       ASSEMBLE_IEEE754_UNOP(atanh);
       break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Cbrt:
       ASSEMBLE_IEEE754_UNOP(cbrt);
       break;
@@ -783,12 +816,23 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
+    case kIeee754Float64Pow: {
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      break;
+    }
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kMips64Add:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -810,6 +854,9 @@
     case kMips64Mul:
       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
+    case kMips64MulOvf:
+      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+      break;
     case kMips64MulHigh:
       __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -879,9 +926,29 @@
     case kMips64And:
       __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
+    case kMips64And32:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      } else {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
     case kMips64Or:
       __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
+    case kMips64Or32:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      } else {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
     case kMips64Nor:
       if (instr->InputAt(1)->IsRegister()) {
         __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -890,9 +957,30 @@
         __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
       }
       break;
+    case kMips64Nor32:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      } else {
+        DCHECK(i.InputOperand(1).immediate() == 0);
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+      }
+      break;
     case kMips64Xor:
       __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
+    case kMips64Xor32:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      } else {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
     case kMips64Clz:
       __ Clz(i.OutputRegister(), i.InputRegister(0));
       break;
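
Note: the new 32-bit logical opcodes (kMips64And32/Or32/Nor32/Xor32) and the 32-bit shift cases below all prepend `sll reg, reg, 0`. On MIPS64 that is not a no-op: the 32-bit shift encoding writes bits 31..0 and sign-extends bit 31 into the upper half, putting each operand into the canonical sign-extended form that 32-bit compares and branches expect. In C terms:

    // What `sll rd, rs, 0` computes on MIPS64 (sketch):
    int64_t CanonicalizeWord32(int64_t r) {
      return static_cast<int32_t>(r);  // keep bits 31..0, sign-extend to 64
    }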
@@ -1046,18 +1134,22 @@
       break;
     case kMips64Shr:
       if (instr->InputAt(1)->IsRegister()) {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
         __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
       } else {
         int64_t imm = i.InputOperand(1).immediate();
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
         __ srl(i.OutputRegister(), i.InputRegister(0),
                static_cast<uint16_t>(imm));
       }
       break;
     case kMips64Sar:
       if (instr->InputAt(1)->IsRegister()) {
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
         __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
       } else {
         int64_t imm = i.InputOperand(1).immediate();
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
         __ sra(i.OutputRegister(), i.InputRegister(0),
                static_cast<uint16_t>(imm));
       }
@@ -1172,11 +1264,6 @@
       __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
-    case kMips64SubPreserveNanS:
-      __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
-                                        i.InputDoubleRegister(0),
-                                        i.InputDoubleRegister(1));
-      break;
     case kMips64MulS:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1203,6 +1290,9 @@
     case kMips64AbsS:
       __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
       break;
+    case kMips64NegS:
+      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+      break;
     case kMips64SqrtS: {
       __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
@@ -1227,11 +1317,6 @@
       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
       break;
-    case kMips64SubPreserveNanD:
-      __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
-                                        i.InputDoubleRegister(0),
-                                        i.InputDoubleRegister(1));
-      break;
     case kMips64MulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1257,6 +1342,9 @@
     case kMips64AbsD:
       __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
+    case kMips64NegD:
+      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kMips64SqrtD: {
       __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
@@ -1301,65 +1389,53 @@
       ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
       break;
     }
+    case kMips64Float32Max: {
+      Label compare_nan, done_compare;
+      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+                       i.InputSingleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputSingleRegister(),
+              std::numeric_limits<float>::quiet_NaN());
+      __ bind(&done_compare);
+      break;
+    }
     case kMips64Float64Max: {
-      // (b < a) ? a : b
-      if (kArchVariant == kMips64r6) {
-        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
-        // Left operand is result, passthrough if false.
-        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
+      Label compare_nan, done_compare;
+      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                       i.InputDoubleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputDoubleRegister(),
+              std::numeric_limits<double>::quiet_NaN());
+      __ bind(&done_compare);
+      break;
+    }
+    case kMips64Float32Min: {
+      Label compare_nan, done_compare;
+      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+                       i.InputSingleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputSingleRegister(),
+              std::numeric_limits<float>::quiet_NaN());
+      __ bind(&done_compare);
       break;
     }
     case kMips64Float64Min: {
-      // (a < b) ? a : b
-      if (kArchVariant == kMips64r6) {
-        __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                 i.InputDoubleRegister(1));
-        __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
-        // Right operand is result, passthrough if false.
-        __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
+      Label compare_nan, done_compare;
+      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                       i.InputDoubleRegister(1), &compare_nan);
+      __ Branch(&done_compare);
+      __ bind(&compare_nan);
+      __ Move(i.OutputDoubleRegister(),
+              std::numeric_limits<double>::quiet_NaN());
+      __ bind(&done_compare);
       break;
     }
     case kMips64Float64SilenceNaN:
       __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
-    case kMips64Float32Max: {
-      // (b < a) ? a : b
-      if (kArchVariant == kMips64r6) {
-        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
-        // Left operand is result, passthrough if false.
-        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
-      break;
-    }
-    case kMips64Float32Min: {
-      // (a < b) ? a : b
-      if (kArchVariant == kMips64r6) {
-        __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                 i.InputDoubleRegister(1));
-        __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(0));
-      } else {
-        __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
-        // Right operand is result, passthrough if false.
-        __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
-      }
-      break;
-    }
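
Note: the old select-based sequences computed `(b < a) ? a : b` and therefore returned one of the operands when either input was NaN; the MaxNaNCheck_/MinNaNCheck_ rewrite instead branches to a slow path that materializes a canonical quiet NaN. This is also why the optional kFloat32/64Min/Max flags drop out of SupportedMachineOperatorFlags in both ports: the operators are now implemented unconditionally with NaN-correct semantics. The contract being implemented, as a sketch:

    #include <cmath>
    #include <limits>
    // Reference semantics for kMips64Float64Max (sketch; MaxNaNCheck_d is
    // also expected to resolve the -0 vs +0 ordering in favor of +0).
    double Float64Max(double a, double b) {
      if (std::isnan(a) || std::isnan(b))
        return std::numeric_limits<double>::quiet_NaN();
      return a > b ? a : b;
    }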
     case kMips64CvtSD:
       __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
       break;
@@ -1453,6 +1529,11 @@
       FPURegister scratch = kScratchDoubleReg;
       __ trunc_w_s(scratch, i.InputDoubleRegister(0));
       __ mfc1(i.OutputRegister(), scratch);
+      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+      // because INT32_MIN allows easier out-of-bounds detection.
+      __ addiu(kScratchReg, i.OutputRegister(), 1);
+      __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
+      __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
       break;
     }
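
Note: trunc_w_s saturates to INT32_MAX (the FPU's invalid-result value) when the input is NaN or out of range. The three added instructions rewrite that indicator to INT32_MIN: the addiu wraps exactly when the result is INT32_MAX, the slt detects the signed wrap, and Movn conditionally moves the wrapped value (INT32_MIN) into the result. The unsigned variant a few cases down plays the same trick with 0 in place of UINT32_MAX. Equivalent scalar logic:

    #include <limits>
    // Sketch of the addiu/slt/Movn triple, written without relying on the
    // signed overflow that the hardware add performs.
    int32_t NormalizeTruncResult(int32_t r) {
      return r == std::numeric_limits<int32_t>::max()
                 ? std::numeric_limits<int32_t>::min()
                 : r;
    }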
     case kMips64TruncLS: {
@@ -1521,6 +1602,10 @@
       FPURegister scratch = kScratchDoubleReg;
       // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
       __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+      // because 0 allows easier out-of-bounds detection.
+      __ addiu(kScratchReg, i.OutputRegister(), 1);
+      __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
       break;
     }
     case kMips64TruncUlS: {
@@ -1571,43 +1656,83 @@
     case kMips64Lhu:
       __ lhu(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMips64Ulhu:
+      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMips64Lh:
       __ lh(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMips64Ulh:
+      __ Ulh(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMips64Sh:
       __ sh(i.InputRegister(2), i.MemoryOperand());
       break;
+    case kMips64Ush:
+      __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+      break;
     case kMips64Lw:
       __ lw(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMips64Ulw:
+      __ Ulw(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMips64Lwu:
       __ lwu(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMips64Ulwu:
+      __ Ulwu(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMips64Ld:
       __ ld(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMips64Uld:
+      __ Uld(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMips64Sw:
       __ sw(i.InputRegister(2), i.MemoryOperand());
       break;
+    case kMips64Usw:
+      __ Usw(i.InputRegister(2), i.MemoryOperand());
+      break;
     case kMips64Sd:
       __ sd(i.InputRegister(2), i.MemoryOperand());
       break;
+    case kMips64Usd:
+      __ Usd(i.InputRegister(2), i.MemoryOperand());
+      break;
     case kMips64Lwc1: {
       __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
       break;
     }
+    case kMips64Ulwc1: {
+      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
+      break;
+    }
     case kMips64Swc1: {
       size_t index = 0;
       MemOperand operand = i.MemoryOperand(&index);
       __ swc1(i.InputSingleRegister(index), operand);
       break;
     }
+    case kMips64Uswc1: {
+      size_t index = 0;
+      MemOperand operand = i.MemoryOperand(&index);
+      __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+      break;
+    }
     case kMips64Ldc1:
       __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
+    case kMips64Uldc1:
+      __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
+      break;
     case kMips64Sdc1:
       __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
       break;
+    case kMips64Usdc1:
+      __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+      break;
     case kMips64Push:
       if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1631,6 +1756,15 @@
       }
       break;
     }
+    case kMips64ByteSwap64: {
+      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
+      break;
+    }
+    case kMips64ByteSwap32: {
+      __ ByteSwapUnsigned(i.OutputRegister(0), i.InputRegister(0), 4);
+      __ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
+      break;
+    }
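
Note: Word64ReverseBytes maps to a single ByteSwapSigned over all 8 bytes; the 32-bit variant appears to reuse the 64-bit swap sequence, which leaves the swapped word in bits 63..32, and the logical dsrl32 brings it back down zero-extended. Reference semantics, assuming the macro does perform a full 64-bit swap internally:

    #include <cstdint>
    // Net effect of kMips64ByteSwap32 (sketch; __builtin_bswap64 is the
    // GCC/Clang builtin).
    uint64_t Word32ReverseBytes(uint64_t x) {
      uint64_t swapped = __builtin_bswap64(x);  // bytes 0..3 land in bits 63..32
      return swapped >> 32;  // zero-extended bswap32 of the low word
    }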
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
       break;
@@ -1785,6 +1919,20 @@
         UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
         break;
     }
+  } else if (instr->arch_opcode() == kMips64MulOvf) {
+    switch (branch->condition) {
+      case kOverflow: {
+        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+                        i.InputOperand(1), tlabel, flabel, kScratchReg);
+      } break;
+      case kNotOverflow: {
+        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+                        i.InputOperand(1), flabel, tlabel, kScratchReg);
+      } break;
+      default:
+        UNSUPPORTED_COND(kMips64MulOvf, branch->condition);
+        break;
+    }
   } else if (instr->arch_opcode() == kMips64Cmp) {
     cc = FlagsConditionToConditionCmp(branch->condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1860,7 +2008,8 @@
       __ xori(result, result, 1);
     return;
   } else if (instr->arch_opcode() == kMips64DaddOvf ||
-             instr->arch_opcode() == kMips64DsubOvf) {
+             instr->arch_opcode() == kMips64DsubOvf ||
+             instr->arch_opcode() == kMips64MulOvf) {
     Label flabel, tlabel;
     switch (instr->arch_opcode()) {
       case kMips64DaddOvf:
@@ -1872,6 +2021,10 @@
         __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                            i.InputOperand(1), &flabel);
         break;
+      case kMips64MulOvf:
+        __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+                          i.InputOperand(1), &flabel, kScratchReg);
+        break;
       default:
         UNREACHABLE();
         break;
@@ -2015,6 +2168,9 @@
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
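
Note: recording the reason immediately before the runtime-entry call gives each bailout site a reloc entry that --trace-deopt and the profiler can attribute. Per deoptimization table entry the emitted sequence is now, schematically:

    // Sketch of the per-entry epilogue:
    //   __ RecordDeoptReason(reason, 0, deoptimization_id);
    //   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);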
@@ -2179,10 +2335,7 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int slot;
-          if (IsMaterializableFromFrame(src_object, &slot)) {
-            __ ld(dst, g.SlotToMemOperand(slot));
-          } else if (IsMaterializableFromRoot(src_object, &index)) {
+          if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
             __ li(dst, src_object);
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 67c84f1..e3dedd1 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -19,6 +19,7 @@
   V(Mips64Dsub)                     \
   V(Mips64DsubOvf)                  \
   V(Mips64Mul)                      \
+  V(Mips64MulOvf)                   \
   V(Mips64MulHigh)                  \
   V(Mips64DMulHigh)                 \
   V(Mips64MulHighU)                 \
@@ -32,9 +33,13 @@
   V(Mips64ModU)                     \
   V(Mips64DmodU)                    \
   V(Mips64And)                      \
+  V(Mips64And32)                    \
   V(Mips64Or)                       \
+  V(Mips64Or32)                     \
   V(Mips64Nor)                      \
+  V(Mips64Nor32)                    \
   V(Mips64Xor)                      \
+  V(Mips64Xor32)                    \
   V(Mips64Clz)                      \
   V(Mips64Lsa)                      \
   V(Mips64Dlsa)                     \
@@ -61,22 +66,22 @@
   V(Mips64CmpS)                     \
   V(Mips64AddS)                     \
   V(Mips64SubS)                     \
-  V(Mips64SubPreserveNanS)          \
   V(Mips64MulS)                     \
   V(Mips64DivS)                     \
   V(Mips64ModS)                     \
   V(Mips64AbsS)                     \
+  V(Mips64NegS)                     \
   V(Mips64SqrtS)                    \
   V(Mips64MaxS)                     \
   V(Mips64MinS)                     \
   V(Mips64CmpD)                     \
   V(Mips64AddD)                     \
   V(Mips64SubD)                     \
-  V(Mips64SubPreserveNanD)          \
   V(Mips64MulD)                     \
   V(Mips64DivD)                     \
   V(Mips64ModD)                     \
   V(Mips64AbsD)                     \
+  V(Mips64NegD)                     \
   V(Mips64SqrtD)                    \
   V(Mips64MaxD)                     \
   V(Mips64MinD)                     \
@@ -116,30 +121,44 @@
   V(Mips64Lbu)                      \
   V(Mips64Sb)                       \
   V(Mips64Lh)                       \
+  V(Mips64Ulh)                      \
   V(Mips64Lhu)                      \
+  V(Mips64Ulhu)                     \
   V(Mips64Sh)                       \
-  V(Mips64Lw)                       \
-  V(Mips64Lwu)                      \
-  V(Mips64Sw)                       \
+  V(Mips64Ush)                      \
   V(Mips64Ld)                       \
+  V(Mips64Uld)                      \
+  V(Mips64Lw)                       \
+  V(Mips64Ulw)                      \
+  V(Mips64Lwu)                      \
+  V(Mips64Ulwu)                     \
+  V(Mips64Sw)                       \
+  V(Mips64Usw)                      \
   V(Mips64Sd)                       \
+  V(Mips64Usd)                      \
   V(Mips64Lwc1)                     \
+  V(Mips64Ulwc1)                    \
   V(Mips64Swc1)                     \
+  V(Mips64Uswc1)                    \
   V(Mips64Ldc1)                     \
+  V(Mips64Uldc1)                    \
   V(Mips64Sdc1)                     \
+  V(Mips64Usdc1)                    \
   V(Mips64BitcastDL)                \
   V(Mips64BitcastLD)                \
   V(Mips64Float64ExtractLowWord32)  \
   V(Mips64Float64ExtractHighWord32) \
   V(Mips64Float64InsertLowWord32)   \
   V(Mips64Float64InsertHighWord32)  \
+  V(Mips64Float32Max)               \
   V(Mips64Float64Max)               \
+  V(Mips64Float32Min)               \
   V(Mips64Float64Min)               \
   V(Mips64Float64SilenceNaN)        \
-  V(Mips64Float32Max)               \
-  V(Mips64Float32Min)               \
   V(Mips64Push)                     \
   V(Mips64StoreToStackSlot)         \
+  V(Mips64ByteSwap64)               \
+  V(Mips64ByteSwap32)               \
   V(Mips64StackClaim)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 3e1f98e..1167117 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -109,7 +109,14 @@
     inputs[input_count++] = g.Label(cont->false_block());
   }
 
-  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsDeoptimize()) {
+    // If we can deoptimize as a result of the binop, we need to make sure that
+    // the deopt inputs are not overwritten by the binop result. One way
+    // to achieve that is to declare the output register as same-as-first.
+    outputs[output_count++] = g.DefineSameAsFirst(node);
+  } else {
+    outputs[output_count++] = g.DefineAsRegister(node);
+  }
   if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
@@ -122,7 +129,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -135,12 +142,29 @@
   VisitBinop(selector, node, opcode, &cont);
 }
 
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+              Node* output = nullptr) {
+  Mips64OperandGenerator g(selector);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  if (g.CanBeImmediate(index, opcode)) {
+    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+                   g.DefineAsRegister(output == nullptr ? node : output),
+                   g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+                   addr_reg, g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+                   g.DefineAsRegister(output == nullptr ? node : output),
+                   addr_reg, g.TempImmediate(0));
+  }
+}
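
Note: load emission is factored into EmitLoad with an optional `output` node so that a consuming node can be defined directly as the destination of the load. VisitLoad passes nothing; the sign-extension fusion in VisitChangeInt32ToInt64 further down passes the conversion node:

    // Usage sketch:
    EmitLoad(this, load_node, opcode);             // defines load_node itself
    EmitLoad(this, load_node, opcode, conv_node);  // defines conv_node instead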
 
 void InstructionSelector::VisitLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  Mips64OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
 
   ArchOpcode opcode = kArchNop;
   switch (load_rep.representation()) {
@@ -160,6 +184,8 @@
     case MachineRepresentation::kWord32:
       opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
       break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord64:
       opcode = kMips64Ld;
@@ -170,17 +196,7 @@
       return;
   }
 
-  if (g.CanBeImmediate(index, opcode)) {
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
-  } else {
-    InstructionOperand addr_reg = g.TempRegister();
-    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
-         g.UseRegister(index), g.UseRegister(base));
-    // Emit desired load opcode, using temp addr_reg.
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
-  }
+  EmitLoad(this, node, opcode);
 }
 
 
@@ -241,6 +257,8 @@
       case MachineRepresentation::kWord32:
         opcode = kMips64Sw;
         break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
       case MachineRepresentation::kWord64:
         opcode = kMips64Sd;
@@ -312,7 +330,7 @@
       return;
     }
   }
-  VisitBinop(this, node, kMips64And);
+  VisitBinop(this, node, kMips64And32);
 }
 
 
@@ -368,7 +386,7 @@
 
 
 void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop(this, node, kMips64Or);
+  VisitBinop(this, node, kMips64Or32);
 }
 
 
@@ -384,7 +402,7 @@
     Int32BinopMatcher mleft(m.left().node());
     if (!mleft.right().HasValue()) {
       Mips64OperandGenerator g(this);
-      Emit(kMips64Nor, g.DefineAsRegister(node),
+      Emit(kMips64Nor32, g.DefineAsRegister(node),
            g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
@@ -393,11 +411,11 @@
   if (m.right().Is(-1)) {
     // Use Nor for bit negation and eliminate constant loading for xori.
     Mips64OperandGenerator g(this);
-    Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+    Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
          g.TempImmediate(0));
     return;
   }
-  VisitBinop(this, node, kMips64Xor);
+  VisitBinop(this, node, kMips64Xor32);
 }
 
 
@@ -490,7 +508,7 @@
   Mips64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
-      m.right().IsInRange(32, 63)) {
+      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
     // There's no need to sign/zero-extend to 64-bit if we shift out the upper
     // 32 bits anyway.
     Emit(kMips64Dshl, g.DefineSameAsFirst(node),
@@ -572,6 +590,17 @@
 
 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
 
 void InstructionSelector::VisitWord32Ctz(Node* node) {
   Mips64OperandGenerator g(this);
@@ -1042,9 +1071,32 @@
 
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
-  Mips64OperandGenerator g(this);
-  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
-       g.TempImmediate(0));
+  Node* value = node->InputAt(0);
+  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+    // Generate sign-extending load.
+    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+    InstructionCode opcode = kArchNop;
+    switch (load_rep.representation()) {
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
+        break;
+      case MachineRepresentation::kWord32:
+        opcode = kMips64Lw;
+        break;
+      default:
+        UNREACHABLE();
+        return;
+    }
+    EmitLoad(this, value, opcode, node);
+  } else {
+    Mips64OperandGenerator g(this);
+    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+         g.TempImmediate(0));
+  }
 }
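
Note: on MIPS64, lw already sign-extends its result into the full register, so when the 32-bit load is only used by the conversion (the CanCover check) the ChangeInt32ToInt64 is folded into the load via EmitLoad's output parameter; otherwise the generic `sll ..., 0` extension is kept. A C analogue of the fused case:

    #include <cstdint>
    // Sketch: before selection, t = Load[Int32](p); r = ChangeInt32ToInt64(t).
    // After fusion this is a single sign-extending lw.
    int64_t LoadAndExtend(const int32_t* p) {
      return *p;  // one lw on mips64: load and sign-extend in one step
    }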
 
 
@@ -1159,32 +1211,10 @@
   VisitRRR(this, kMips64SubS, node);
 }
 
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  VisitRRR(this, kMips64SubPreserveNanS, node);
-}
-
 void InstructionSelector::VisitFloat64Sub(Node* node) {
-  Mips64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
-      CanCover(m.node(), m.right().node())) {
-    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-        CanCover(m.right().node(), m.right().InputAt(0))) {
-      Float64BinopMatcher mright0(m.right().InputAt(0));
-      if (mright0.left().IsMinusZero()) {
-        Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
-             g.UseRegister(mright0.right().node()));
-        return;
-      }
-    }
-  }
   VisitRRR(this, kMips64SubD, node);
 }
 
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
-  VisitRRR(this, kMips64SubPreserveNanD, node);
-}
-
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kMips64MulS, node);
 }
@@ -1212,64 +1242,28 @@
        g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
 }
 
-
 void InstructionSelector::VisitFloat32Max(Node* node) {
   Mips64OperandGenerator g(this);
-  if (kArchVariant == kMips64r6) {
-    Emit(kMips64Float32Max, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMips64Float32Max, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
-
 void InstructionSelector::VisitFloat64Max(Node* node) {
   Mips64OperandGenerator g(this);
-  if (kArchVariant == kMips64r6) {
-    Emit(kMips64Float64Max, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMips64Float64Max, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
-
 void InstructionSelector::VisitFloat32Min(Node* node) {
   Mips64OperandGenerator g(this);
-  if (kArchVariant == kMips64r6) {
-    Emit(kMips64Float32Min, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMips64Float32Min, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
-
 void InstructionSelector::VisitFloat64Min(Node* node) {
   Mips64OperandGenerator g(this);
-  if (kArchVariant == kMips64r6) {
-    Emit(kMips64Float64Min, g.DefineAsRegister(node),
-         g.UseUniqueRegister(node->InputAt(0)),
-         g.UseUniqueRegister(node->InputAt(1)));
-
-  } else {
-    // Reverse operands, and use same reg. for result and right operand.
-    Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
-  }
+  Emit(kMips64Float64Min, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
 
@@ -1336,15 +1330,19 @@
   VisitRR(this, kMips64Float64RoundTiesEven, node);
 }
 
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitRR(this, kMips64NegS, node);
+}
 
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitRR(this, kMips64NegD, node);
+}
 
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   Mips64OperandGenerator g(this);
-  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
-       g.UseFixed(node->InputAt(1), f14))
+  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
+       g.UseFixed(node->InputAt(1), f4))
       ->MarkAsCall();
 }
 
@@ -1363,7 +1361,7 @@
   // Prepare for C function call.
   if (descriptor->IsCFunctionCall()) {
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr);
 
     // Poke any stack arguments.
@@ -1394,6 +1392,106 @@
 
 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+  UnalignedLoadRepresentation load_rep =
+      UnalignedLoadRepresentationOf(node->op());
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      opcode = kMips64Ulwc1;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kMips64Uldc1;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      UNREACHABLE();
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
+      break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kWord64:
+      opcode = kMips64Uld;
+      break;
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kFloat32:
+      opcode = kMips64Uswc1;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kMips64Usdc1;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      UNREACHABLE();
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kMips64Ush;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kMips64Usw;
+      break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kWord64:
+      opcode = kMips64Usd;
+      break;
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+  }
+}
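
Note: these two visitors mirror VisitLoad/VisitStore but route through the U*-prefixed opcodes, which the macro-assembler expands into unaligned-safe sequences. Byte-sized and SIMD representations are UNREACHABLE because a one-byte access can never be misaligned and unaligned 128-bit traffic is not supported here. A rough idea of what kMips64Ulw expands to (pre-R6, one endianness; R6 dropped lwl/lwr and would need byte loads plus shifts instead):

    // Sketch only -- the real MacroAssembler::Ulw also handles offsets that
    // straddle the immediate range.
    //   lwr rd, offset+0(base)   // low bytes
    //   lwl rd, offset+3(base)   // high bytes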
+
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
   Mips64OperandGenerator g(this);
@@ -1421,6 +1519,8 @@
       opcode = kCheckedLoadFloat64;
       break;
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:
     case MachineRepresentation::kSimd128:
     case MachineRepresentation::kNone:
@@ -1471,6 +1571,8 @@
       opcode = kCheckedStoreFloat64;
       break;
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:
     case MachineRepresentation::kSimd128:
     case MachineRepresentation::kNone:
@@ -1505,7 +1607,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1630,7 +1732,8 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
-                             g.TempImmediate(0), cont->frame_state());
+                             g.TempImmediate(0), cont->reason(),
+                             cont->frame_state());
   } else {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
@@ -1730,6 +1833,9 @@
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kMips64Dsub, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMips64MulOvf, cont);
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kMips64DaddOvf, cont);
@@ -1764,14 +1870,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1859,6 +1965,14 @@
   VisitBinop(this, node, kMips64Dsub, &cont);
 }
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kMips64MulOvf, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMips64MulOvf, &cont);
+}
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -2056,17 +2170,14 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kWord32Ctz |
+  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+  return flags | MachineOperatorBuilder::kWord32Ctz |
          MachineOperatorBuilder::kWord64Ctz |
          MachineOperatorBuilder::kWord32Popcnt |
          MachineOperatorBuilder::kWord64Popcnt |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
-         MachineOperatorBuilder::kFloat64Min |
-         MachineOperatorBuilder::kFloat64Max |
-         MachineOperatorBuilder::kFloat32Min |
-         MachineOperatorBuilder::kFloat32Max |
          MachineOperatorBuilder::kFloat64RoundDown |
          MachineOperatorBuilder::kFloat32RoundDown |
          MachineOperatorBuilder::kFloat64RoundUp |
@@ -2074,7 +2185,9 @@
          MachineOperatorBuilder::kFloat64RoundTruncate |
          MachineOperatorBuilder::kFloat32RoundTruncate |
          MachineOperatorBuilder::kFloat64RoundTiesEven |
-         MachineOperatorBuilder::kFloat32RoundTiesEven;
+         MachineOperatorBuilder::kFloat32RoundTiesEven |
+         MachineOperatorBuilder::kWord32ReverseBytes |
+         MachineOperatorBuilder::kWord64ReverseBytes;
 }
 
 // static
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index 4753d15..482c254 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -28,33 +28,7 @@
 typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
 
 bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
-  if (set.find(operand) != set.end()) return true;
-  // Only FP registers on archs with non-simple aliasing need extra checks.
-  if (!operand.IsFPRegister() || kSimpleFPAliasing) return false;
-
-  const LocationOperand& loc = LocationOperand::cast(operand);
-  MachineRepresentation rep = loc.representation();
-  MachineRepresentation other_fp_rep = rep == MachineRepresentation::kFloat64
-                                           ? MachineRepresentation::kFloat32
-                                           : MachineRepresentation::kFloat64;
-  const RegisterConfiguration* config = RegisterConfiguration::Turbofan();
-  if (config->fp_aliasing_kind() != RegisterConfiguration::COMBINE) {
-    // Overlap aliasing case.
-    return set.find(LocationOperand(loc.kind(), loc.location_kind(),
-                                    other_fp_rep, loc.register_code())) !=
-           set.end();
-  }
-  // Combine aliasing case.
-  int alias_base_index = -1;
-  int aliases = config->GetAliases(rep, loc.register_code(), other_fp_rep,
-                                   &alias_base_index);
-  while (aliases--) {
-    int aliased_reg = alias_base_index + aliases;
-    if (set.find(LocationOperand(loc.kind(), loc.location_kind(), other_fp_rep,
-                                 aliased_reg)) != set.end())
-      return true;
-  }
-  return false;
+  return set.find(operand) != set.end();
 }
 
 int FindFirstNonEmptySlot(const Instruction* instr) {
@@ -145,7 +119,7 @@
 
   // A ret instruction or a tail call makes any assignment before it
   // unnecessary, except for the assignments that feed its inputs.
-  if (instruction->opcode() == ArchOpcode::kArchRet) {
+  if (instruction->IsRet() || instruction->IsTailCall()) {
     for (MoveOperands* move : *moves) {
       if (inputs.find(move->destination()) == inputs.end()) {
         move->Eliminate();
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index 6238be3..10aed51 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -128,6 +128,7 @@
     return this->HasValue() && this->Value() < 0 &&
            (-this->Value() & (-this->Value() - 1)) == 0;
   }
+  bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
 };
 
 typedef IntMatcher<int32_t, IrOpcode::kInt32Constant> Int32Matcher;
@@ -157,6 +158,7 @@
   bool IsMinusZero() const {
     return this->Is(0.0) && std::signbit(this->Value());
   }
+  bool IsNegative() const { return this->HasValue() && this->Value() < 0.0; }
   bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
   bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
 };
@@ -171,6 +173,10 @@
     : public ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant> {
   explicit HeapObjectMatcher(Node* node)
       : ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>(node) {}
+
+  bool Is(Handle<HeapObject> const& value) const {
+    return this->HasValue() && this->Value().address() == value.address();
+  }
 };
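
Note: two small matcher predicates arrive here. IsNegative gives the selectors a cheap "is this constant negative" test on both the integer and float matchers, and HeapObjectMatcher::Is compares against a specific handle by address. Illustrative use (the surrounding declarations are assumed, not from this patch):

    // Sketch of typical peephole uses.
    Float64BinopMatcher m(node);
    if (m.right().IsNegative()) {
      // e.g. rewrite x + (-c) as x - c
    }
    HeapObjectMatcher h(input);
    if (h.Is(isolate->factory()->undefined_value())) {
      // constant-fold a reference equality against undefined
    }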
 
 
@@ -313,11 +319,12 @@
 typedef ScaleMatcher<Int64BinopMatcher, IrOpcode::kInt64Mul,
                      IrOpcode::kWord64Shl> Int64ScaleMatcher;
 
-
-template <class BinopMatcher, IrOpcode::Value kAddOpcode,
-          IrOpcode::Value kMulOpcode, IrOpcode::Value kShiftOpcode>
+template <class BinopMatcher, IrOpcode::Value AddOpcode,
+          IrOpcode::Value SubOpcode, IrOpcode::Value kMulOpcode,
+          IrOpcode::Value kShiftOpcode>
 struct AddMatcher : public BinopMatcher {
-  static const IrOpcode::Value kOpcode = kAddOpcode;
+  static const IrOpcode::Value kAddOpcode = AddOpcode;
+  static const IrOpcode::Value kSubOpcode = SubOpcode;
   typedef ScaleMatcher<BinopMatcher, kMulOpcode, kShiftOpcode> Matcher;
 
   AddMatcher(Node* node, bool allow_input_swap)
@@ -368,6 +375,9 @@
     if (this->right().opcode() == kAddOpcode &&
         this->left().opcode() != kAddOpcode) {
       this->SwapInputs();
+    } else if (this->right().opcode() == kSubOpcode &&
+               this->left().opcode() != kSubOpcode) {
+      this->SwapInputs();
     }
   }
 
@@ -375,21 +385,35 @@
   bool power_of_two_plus_one_;
 };
 
-typedef AddMatcher<Int32BinopMatcher, IrOpcode::kInt32Add, IrOpcode::kInt32Mul,
-                   IrOpcode::kWord32Shl> Int32AddMatcher;
-typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Mul,
-                   IrOpcode::kWord64Shl> Int64AddMatcher;
+typedef AddMatcher<Int32BinopMatcher, IrOpcode::kInt32Add, IrOpcode::kInt32Sub,
+                   IrOpcode::kInt32Mul, IrOpcode::kWord32Shl>
+    Int32AddMatcher;
+typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Sub,
+                   IrOpcode::kInt64Mul, IrOpcode::kWord64Shl>
+    Int64AddMatcher;
 
+enum DisplacementMode { kPositiveDisplacement, kNegativeDisplacement };
+
+enum class AddressOption : uint8_t {
+  kAllowNone = 0u,
+  kAllowInputSwap = 1u << 0,
+  kAllowScale = 1u << 1,
+  kAllowAll = kAllowInputSwap | kAllowScale
+};
+
+typedef base::Flags<AddressOption, uint8_t> AddressOptions;
+DEFINE_OPERATORS_FOR_FLAGS(AddressOptions);
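
Note: AddMatcher learns the matching Sub opcode and BaseWithIndexAndDisplacementMatcher grows a DisplacementMode, so address patterns whose displacement is subtracted can be matched instead of falling back to explicit arithmetic; AddressOptions replaces the old bare bool so callers can permit input swapping and index scaling independently. The newly matched shapes, schematically (S = scaled index, B = base, D = constant displacement):

    // Sketch of the new forms handled in Initialize() below:
    //   (S + (B - D))   -> base B, index S, displacement D, kNegativeDisplacement
    //   ((S - D) + B)   -> same decomposition
    //   ((B - D) + B)   -> index/base pair with a negative displacement
    // A target selector can then emit a single [B + S*scale - D] operand where
    // the addressing mode supports subtracted displacements, e.g.:
    BaseWithIndexAndDisplacementMatcher<Int64AddMatcher> m(
        node, AddressOption::kAllowScale | AddressOption::kAllowInputSwap);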
 
 template <class AddMatcher>
 struct BaseWithIndexAndDisplacementMatcher {
-  BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
+  BaseWithIndexAndDisplacementMatcher(Node* node, AddressOptions options)
       : matches_(false),
         index_(nullptr),
         scale_(0),
         base_(nullptr),
-        displacement_(nullptr) {
-    Initialize(node, allow_input_swap);
+        displacement_(nullptr),
+        displacement_mode_(kPositiveDisplacement) {
+    Initialize(node, options);
   }
 
   explicit BaseWithIndexAndDisplacementMatcher(Node* node)
@@ -397,8 +421,12 @@
         index_(nullptr),
         scale_(0),
         base_(nullptr),
-        displacement_(nullptr) {
-    Initialize(node, node->op()->HasProperty(Operator::kCommutative));
+        displacement_(nullptr),
+        displacement_mode_(kPositiveDisplacement) {
+    Initialize(node, AddressOption::kAllowScale |
+                         (node->op()->HasProperty(Operator::kCommutative)
+                              ? AddressOption::kAllowInputSwap
+                              : AddressOption::kAllowNone));
   }
 
   bool matches() const { return matches_; }
@@ -406,6 +434,7 @@
   int scale() const { return scale_; }
   Node* base() const { return base_; }
   Node* displacement() const { return displacement_; }
+  DisplacementMode displacement_mode() const { return displacement_mode_; }
 
  private:
   bool matches_;
@@ -413,8 +442,9 @@
   int scale_;
   Node* base_;
   Node* displacement_;
+  DisplacementMode displacement_mode_;
 
-  void Initialize(Node* node, bool allow_input_swap) {
+  void Initialize(Node* node, AddressOptions options) {
     // The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
     // displacements and scale factors that are used as inputs, so instead of
     // enumerating all possible patterns by brute force, checking for node
@@ -432,7 +462,7 @@
     // (B + D)
     // (B + B)
     if (node->InputCount() < 2) return;
-    AddMatcher m(node, allow_input_swap);
+    AddMatcher m(node, options & AddressOption::kAllowInputSwap);
     Node* left = m.left().node();
     Node* right = m.right().node();
     Node* displacement = nullptr;
@@ -440,82 +470,123 @@
     Node* index = nullptr;
     Node* scale_expression = nullptr;
     bool power_of_two_plus_one = false;
+    DisplacementMode displacement_mode = kPositiveDisplacement;
     int scale = 0;
     if (m.HasIndexInput() && left->OwnedBy(node)) {
       index = m.IndexInput();
       scale = m.scale();
       scale_expression = left;
       power_of_two_plus_one = m.power_of_two_plus_one();
-      if (right->opcode() == AddMatcher::kOpcode && right->OwnedBy(node)) {
+      bool match_found = false;
+      if (right->opcode() == AddMatcher::kSubOpcode && right->OwnedBy(node)) {
         AddMatcher right_matcher(right);
         if (right_matcher.right().HasValue()) {
-          // (S + (B + D))
+          // (S + (B - D))
           base = right_matcher.left().node();
           displacement = right_matcher.right().node();
+          displacement_mode = kNegativeDisplacement;
+          match_found = true;
+        }
+      }
+      if (!match_found) {
+        if (right->opcode() == AddMatcher::kAddOpcode && right->OwnedBy(node)) {
+          AddMatcher right_matcher(right);
+          if (right_matcher.right().HasValue()) {
+            // (S + (B + D))
+            base = right_matcher.left().node();
+            displacement = right_matcher.right().node();
+          } else {
+            // (S + (B + B))
+            base = right;
+          }
+        } else if (m.right().HasValue()) {
+          // (S + D)
+          displacement = right;
         } else {
-          // (S + (B + B))
+          // (S + B)
           base = right;
         }
-      } else if (m.right().HasValue()) {
-        // (S + D)
-        displacement = right;
-      } else {
-        // (S + B)
-        base = right;
       }
     } else {
-      if (left->opcode() == AddMatcher::kOpcode && left->OwnedBy(node)) {
+      bool match_found = false;
+      if (left->opcode() == AddMatcher::kSubOpcode && left->OwnedBy(node)) {
         AddMatcher left_matcher(left);
         Node* left_left = left_matcher.left().node();
         Node* left_right = left_matcher.right().node();
-        if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
-          if (left_matcher.right().HasValue()) {
-            // ((S + D) + B)
+        if (left_matcher.right().HasValue()) {
+          if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
+            // ((S - D) + B)
             index = left_matcher.IndexInput();
             scale = left_matcher.scale();
             scale_expression = left_left;
             power_of_two_plus_one = left_matcher.power_of_two_plus_one();
             displacement = left_right;
+            displacement_mode = kNegativeDisplacement;
             base = right;
-          } else if (m.right().HasValue()) {
-            // ((S + B) + D)
-            index = left_matcher.IndexInput();
-            scale = left_matcher.scale();
-            scale_expression = left_left;
-            power_of_two_plus_one = left_matcher.power_of_two_plus_one();
-            base = left_right;
-            displacement = right;
           } else {
-            // (B + B)
-            index = left;
-            base = right;
-          }
-        } else {
-          if (left_matcher.right().HasValue()) {
-            // ((B + D) + B)
+            // ((B - D) + B)
             index = left_left;
             displacement = left_right;
-            base = right;
-          } else if (m.right().HasValue()) {
-            // ((B + B) + D)
-            index = left_left;
-            base = left_right;
-            displacement = right;
-          } else {
-            // (B + B)
-            index = left;
+            displacement_mode = kNegativeDisplacement;
             base = right;
           }
+          match_found = true;
         }
-      } else {
-        if (m.right().HasValue()) {
-          // (B + D)
-          base = left;
-          displacement = right;
+      }
+      if (!match_found) {
+        if (left->opcode() == AddMatcher::kAddOpcode && left->OwnedBy(node)) {
+          AddMatcher left_matcher(left);
+          Node* left_left = left_matcher.left().node();
+          Node* left_right = left_matcher.right().node();
+          if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
+            if (left_matcher.right().HasValue()) {
+              // ((S + D) + B)
+              index = left_matcher.IndexInput();
+              scale = left_matcher.scale();
+              scale_expression = left_left;
+              power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+              displacement = left_right;
+              base = right;
+            } else if (m.right().HasValue()) {
+              // ((S + B) + D)
+              index = left_matcher.IndexInput();
+              scale = left_matcher.scale();
+              scale_expression = left_left;
+              power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+              base = left_right;
+              displacement = right;
+            } else {
+              // (B + B)
+              index = left;
+              base = right;
+            }
+          } else {
+            if (left_matcher.right().HasValue()) {
+              // ((B + D) + B)
+              index = left_left;
+              displacement = left_right;
+              base = right;
+            } else if (m.right().HasValue()) {
+              // ((B + B) + D)
+              index = left_left;
+              base = left_right;
+              displacement = right;
+            } else {
+              // (B + B)
+              index = left;
+              base = right;
+            }
+          }
         } else {
-          // (B + B)
-          base = left;
-          index = right;
+          if (m.right().HasValue()) {
+            // (B + D)
+            base = left;
+            displacement = right;
+          } else {
+            // (B + B)
+            base = left;
+            index = right;
+          }
         }
       }
     }
@@ -550,8 +621,13 @@
         base = index;
       }
     }
+    if (!(options & AddressOption::kAllowScale) && scale != 0) {
+      index = scale_expression;
+      scale = 0;
+    }
     base_ = base;
     displacement_ = displacement;
+    displacement_mode_ = displacement_mode;
     index_ = index;
     scale_ = scale;
     matches_ = true;
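
For illustration, a minimal standalone sketch of how a consumer folds the
matcher's output back into an effective address. DisplacementMode is the new
piece of matcher state; the struct and function names here are hypothetical:

  #include <cstdint>

  enum DisplacementMode { kPositiveDisplacement, kNegativeDisplacement };

  struct MatchedAddress {  // hypothetical holder for the matcher's output
    intptr_t base;
    intptr_t index;
    int scale;  // index is scaled by 1 << scale
    intptr_t displacement;
    DisplacementMode displacement_mode;
  };

  // Patterns like (S + (B - D)) and ((B - D) + B) reduce to one formula; the
  // matcher records the sign of D instead of materializing a negation node.
  intptr_t EffectiveAddress(const MatchedAddress& m) {
    intptr_t d = m.displacement_mode == kNegativeDisplacement
                     ? -m.displacement
                     : m.displacement;
    return m.base + (m.index << m.scale) + d;
  }
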
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index dc33d60..22539cb 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -62,9 +62,9 @@
 
 
 // static
-Node* NodeProperties::GetFrameStateInput(Node* node, int index) {
-  DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
-  return node->InputAt(FirstFrameStateIndex(node) + index);
+Node* NodeProperties::GetFrameStateInput(Node* node) {
+  DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+  return node->InputAt(FirstFrameStateIndex(node));
 }
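
Call sites migrate accordingly from the indexed form to the single-frame-state
form, e.g. (sketch):

  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);  // before
  Node* frame_state = NodeProperties::GetFrameStateInput(node);     // after
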
 
 
@@ -172,10 +172,9 @@
 
 
 // static
-void NodeProperties::ReplaceFrameStateInput(Node* node, int index,
-                                            Node* frame_state) {
-  DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
-  node->ReplaceInput(FirstFrameStateIndex(node) + index, frame_state);
+void NodeProperties::ReplaceFrameStateInput(Node* node, Node* frame_state) {
+  DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+  node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
 }
 
 
@@ -244,7 +243,7 @@
     DCHECK_EQ(1, effect->op()->EffectInputCount());
     effect = NodeProperties::GetEffectInput(effect);
   }
-  Node* frame_state = GetFrameStateInput(effect, 0);
+  Node* frame_state = GetFrameStateInput(effect);
   return frame_state;
 }
 
@@ -356,7 +355,6 @@
       case IrOpcode::kJSCreateBlockContext:
       case IrOpcode::kJSCreateCatchContext:
       case IrOpcode::kJSCreateFunctionContext:
-      case IrOpcode::kJSCreateModuleContext:
       case IrOpcode::kJSCreateScriptContext:
       case IrOpcode::kJSCreateWithContext: {
         // Skip over the intermediate contexts, we're only interested in the
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index fbc06fc..9812158 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -41,7 +41,7 @@
 
   static Node* GetValueInput(Node* node, int index);
   static Node* GetContextInput(Node* node);
-  static Node* GetFrameStateInput(Node* node, int index);
+  static Node* GetFrameStateInput(Node* node);
   static Node* GetEffectInput(Node* node, int index = 0);
   static Node* GetControlInput(Node* node, int index = 0);
 
@@ -83,7 +83,7 @@
   static void ReplaceContextInput(Node* node, Node* context);
   static void ReplaceControlInput(Node* node, Node* control, int index = 0);
   static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
-  static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
+  static void ReplaceFrameStateInput(Node* node, Node* frame_state);
   static void RemoveNonValueInputs(Node* node);
   static void RemoveValueInputs(Node* node);
 
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
index 198c353..f4e7b17 100644
--- a/src/compiler/node.cc
+++ b/src/compiler/node.cc
@@ -193,6 +193,22 @@
   Verify();
 }
 
+void Node::InsertInputs(Zone* zone, int index, int count) {
+  DCHECK_NOT_NULL(zone);
+  DCHECK_LE(0, index);
+  DCHECK_LT(0, count);
+  DCHECK_LT(index, InputCount());
+  for (int i = 0; i < count; i++) {
+    AppendInput(zone, InputAt(Max(InputCount() - count, 0)));
+  }
+  for (int i = InputCount() - count - 1; i >= Max(index, count); --i) {
+    ReplaceInput(i, InputAt(i - count));
+  }
+  for (int i = 0; i < count; i++) {
+    ReplaceInput(index + i, nullptr);
+  }
+  Verify();
+}
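
A standalone model (assumed faithful, for illustration only) of the three
loops above, applied to a std::vector<int>; the net effect is inserting
`count` null entries at `index`:

  #include <algorithm>
  #include <cassert>
  #include <vector>

  void InsertInputsModel(std::vector<int>& in, int index, int count) {
    // Grow by `count`, duplicating tail elements (their values are scratch).
    for (int i = 0; i < count; i++) {
      in.push_back(in[std::max(static_cast<int>(in.size()) - count, 0)]);
    }
    // Shift the old tail right by `count`, stopping at max(index, count).
    for (int i = static_cast<int>(in.size()) - count - 1;
         i >= std::max(index, count); --i) {
      in[i] = in[i - count];
    }
    // Null out (here: zero out) the window opened at `index`.
    for (int i = 0; i < count; i++) in[index + i] = 0;
  }

  int main() {
    std::vector<int> v = {1, 2, 3, 4};
    InsertInputsModel(v, 1, 2);
    assert((v == std::vector<int>{1, 0, 0, 2, 3, 4}));
  }
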
 
 void Node::RemoveInput(int index) {
   DCHECK_LE(0, index);
@@ -369,7 +385,11 @@
     os << "(";
     for (int i = 0; i < n.InputCount(); ++i) {
       if (i != 0) os << ", ";
-      os << n.InputAt(i)->id();
+      if (n.InputAt(i)) {
+        os << n.InputAt(i)->id();
+      } else {
+        os << "null";
+      }
     }
     os << ")";
   }
diff --git a/src/compiler/node.h b/src/compiler/node.h
index c73482f..4935187 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -100,6 +100,7 @@
 
   void AppendInput(Zone* zone, Node* new_to);
   void InsertInput(Zone* zone, int index, Node* new_to);
+  void InsertInputs(Zone* zone, int index, int count);
   void RemoveInput(int index);
   void NullAllInputs();
   void TrimInputCount(int new_input_count);
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index c823afb..c1b5945 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -43,21 +43,27 @@
   V(RelocatableInt32Constant) \
   V(RelocatableInt64Constant)
 
-#define INNER_OP_LIST(V) \
-  V(Select)              \
-  V(Phi)                 \
-  V(EffectPhi)           \
-  V(Checkpoint)          \
-  V(BeginRegion)         \
-  V(FinishRegion)        \
-  V(FrameState)          \
-  V(StateValues)         \
-  V(TypedStateValues)    \
-  V(ObjectState)         \
-  V(Call)                \
-  V(Parameter)           \
-  V(OsrValue)            \
-  V(Projection)
+#define INNER_OP_LIST(V)  \
+  V(Select)               \
+  V(Phi)                  \
+  V(EffectPhi)            \
+  V(InductionVariablePhi) \
+  V(Checkpoint)           \
+  V(BeginRegion)          \
+  V(FinishRegion)         \
+  V(FrameState)           \
+  V(StateValues)          \
+  V(TypedStateValues)     \
+  V(ObjectState)          \
+  V(Call)                 \
+  V(Parameter)            \
+  V(OsrValue)             \
+  V(LoopExit)             \
+  V(LoopExitValue)        \
+  V(LoopExitEffect)       \
+  V(Projection)           \
+  V(Retain)               \
+  V(TypeGuard)
 
 #define COMMON_OP_LIST(V) \
   CONSTANT_OP_LIST(V)     \
@@ -137,7 +143,6 @@
   V(JSCreateCatchContext)     \
   V(JSCreateWithContext)      \
   V(JSCreateBlockContext)     \
-  V(JSCreateModuleContext)    \
   V(JSCreateScriptContext)
 
 #define JS_OTHER_OP_LIST(V)         \
@@ -164,105 +169,155 @@
   JS_OTHER_OP_LIST(V)
 
 // Opcodes for virtual machine-level operators.
+#define SIMPLIFIED_CHANGE_OP_LIST(V) \
+  V(ChangeTaggedSignedToInt32)       \
+  V(ChangeTaggedToInt32)             \
+  V(ChangeTaggedToUint32)            \
+  V(ChangeTaggedToFloat64)           \
+  V(ChangeInt31ToTaggedSigned)       \
+  V(ChangeInt32ToTagged)             \
+  V(ChangeUint32ToTagged)            \
+  V(ChangeFloat64ToTagged)           \
+  V(ChangeTaggedToBit)               \
+  V(ChangeBitToTagged)               \
+  V(TruncateTaggedToWord32)          \
+  V(TruncateTaggedToFloat64)
+
+#define SIMPLIFIED_CHECKED_OP_LIST(V) \
+  V(CheckedInt32Add)                  \
+  V(CheckedInt32Sub)                  \
+  V(CheckedInt32Div)                  \
+  V(CheckedInt32Mod)                  \
+  V(CheckedUint32Div)                 \
+  V(CheckedUint32Mod)                 \
+  V(CheckedInt32Mul)                  \
+  V(CheckedUint32ToInt32)             \
+  V(CheckedFloat64ToInt32)            \
+  V(CheckedTaggedSignedToInt32)       \
+  V(CheckedTaggedToInt32)             \
+  V(CheckedTruncateTaggedToWord32)    \
+  V(CheckedTaggedToFloat64)
+
 #define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
   V(NumberEqual)                         \
   V(NumberLessThan)                      \
   V(NumberLessThanOrEqual)               \
+  V(SpeculativeNumberEqual)              \
+  V(SpeculativeNumberLessThan)           \
+  V(SpeculativeNumberLessThanOrEqual)    \
   V(ReferenceEqual)                      \
   V(StringEqual)                         \
   V(StringLessThan)                      \
   V(StringLessThanOrEqual)
 
-#define SIMPLIFIED_OP_LIST(V)         \
-  SIMPLIFIED_COMPARE_BINOP_LIST(V)    \
-  V(PlainPrimitiveToNumber)           \
-  V(PlainPrimitiveToWord32)           \
-  V(PlainPrimitiveToFloat64)          \
-  V(BooleanNot)                       \
-  V(BooleanToNumber)                  \
-  V(SpeculativeNumberAdd)             \
-  V(SpeculativeNumberSubtract)        \
-  V(SpeculativeNumberMultiply)        \
-  V(SpeculativeNumberDivide)          \
-  V(SpeculativeNumberModulus)         \
-  V(SpeculativeNumberEqual)           \
-  V(SpeculativeNumberLessThan)        \
-  V(SpeculativeNumberLessThanOrEqual) \
-  V(NumberAdd)                        \
-  V(NumberSubtract)                   \
-  V(NumberMultiply)                   \
-  V(NumberDivide)                     \
-  V(NumberModulus)                    \
-  V(NumberBitwiseOr)                  \
-  V(NumberBitwiseXor)                 \
-  V(NumberBitwiseAnd)                 \
-  V(NumberShiftLeft)                  \
-  V(NumberShiftRight)                 \
-  V(NumberShiftRightLogical)          \
-  V(NumberImul)                       \
-  V(NumberAbs)                        \
-  V(NumberClz32)                      \
-  V(NumberCeil)                       \
-  V(NumberCos)                        \
-  V(NumberFloor)                      \
-  V(NumberFround)                     \
-  V(NumberAtan)                       \
-  V(NumberAtan2)                      \
-  V(NumberAtanh)                      \
-  V(NumberExp)                        \
-  V(NumberExpm1)                      \
-  V(NumberLog)                        \
-  V(NumberLog1p)                      \
-  V(NumberLog2)                       \
-  V(NumberLog10)                      \
-  V(NumberCbrt)                       \
-  V(NumberRound)                      \
-  V(NumberSin)                        \
-  V(NumberSqrt)                       \
-  V(NumberTan)                        \
-  V(NumberTrunc)                      \
-  V(NumberToInt32)                    \
-  V(NumberToUint32)                   \
-  V(NumberSilenceNaN)                 \
-  V(StringFromCharCode)               \
-  V(StringToNumber)                   \
-  V(ChangeTaggedSignedToInt32)        \
-  V(ChangeTaggedToInt32)              \
-  V(ChangeTaggedToUint32)             \
-  V(ChangeTaggedToFloat64)            \
-  V(ChangeInt31ToTaggedSigned)        \
-  V(ChangeInt32ToTagged)              \
-  V(ChangeUint32ToTagged)             \
-  V(ChangeFloat64ToTagged)            \
-  V(ChangeTaggedToBit)                \
-  V(ChangeBitToTagged)                \
-  V(CheckBounds)                      \
-  V(CheckTaggedPointer)               \
-  V(CheckTaggedSigned)                \
-  V(CheckedInt32Add)                  \
-  V(CheckedInt32Sub)                  \
-  V(CheckedUint32ToInt32)             \
-  V(CheckedFloat64ToInt32)            \
-  V(CheckedTaggedToInt32)             \
-  V(CheckedTaggedToFloat64)           \
-  V(CheckFloat64Hole)                 \
-  V(CheckTaggedHole)                  \
-  V(TruncateTaggedToWord32)           \
-  V(TruncateTaggedToFloat64)          \
-  V(Allocate)                         \
-  V(LoadField)                        \
-  V(LoadBuffer)                       \
-  V(LoadElement)                      \
-  V(StoreField)                       \
-  V(StoreBuffer)                      \
-  V(StoreElement)                     \
-  V(ObjectIsCallable)                 \
-  V(ObjectIsNumber)                   \
-  V(ObjectIsReceiver)                 \
-  V(ObjectIsSmi)                      \
-  V(ObjectIsString)                   \
-  V(ObjectIsUndetectable)             \
-  V(TypeGuard)
+#define SIMPLIFIED_NUMBER_BINOP_LIST(V) \
+  V(NumberAdd)                          \
+  V(NumberSubtract)                     \
+  V(NumberMultiply)                     \
+  V(NumberDivide)                       \
+  V(NumberModulus)                      \
+  V(NumberBitwiseOr)                    \
+  V(NumberBitwiseXor)                   \
+  V(NumberBitwiseAnd)                   \
+  V(NumberShiftLeft)                    \
+  V(NumberShiftRight)                   \
+  V(NumberShiftRightLogical)            \
+  V(NumberAtan2)                        \
+  V(NumberImul)                         \
+  V(NumberMax)                          \
+  V(NumberMin)                          \
+  V(NumberPow)
+
+#define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
+  V(SpeculativeNumberAdd)                           \
+  V(SpeculativeNumberSubtract)                      \
+  V(SpeculativeNumberMultiply)                      \
+  V(SpeculativeNumberDivide)                        \
+  V(SpeculativeNumberModulus)                       \
+  V(SpeculativeNumberBitwiseAnd)                    \
+  V(SpeculativeNumberBitwiseOr)                     \
+  V(SpeculativeNumberBitwiseXor)                    \
+  V(SpeculativeNumberShiftLeft)                     \
+  V(SpeculativeNumberShiftRight)                    \
+  V(SpeculativeNumberShiftRightLogical)
+
+#define SIMPLIFIED_NUMBER_UNOP_LIST(V) \
+  V(NumberAbs)                         \
+  V(NumberAcos)                        \
+  V(NumberAcosh)                       \
+  V(NumberAsin)                        \
+  V(NumberAsinh)                       \
+  V(NumberAtan)                        \
+  V(NumberAtanh)                       \
+  V(NumberCbrt)                        \
+  V(NumberCeil)                        \
+  V(NumberClz32)                       \
+  V(NumberCos)                         \
+  V(NumberCosh)                        \
+  V(NumberExp)                         \
+  V(NumberExpm1)                       \
+  V(NumberFloor)                       \
+  V(NumberFround)                      \
+  V(NumberLog)                         \
+  V(NumberLog1p)                       \
+  V(NumberLog2)                        \
+  V(NumberLog10)                       \
+  V(NumberRound)                       \
+  V(NumberSign)                        \
+  V(NumberSin)                         \
+  V(NumberSinh)                        \
+  V(NumberSqrt)                        \
+  V(NumberTan)                         \
+  V(NumberTanh)                        \
+  V(NumberTrunc)                       \
+  V(NumberToInt32)                     \
+  V(NumberToUint32)                    \
+  V(NumberSilenceNaN)
+
+#define SIMPLIFIED_OTHER_OP_LIST(V) \
+  V(PlainPrimitiveToNumber)         \
+  V(PlainPrimitiveToWord32)         \
+  V(PlainPrimitiveToFloat64)        \
+  V(BooleanNot)                     \
+  V(StringCharCodeAt)               \
+  V(StringFromCharCode)             \
+  V(CheckBounds)                    \
+  V(CheckIf)                        \
+  V(CheckMaps)                      \
+  V(CheckNumber)                    \
+  V(CheckString)                    \
+  V(CheckTaggedPointer)             \
+  V(CheckTaggedSigned)              \
+  V(CheckFloat64Hole)               \
+  V(CheckTaggedHole)                \
+  V(ConvertTaggedHoleToUndefined)   \
+  V(Allocate)                       \
+  V(LoadField)                      \
+  V(LoadBuffer)                     \
+  V(LoadElement)                    \
+  V(LoadTypedElement)               \
+  V(StoreField)                     \
+  V(StoreBuffer)                    \
+  V(StoreElement)                   \
+  V(StoreTypedElement)              \
+  V(ObjectIsCallable)               \
+  V(ObjectIsNumber)                 \
+  V(ObjectIsReceiver)               \
+  V(ObjectIsSmi)                    \
+  V(ObjectIsString)                 \
+  V(ObjectIsUndetectable)           \
+  V(EnsureWritableFastElements)     \
+  V(MaybeGrowFastElements)          \
+  V(TransitionElementsKind)
+
+#define SIMPLIFIED_OP_LIST(V)                 \
+  SIMPLIFIED_CHANGE_OP_LIST(V)                \
+  SIMPLIFIED_CHECKED_OP_LIST(V)               \
+  SIMPLIFIED_COMPARE_BINOP_LIST(V)            \
+  SIMPLIFIED_NUMBER_BINOP_LIST(V)             \
+  SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
+  SIMPLIFIED_NUMBER_UNOP_LIST(V)              \
+  SIMPLIFIED_OTHER_OP_LIST(V)
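
These V-lists are consumed with the usual X-macro pattern, e.g. to declare the
opcode enum or the typer methods below. A minimal sketch (DemoOpcode and
DEMO_OP_LIST are hypothetical stand-ins for IrOpcode and the lists above):

  #define DEMO_OP_LIST(V) \
    V(NumberAdd)          \
    V(NumberSubtract)

  enum class DemoOpcode {
  #define DECLARE_OPCODE(x) k##x,
    DEMO_OP_LIST(DECLARE_OPCODE)
  #undef DECLARE_OPCODE
  };
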
 
 // Opcodes for Machine-level operators.
 #define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -300,6 +355,7 @@
   V(Word32Clz)                  \
   V(Word32Ctz)                  \
   V(Word32ReverseBits)          \
+  V(Word32ReverseBytes)         \
   V(Word32Popcnt)               \
   V(Word64Popcnt)               \
   V(Word64And)                  \
@@ -312,11 +368,13 @@
   V(Word64Clz)                  \
   V(Word64Ctz)                  \
   V(Word64ReverseBits)          \
+  V(Word64ReverseBytes)         \
   V(Int32Add)                   \
   V(Int32AddWithOverflow)       \
   V(Int32Sub)                   \
   V(Int32SubWithOverflow)       \
   V(Int32Mul)                   \
+  V(Int32MulWithOverflow)       \
   V(Int32MulHigh)               \
   V(Int32Div)                   \
   V(Int32Mod)                   \
@@ -349,6 +407,12 @@
   V(ChangeInt32ToInt64)         \
   V(ChangeUint32ToFloat64)      \
   V(ChangeUint32ToUint64)       \
+  V(ImpossibleToBit)            \
+  V(ImpossibleToWord32)         \
+  V(ImpossibleToWord64)         \
+  V(ImpossibleToFloat32)        \
+  V(ImpossibleToFloat64)        \
+  V(ImpossibleToTagged)         \
   V(TruncateFloat64ToFloat32)   \
   V(TruncateInt64ToInt32)       \
   V(RoundFloat64ToInt32)        \
@@ -364,18 +428,16 @@
   V(BitcastInt64ToFloat64)      \
   V(Float32Add)                 \
   V(Float32Sub)                 \
-  V(Float32SubPreserveNan)      \
   V(Float32Neg)                 \
   V(Float32Mul)                 \
   V(Float32Div)                 \
-  V(Float32Max)                 \
-  V(Float32Min)                 \
   V(Float32Abs)                 \
   V(Float32Sqrt)                \
   V(Float32RoundDown)           \
+  V(Float32Max)                 \
+  V(Float32Min)                 \
   V(Float64Add)                 \
   V(Float64Sub)                 \
-  V(Float64SubPreserveNan)      \
   V(Float64Neg)                 \
   V(Float64Mul)                 \
   V(Float64Div)                 \
@@ -383,20 +445,28 @@
   V(Float64Max)                 \
   V(Float64Min)                 \
   V(Float64Abs)                 \
+  V(Float64Acos)                \
+  V(Float64Acosh)               \
+  V(Float64Asin)                \
+  V(Float64Asinh)               \
   V(Float64Atan)                \
-  V(Float64Atan2)               \
   V(Float64Atanh)               \
+  V(Float64Atan2)               \
   V(Float64Cbrt)                \
   V(Float64Cos)                 \
+  V(Float64Cosh)                \
   V(Float64Exp)                 \
   V(Float64Expm1)               \
   V(Float64Log)                 \
   V(Float64Log1p)               \
   V(Float64Log10)               \
   V(Float64Log2)                \
+  V(Float64Pow)                 \
   V(Float64Sin)                 \
+  V(Float64Sinh)                \
   V(Float64Sqrt)                \
   V(Float64Tan)                 \
+  V(Float64Tanh)                \
   V(Float64RoundDown)           \
   V(Float32RoundUp)             \
   V(Float64RoundUp)             \
@@ -414,6 +484,8 @@
   V(LoadParentFramePointer)     \
   V(CheckedLoad)                \
   V(CheckedStore)               \
+  V(UnalignedLoad)              \
+  V(UnalignedStore)             \
   V(Int32PairAdd)               \
   V(Int32PairSub)               \
   V(Int32PairMul)               \
@@ -421,7 +493,8 @@
   V(Word32PairShr)              \
   V(Word32PairSar)              \
   V(AtomicLoad)                 \
-  V(AtomicStore)
+  V(AtomicStore)                \
+  V(UnsafePointerAdd)
 
 #define MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
   V(CreateFloat32x4)                        \
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
index b2860e0..f3ef778 100644
--- a/src/compiler/operation-typer.cc
+++ b/src/compiler/operation-typer.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/operation-typer.h"
 
+#include "src/compiler/common-operator.h"
 #include "src/factory.h"
 #include "src/isolate.h"
 #include "src/type-cache.h"
@@ -18,9 +19,20 @@
 OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
     : zone_(zone), cache_(TypeCache::Get()) {
   Factory* factory = isolate->factory();
+  infinity_ = Type::Constant(factory->infinity_value(), zone);
+  minus_infinity_ = Type::Constant(factory->minus_infinity_value(), zone);
+  // Unfortunately, the infinities created in other places might be different
+  // ones (e.g., the result of NewNumber in TypeNumberConstant).
+  Type* truncating_to_zero =
+      Type::Union(Type::Union(infinity_, minus_infinity_, zone),
+                  Type::MinusZeroOrNaN(), zone);
+  DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
+
   singleton_false_ = Type::Constant(factory->false_value(), zone);
   singleton_true_ = Type::Constant(factory->true_value(), zone);
   singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+  signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
+  unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
 }
 
 Type* OperationTyper::Merge(Type* left, Type* right) {
@@ -162,29 +174,31 @@
   results[3] = lhs_max + rhs_max;
   // Since none of the inputs can be -0, the result cannot be -0 either.
   // However, it can be nan (the sum of two infinities of opposite sign).
-  // On the other hand, if none of the "results" above is nan, then the actual
-  // result cannot be nan either.
+  // On the other hand, if none of the "results" above is nan, then the
+  // actual result cannot be nan either.
   int nans = 0;
   for (int i = 0; i < 4; ++i) {
     if (std::isnan(results[i])) ++nans;
   }
-  if (nans == 4) return Type::NaN();  // [-inf..-inf] + [inf..inf] or vice versa
-  Type* range =
+  if (nans == 4) return Type::NaN();
+  Type* type =
       Type::Range(array_min(results, 4), array_max(results, 4), zone());
-  return nans == 0 ? range : Type::Union(range, Type::NaN(), zone());
+  if (nans > 0) type = Type::Union(type, Type::NaN(), zone());
   // Examples:
   //   [-inf, -inf] + [+inf, +inf] = NaN
   //   [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
   //   [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
   //   [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
+  return type;
 }
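
A minimal sketch of the four-corner rule AddRanger implements, assuming the
array_min/array_max helpers skip NaN corners as in the surrounding code:

  #include <algorithm>
  #include <cmath>
  #include <limits>

  struct Corners { double min, max; int nans; };

  Corners FourCorners(double lmin, double lmax, double rmin, double rmax) {
    double r[4] = {lmin + rmin, lmin + rmax, lmax + rmin, lmax + rmax};
    Corners c = {std::numeric_limits<double>::infinity(),
                 -std::numeric_limits<double>::infinity(), 0};
    for (double v : r) {
      if (std::isnan(v)) { ++c.nans; continue; }
      c.min = std::min(c.min, v);
      c.max = std::max(c.max, v);
    }
    // nans == 4 only for [-inf,-inf] + [+inf,+inf] (or vice versa), which
    // the caller maps to Type::NaN(); any nans > 0 unions NaN into the range.
    return c;
  }

For example, [1,3] + [10,20] has corners {11, 21, 13, 23}, so the resulting
range is [11, 23] and no NaN union is needed.
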
 
-Type* OperationTyper::SubtractRanger(RangeType* lhs, RangeType* rhs) {
+Type* OperationTyper::SubtractRanger(double lhs_min, double lhs_max,
+                                     double rhs_min, double rhs_max) {
   double results[4];
-  results[0] = lhs->Min() - rhs->Min();
-  results[1] = lhs->Min() - rhs->Max();
-  results[2] = lhs->Max() - rhs->Min();
-  results[3] = lhs->Max() - rhs->Max();
+  results[0] = lhs_min - rhs_min;
+  results[1] = lhs_min - rhs_max;
+  results[2] = lhs_max - rhs_min;
+  results[3] = lhs_max - rhs_max;
   // Since none of the inputs can be -0, the result cannot be -0.
   // However, it can be nan (the subtraction of two infinities of same sign).
   // On the other hand, if none of the "results" above is nan, then the actual
@@ -194,9 +208,9 @@
     if (std::isnan(results[i])) ++nans;
   }
   if (nans == 4) return Type::NaN();  // [inf..inf] - [inf..inf] (all same sign)
-  Type* range =
+  Type* type =
       Type::Range(array_min(results, 4), array_max(results, 4), zone());
-  return nans == 0 ? range : Type::Union(range, Type::NaN(), zone());
+  return nans == 0 ? type : Type::Union(type, Type::NaN(), zone());
   // Examples:
   //   [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
   //   [-inf, -inf] - [-inf, -inf] = NaN
@@ -204,36 +218,6 @@
   //   [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
 }
 
-Type* OperationTyper::ModulusRanger(RangeType* lhs, RangeType* rhs) {
-  double lmin = lhs->Min();
-  double lmax = lhs->Max();
-  double rmin = rhs->Min();
-  double rmax = rhs->Max();
-
-  double labs = std::max(std::abs(lmin), std::abs(lmax));
-  double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
-  double abs = std::min(labs, rabs);
-  bool maybe_minus_zero = false;
-  double omin = 0;
-  double omax = 0;
-  if (lmin >= 0) {  // {lhs} positive.
-    omin = 0;
-    omax = abs;
-  } else if (lmax <= 0) {  // {lhs} negative.
-    omin = 0 - abs;
-    omax = 0;
-    maybe_minus_zero = true;
-  } else {
-    omin = 0 - abs;
-    omax = abs;
-    maybe_minus_zero = true;
-  }
-
-  Type* result = Type::Range(omin, omax, zone());
-  if (maybe_minus_zero) result = Type::Union(result, Type::MinusZero(), zone());
-  return result;
-}
-
 Type* OperationTyper::MultiplyRanger(Type* lhs, Type* rhs) {
   double results[4];
   double lmin = lhs->AsRange()->Min();
@@ -244,12 +228,9 @@
   results[1] = lmin * rmax;
   results[2] = lmax * rmin;
   results[3] = lmax * rmax;
-  // If the result may be nan, we give up on calculating a precise type,
-  // because
-  // the discontinuity makes it too complicated.  Note that even if none of
-  // the
-  // "results" above is nan, the actual result may still be, so we have to do
-  // a
+  // If the result may be nan, we give up on calculating a precise type, because
+  // the discontinuity makes it too complicated.  Note that even if none of the
+  // "results" above is nan, the actual result may still be, so we have to do a
   // different check:
   bool maybe_nan = (lhs->Maybe(cache_.kSingletonZero) &&
                     (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
@@ -271,59 +252,347 @@
     if (type->Is(Type::Undefined())) return Type::NaN();
     return Type::Union(Type::NaN(), cache_.kSingletonZero, zone());
   }
-  if (type->Is(Type::NumberOrUndefined())) {
-    return Type::Union(Type::Intersect(type, Type::Number(), zone()),
-                       Type::NaN(), zone());
+  if (type->Is(Type::Boolean())) {
+    if (type->Is(singleton_false_)) return cache_.kSingletonZero;
+    if (type->Is(singleton_true_)) return cache_.kSingletonOne;
+    return cache_.kZeroOrOne;
   }
-  if (type->Is(singleton_false_)) return cache_.kSingletonZero;
-  if (type->Is(singleton_true_)) return cache_.kSingletonOne;
-  if (type->Is(Type::Boolean())) return cache_.kZeroOrOne;
-  if (type->Is(Type::BooleanOrNumber())) {
-    return Type::Union(Type::Intersect(type, Type::Number(), zone()),
-                       cache_.kZeroOrOne, zone());
+  if (type->Is(Type::NumberOrOddball())) {
+    if (type->Is(Type::NumberOrUndefined())) {
+      type = Type::Union(type, Type::NaN(), zone());
+    } else if (type->Is(Type::NullOrNumber())) {
+      type = Type::Union(type, cache_.kSingletonZero, zone());
+    } else if (type->Is(Type::BooleanOrNullOrNumber())) {
+      type = Type::Union(type, cache_.kZeroOrOne, zone());
+    } else {
+      type = Type::Union(type, cache_.kZeroOrOneOrNaN, zone());
+    }
+    return Type::Intersect(type, Type::Number(), zone());
   }
   return Type::Number();
 }
 
-Type* OperationTyper::NumericAdd(Type* lhs, Type* rhs) {
+Type* OperationTyper::NumberAbs(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+
+  if (!type->IsInhabited()) {
+    return Type::None();
+  }
+
+  bool const maybe_nan = type->Maybe(Type::NaN());
+  bool const maybe_minuszero = type->Maybe(Type::MinusZero());
+  type = Type::Intersect(type, Type::PlainNumber(), zone());
+  double const max = type->Max();
+  double const min = type->Min();
+  if (min < 0) {
+    if (type->Is(cache_.kInteger)) {
+      type = Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), zone());
+    } else {
+      type = Type::PlainNumber();
+    }
+  }
+  if (maybe_minuszero) {
+    type = Type::Union(type, cache_.kSingletonZero, zone());
+  }
+  if (maybe_nan) {
+    type = Type::Union(type, Type::NaN(), zone());
+  }
+  return type;
+}
+
+Type* OperationTyper::NumberAcos(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberAcosh(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberAsin(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberAsinh(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberAtan(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberAtanh(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberCbrt(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberCeil(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberClz32(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return cache_.kZeroToThirtyTwo;
+}
+
+Type* OperationTyper::NumberCos(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberCosh(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberExp(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
+}
+
+Type* OperationTyper::NumberExpm1(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
+}
+
+Type* OperationTyper::NumberFloor(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberFround(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberLog(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberLog1p(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberLog2(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberLog10(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberRound(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberSign(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(cache_.kZeroish)) return type;
+  bool maybe_minuszero = type->Maybe(Type::MinusZero());
+  bool maybe_nan = type->Maybe(Type::NaN());
+  type = Type::Intersect(type, Type::PlainNumber(), zone());
+  if (type->Max() < 0.0) {
+    type = cache_.kSingletonMinusOne;
+  } else if (type->Max() <= 0.0) {
+    type = cache_.kMinusOneOrZero;
+  } else if (type->Min() > 0.0) {
+    type = cache_.kSingletonOne;
+  } else if (type->Min() >= 0.0) {
+    type = cache_.kZeroOrOne;
+  } else {
+    type = Type::Range(-1.0, 1.0, zone());
+  }
+  if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+  if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+  return type;
+}
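
Worked example (illustrative): for type = Range(2, 7), Min() > 0 selects
cache_.kSingletonOne; for type = Range(-5, 3), none of the sign tests settle
it, so the final branch yields Range(-1, 1). MinusZero and NaN are unioned
back in only when the input could contain them.
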
+
+Type* OperationTyper::NumberSin(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberSinh(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberSqrt(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberTan(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberTanh(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberTrunc(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberToInt32(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+
+  if (type->Is(Type::Signed32())) return type;
+  if (type->Is(cache_.kZeroish)) return cache_.kSingletonZero;
+  if (type->Is(signed32ish_)) {
+    return Type::Intersect(Type::Union(type, cache_.kSingletonZero, zone()),
+                           Type::Signed32(), zone());
+  }
+  return Type::Signed32();
+}
+
+Type* OperationTyper::NumberToUint32(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+
+  if (type->Is(Type::Unsigned32())) return type;
+  if (type->Is(cache_.kZeroish)) return cache_.kSingletonZero;
+  if (type->Is(unsigned32ish_)) {
+    return Type::Intersect(Type::Union(type, cache_.kSingletonZero, zone()),
+                           Type::Unsigned32(), zone());
+  }
+  return Type::Unsigned32();
+}
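
The signed32ish_/unsigned32ish_ sets work because ECMAScript's ToInt32 and
ToUint32 truncations send NaN, -0 and both infinities to 0, so only a
kSingletonZero union is needed. An illustrative sketch of that truncation
(not V8's actual implementation):

  #include <cmath>
  #include <cstdint>

  int32_t ToInt32Sketch(double v) {
    if (std::isnan(v) || std::isinf(v)) return 0;  // NaN, +/-Infinity -> 0
    double two32 = 4294967296.0;                   // 2^32
    double m = std::fmod(std::trunc(v), two32);    // truncate, reduce mod 2^32
    if (m < 0) m += two32;                         // normalize to [0, 2^32)
    // Reinterpret as signed; wraps modulo 2^32 (two's complement targets).
    return static_cast<int32_t>(static_cast<uint32_t>(m));
  }
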
+
+Type* OperationTyper::NumberSilenceNaN(Type* type) {
+  DCHECK(type->Is(Type::Number()));
+  // TODO(jarin): This is a terrible hack; we definitely need a dedicated type
+  // for the hole (tagged and/or double). Otherwise if the input is the hole
+  // NaN constant, we'd just eliminate this node in JSTypedLowering.
+  if (type->Maybe(Type::NaN())) return Type::Number();
+  return type;
+}
+
+Type* OperationTyper::NumberAdd(Type* lhs, Type* rhs) {
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
 
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+    return Type::None();
+  }
+
+  // Addition can return NaN if either input can be NaN or we try to compute
+  // the sum of two infinities of opposite sign.
+  bool maybe_nan = lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN());
+
+  // Addition can yield minus zero only if both inputs can be minus zero.
+  bool maybe_minuszero = true;
+  if (lhs->Maybe(Type::MinusZero())) {
+    lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+  } else {
+    maybe_minuszero = false;
+  }
+  if (rhs->Maybe(Type::MinusZero())) {
+    rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+  } else {
+    maybe_minuszero = false;
+  }
+
   // We can give more precise types for integers.
-  if (!lhs->Is(cache_.kIntegerOrMinusZeroOrNaN) ||
-      !rhs->Is(cache_.kIntegerOrMinusZeroOrNaN)) {
-    return Type::Number();
+  Type* type = Type::None();
+  lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
+  rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
+  if (lhs->IsInhabited() && rhs->IsInhabited()) {
+    if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+      type = AddRanger(lhs->Min(), lhs->Max(), rhs->Min(), rhs->Max());
+    } else {
+      if ((lhs->Maybe(minus_infinity_) && rhs->Maybe(infinity_)) ||
+          (rhs->Maybe(minus_infinity_) && lhs->Maybe(infinity_))) {
+        maybe_nan = true;
+      }
+      type = Type::PlainNumber();
+    }
   }
-  Type* int_lhs = Type::Intersect(lhs, cache_.kInteger, zone());
-  Type* int_rhs = Type::Intersect(rhs, cache_.kInteger, zone());
-  Type* result =
-      AddRanger(int_lhs->Min(), int_lhs->Max(), int_rhs->Min(), int_rhs->Max());
-  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
-    result = Type::Union(result, Type::NaN(), zone());
-  }
-  if (lhs->Maybe(Type::MinusZero()) && rhs->Maybe(Type::MinusZero())) {
-    result = Type::Union(result, Type::MinusZero(), zone());
-  }
-  return result;
+
+  // Take into account the -0 and NaN information computed earlier.
+  if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+  if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+  return type;
 }
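
Worked example (illustrative): for lhs = MinusZero \/ [1,2] and rhs = [3,4],
rhs cannot be -0, so maybe_minuszero ends up false; lhs is widened to
{0} \/ [1,2] before ranging, and AddRanger over [0,2] and [3,4] produces the
corners {3, 4, 5, 6}, hence the result type Range(3, 6).
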
 
-Type* OperationTyper::NumericSubtract(Type* lhs, Type* rhs) {
+Type* OperationTyper::NumberSubtract(Type* lhs, Type* rhs) {
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
 
-  lhs = Rangify(lhs);
-  rhs = Rangify(rhs);
-  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-  if (lhs->IsRange() && rhs->IsRange()) {
-    return SubtractRanger(lhs->AsRange(), rhs->AsRange());
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+    return Type::None();
   }
-  // TODO(neis): Deal with numeric bitsets here and elsewhere.
-  return Type::Number();
+
+  // Subtraction can return NaN if either input can be NaN or we try to
+  // compute the difference of two infinities of the same sign.
+  bool maybe_nan = lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN());
+
+  // Subtraction can yield minus zero if {lhs} can be minus zero and {rhs}
+  // can be zero.
+  bool maybe_minuszero = false;
+  if (lhs->Maybe(Type::MinusZero())) {
+    lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+    maybe_minuszero = rhs->Maybe(cache_.kSingletonZero);
+  }
+  if (rhs->Maybe(Type::MinusZero())) {
+    rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+  }
+
+  // We can give more precise types for integers.
+  Type* type = Type::None();
+  lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
+  rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
+  if (lhs->IsInhabited() && rhs->IsInhabited()) {
+    if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+      type = SubtractRanger(lhs->Min(), lhs->Max(), rhs->Min(), rhs->Max());
+    } else {
+      if ((lhs->Maybe(infinity_) && rhs->Maybe(infinity_)) ||
+          (rhs->Maybe(minus_infinity_) && lhs->Maybe(minus_infinity_))) {
+        maybe_nan = true;
+      }
+      type = Type::PlainNumber();
+    }
+  }
+
+  // Take into account the -0 and NaN information computed earlier.
+  if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+  if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+  return type;
 }
 
-Type* OperationTyper::NumericMultiply(Type* lhs, Type* rhs) {
+Type* OperationTyper::NumberMultiply(Type* lhs, Type* rhs) {
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
+
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+    return Type::None();
+  }
+
   lhs = Rangify(lhs);
   rhs = Rangify(rhs);
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
@@ -333,10 +602,14 @@
   return Type::Number();
 }
 
-Type* OperationTyper::NumericDivide(Type* lhs, Type* rhs) {
+Type* OperationTyper::NumberDivide(Type* lhs, Type* rhs) {
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
 
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+    return Type::None();
+  }
+
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
   // Division is tricky, so all we do is try ruling out nan.
   bool maybe_nan =
@@ -346,25 +619,311 @@
   return maybe_nan ? Type::Number() : Type::OrderedNumber();
 }
 
-Type* OperationTyper::NumericModulus(Type* lhs, Type* rhs) {
+Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
-  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
 
-  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
-      lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
-    // Result maybe NaN.
-    return Type::Number();
+  // Modulus can yield NaN if either {lhs} or {rhs} can be NaN, or
+  // {lhs} is not finite, or {rhs} is a zero value.
+  bool maybe_nan = lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
+                   lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY;
+
+  // Deal with -0 inputs; only the sign bit of {lhs} matters for the result.
+  bool maybe_minuszero = false;
+  if (lhs->Maybe(Type::MinusZero())) {
+    maybe_minuszero = true;
+    lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+  }
+  if (rhs->Maybe(Type::MinusZero())) {
+    rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
   }
 
-  lhs = Rangify(lhs);
-  rhs = Rangify(rhs);
-  if (lhs->IsRange() && rhs->IsRange()) {
-    return ModulusRanger(lhs->AsRange(), rhs->AsRange());
+  // Rule out NaN and -0, and check what we can do with the remaining type info.
+  Type* type = Type::None();
+  lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
+  rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
+
+  // We can only derive a meaningful type if both {lhs} and {rhs} are
+  // inhabited and {rhs} is not 0; otherwise the result is NaN independent
+  // of {lhs}.
+  if (lhs->IsInhabited() && !rhs->Is(cache_.kSingletonZero)) {
+    // Determine the bounds of {lhs} and {rhs}.
+    double const lmin = lhs->Min();
+    double const lmax = lhs->Max();
+    double const rmin = rhs->Min();
+    double const rmax = rhs->Max();
+
+    // The sign of the result is the sign of the {lhs}.
+    if (lmin < 0.0) maybe_minuszero = true;
+
+    // For integer inputs {lhs} and {rhs} we can infer a precise type.
+    if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+      double labs = std::max(std::abs(lmin), std::abs(lmax));
+      double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
+      double abs = std::min(labs, rabs);
+      double min = 0.0, max = 0.0;
+      if (lmin >= 0.0) {
+        // {lhs} positive.
+        min = 0.0;
+        max = abs;
+      } else if (lmax <= 0.0) {
+        // {lhs} negative.
+        min = 0.0 - abs;
+        max = 0.0;
+      } else {
+        // {lhs} positive or negative.
+        min = 0.0 - abs;
+        max = abs;
+      }
+      type = Type::Range(min, max, zone());
+    } else {
+      type = Type::PlainNumber();
+    }
   }
-  return Type::OrderedNumber();
+
+  // Take into account the -0 and NaN information computed earlier.
+  if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+  if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+  return type;
 }
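
Worked example (illustrative): for integer inputs lhs = [-5, 3] and
rhs = [2, 4], labs = 5 and rabs = 4 - 1 = 3, so abs = 3; lmin < 0 < lmax
selects the symmetric case, giving Range(-3, 3), and lmin < 0 also sets
maybe_minuszero, so the result is Range(-3, 3) \/ MinusZero. Since rhs cannot
be zeroish and lhs is finite, no NaN is unioned in.
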
 
+Type* OperationTyper::NumberBitwiseOr(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+  lhs = NumberToInt32(lhs);
+  rhs = NumberToInt32(rhs);
+
+  double lmin = lhs->Min();
+  double rmin = rhs->Min();
+  double lmax = lhs->Max();
+  double rmax = rhs->Max();
+  // Or-ing any two values results in a value no smaller than their minimum,
+  // and no smaller than their maximum if both values are non-negative.
+  double min =
+      lmin >= 0 && rmin >= 0 ? std::max(lmin, rmin) : std::min(lmin, rmin);
+  double max = kMaxInt;
+
+  // Or-ing with 0 is essentially a conversion to int32.
+  if (rmin == 0 && rmax == 0) {
+    min = lmin;
+    max = lmax;
+  }
+  if (lmin == 0 && lmax == 0) {
+    min = rmin;
+    max = rmax;
+  }
+
+  if (lmax < 0 || rmax < 0) {
+    // Or-ing two values of which at least one is negative results in a negative
+    // value.
+    max = std::min(max, -1.0);
+  }
+  return Type::Range(min, max, zone());
+}
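
Worked example (illustrative): for lhs = [1, 3] and rhs = [4, 4], both minima
are non-negative, so min = max(1, 4) = 4 while max stays kMaxInt; the typer
deliberately ignores bit patterns, so the precise bound 7 is not recovered.
Or-ing with the constant range [0, 0] passes the other operand's bounds
through unchanged.
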
+
+Type* OperationTyper::NumberBitwiseAnd(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+  lhs = NumberToInt32(lhs);
+  rhs = NumberToInt32(rhs);
+
+  double lmin = lhs->Min();
+  double rmin = rhs->Min();
+  double lmax = lhs->Max();
+  double rmax = rhs->Max();
+  double min = kMinInt;
+  // And-ing any two values results in a value no larger than their maximum,
+  // and no larger than their minimum if both values are non-negative.
+  double max =
+      lmin >= 0 && rmin >= 0 ? std::min(lmax, rmax) : std::max(lmax, rmax);
+  // And-ing with a non-negative value x causes the result to be between
+  // zero and x.
+  if (lmin >= 0) {
+    min = 0;
+    max = std::min(max, lmax);
+  }
+  if (rmin >= 0) {
+    min = 0;
+    max = std::min(max, rmax);
+  }
+  return Type::Range(min, max, zone());
+}
+
+Type* OperationTyper::NumberBitwiseXor(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+  lhs = NumberToInt32(lhs);
+  rhs = NumberToInt32(rhs);
+
+  double lmin = lhs->Min();
+  double rmin = rhs->Min();
+  double lmax = lhs->Max();
+  double rmax = rhs->Max();
+  if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
+    // Xor-ing two values of the same sign (both negative or both
+    // non-negative) results in a non-negative value.
+    return Type::Unsigned31();
+  }
+  if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
+    // Xor-ing a negative and a non-negative value results in a negative value.
+    // TODO(jarin) Use a range here.
+    return Type::Negative32();
+  }
+  return Type::Signed32();
+}
+
+Type* OperationTyper::NumberShiftLeft(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  // TODO(turbofan): Infer a better type here.
+  return Type::Signed32();
+}
+
+Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+  lhs = NumberToInt32(lhs);
+  rhs = NumberToUint32(rhs);
+
+  double min = kMinInt;
+  double max = kMaxInt;
+  if (lhs->Min() >= 0) {
+    // Right-shifting a non-negative value cannot make it negative, nor larger.
+    min = std::max(min, 0.0);
+    max = std::min(max, lhs->Max());
+    if (rhs->Min() > 0 && rhs->Max() <= 31) {
+      max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
+    }
+  }
+  if (lhs->Max() < 0) {
+    // Right-shifting a negative value cannot make it non-negative, nor smaller.
+    min = std::max(min, lhs->Min());
+    max = std::min(max, -1.0);
+    if (rhs->Min() > 0 && rhs->Max() <= 31) {
+      min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
+    }
+  }
+  if (rhs->Min() > 0 && rhs->Max() <= 31) {
+    // Right-shifting by a positive value yields a small integer value.
+    double shift_min = kMinInt >> static_cast<int>(rhs->Min());
+    double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
+    min = std::max(min, shift_min);
+    max = std::min(max, shift_max);
+  }
+  // TODO(jarin) Ideally, the following micro-optimization should be performed
+  // by the type constructor.
+  if (max == kMaxInt && min == kMinInt) return Type::Signed32();
+  return Type::Range(min, max, zone());
+}
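
Worked example (illustrative): for lhs = [16, 64] and rhs = [2, 2], the
non-negative branch sets min = 0 and max = 64, then tightens max to
64 >> 2 = 16; the generic clamp against kMinInt >> 2 and kMaxInt >> 2 changes
nothing further, so the result is Range(0, 16).
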
+
+Type* OperationTyper::NumberShiftRightLogical(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+
+  if (!lhs->IsInhabited()) return Type::None();
+
+  lhs = NumberToUint32(lhs);
+
+  // Logical right-shifting any value cannot make it larger.
+  return Type::Range(0.0, lhs->Max(), zone());
+}
+
+Type* OperationTyper::NumberAtan2(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+  return Type::Number();
+}
+
+Type* OperationTyper::NumberImul(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+  // TODO(turbofan): We should be able to do better here.
+  return Type::Signed32();
+}
+
+Type* OperationTyper::NumberMax(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
+    return Type::NaN();
+  }
+  Type* type = Type::None();
+  // TODO(turbofan): Improve minus zero handling here.
+  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+    type = Type::Union(type, Type::NaN(), zone());
+  }
+  lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+  rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+  if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+    double max = std::max(lhs->Max(), rhs->Max());
+    double min = std::max(lhs->Min(), rhs->Min());
+    type = Type::Union(type, Type::Range(min, max, zone()), zone());
+  } else {
+    type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
+  }
+  return type;
+}
+
+Type* OperationTyper::NumberMin(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
+    return Type::NaN();
+  }
+  Type* type = Type::None();
+  // TODO(turbofan): Improve minus zero handling here.
+  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+    type = Type::Union(type, Type::NaN(), zone());
+  }
+  lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+  rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+  if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+    double max = std::min(lhs->Max(), rhs->Max());
+    double min = std::min(lhs->Min(), rhs->Min());
+    type = Type::Union(type, Type::Range(min, max, zone()), zone());
+  } else {
+    type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
+  }
+  return type;
+}
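
Worked example (illustrative): for integer inputs lhs = [1, 3] and
rhs = [2, 5], NumberMax yields Range(max(1, 2), max(3, 5)) = Range(2, 5) and
NumberMin yields Range(min(1, 2), min(3, 5)) = Range(1, 3); NaN is unioned in
only when one of the inputs may be NaN.
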
+
+Type* OperationTyper::NumberPow(Type* lhs, Type* rhs) {
+  DCHECK(lhs->Is(Type::Number()));
+  DCHECK(rhs->Is(Type::Number()));
+  // TODO(turbofan): We should be able to do better here.
+  return Type::Number();
+}
+
+#define SPECULATIVE_NUMBER_BINOP(Name)                                     \
+  Type* OperationTyper::Speculative##Name(Type* lhs, Type* rhs) {          \
+    lhs = ToNumber(Type::Intersect(lhs, Type::NumberOrOddball(), zone())); \
+    rhs = ToNumber(Type::Intersect(rhs, Type::NumberOrOddball(), zone())); \
+    return Name(lhs, rhs);                                                 \
+  }
+SPECULATIVE_NUMBER_BINOP(NumberAdd)
+SPECULATIVE_NUMBER_BINOP(NumberSubtract)
+SPECULATIVE_NUMBER_BINOP(NumberMultiply)
+SPECULATIVE_NUMBER_BINOP(NumberDivide)
+SPECULATIVE_NUMBER_BINOP(NumberModulus)
+SPECULATIVE_NUMBER_BINOP(NumberBitwiseOr)
+SPECULATIVE_NUMBER_BINOP(NumberBitwiseAnd)
+SPECULATIVE_NUMBER_BINOP(NumberBitwiseXor)
+SPECULATIVE_NUMBER_BINOP(NumberShiftLeft)
+SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
+SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
+#undef SPECULATIVE_NUMBER_BINOP
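
For reference, SPECULATIVE_NUMBER_BINOP(NumberAdd) expands to exactly:

  Type* OperationTyper::SpeculativeNumberAdd(Type* lhs, Type* rhs) {
    lhs = ToNumber(Type::Intersect(lhs, Type::NumberOrOddball(), zone()));
    rhs = ToNumber(Type::Intersect(rhs, Type::NumberOrOddball(), zone()));
    return NumberAdd(lhs, rhs);
  }

so each speculative operator narrows its inputs to NumberOrOddball and
converts them to Number before delegating to the plain number typer.
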
+
 Type* OperationTyper::ToPrimitive(Type* type) {
   if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
     return type;
@@ -400,23 +959,8 @@
   return singleton_true();
 }
 
-Type* OperationTyper::TypeJSAdd(Type* lhs, Type* rhs) {
-  lhs = ToPrimitive(lhs);
-  rhs = ToPrimitive(rhs);
-  if (lhs->Maybe(Type::String()) || rhs->Maybe(Type::String())) {
-    if (lhs->Is(Type::String()) || rhs->Is(Type::String())) {
-      return Type::String();
-    } else {
-      return Type::NumberOrString();
-    }
-  }
-  lhs = ToNumber(lhs);
-  rhs = ToNumber(rhs);
-  return NumericAdd(lhs, rhs);
-}
-
-Type* OperationTyper::TypeJSSubtract(Type* lhs, Type* rhs) {
-  return NumericSubtract(ToNumber(lhs), ToNumber(rhs));
+Type* OperationTyper::TypeTypeGuard(const Operator* sigma_op, Type* input) {
+  return Type::Intersect(input, TypeGuardTypeOf(sigma_op), zone());
 }
 
 }  // namespace compiler
diff --git a/src/compiler/operation-typer.h b/src/compiler/operation-typer.h
index aa669ac..dcfe0c4 100644
--- a/src/compiler/operation-typer.h
+++ b/src/compiler/operation-typer.h
@@ -19,6 +19,8 @@
 
 namespace compiler {
 
+class Operator;
+
 class OperationTyper {
  public:
   OperationTyper(Isolate* isolate, Zone* zone);
@@ -32,11 +34,18 @@
   Type* ToNumber(Type* type);
   Type* WeakenRange(Type* current_range, Type* previous_range);
 
-  Type* NumericAdd(Type* lhs, Type* rhs);
-  Type* NumericSubtract(Type* lhs, Type* rhs);
-  Type* NumericMultiply(Type* lhs, Type* rhs);
-  Type* NumericDivide(Type* lhs, Type* rhs);
-  Type* NumericModulus(Type* lhs, Type* rhs);
+// Number unary operators.
+#define DECLARE_METHOD(Name) Type* Name(Type* type);
+  SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+// Number binary operators.
+#define DECLARE_METHOD(Name) Type* Name(Type* lhs, Type* rhs);
+  SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+  SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+  Type* TypeTypeGuard(const Operator* sigma_op, Type* input);
 
   enum ComparisonOutcomeFlags {
     kComparisonTrue = 1,
@@ -44,14 +53,9 @@
     kComparisonUndefined = 4
   };
 
-// Javascript binop typers.
-#define DECLARE_CASE(x) Type* Type##x(Type* lhs, Type* rhs);
-  JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
-#undef DECLARE_CASE
-
-  Type* singleton_false() { return singleton_false_; }
-  Type* singleton_true() { return singleton_true_; }
-  Type* singleton_the_hole() { return singleton_the_hole_; }
+  Type* singleton_false() const { return singleton_false_; }
+  Type* singleton_true() const { return singleton_true_; }
+  Type* singleton_the_hole() const { return singleton_the_hole_; }
 
  private:
   typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
@@ -63,18 +67,22 @@
   Type* Rangify(Type*);
   Type* AddRanger(double lhs_min, double lhs_max, double rhs_min,
                   double rhs_max);
-  Type* SubtractRanger(RangeType* lhs, RangeType* rhs);
+  Type* SubtractRanger(double lhs_min, double lhs_max, double rhs_min,
+                       double rhs_max);
   Type* MultiplyRanger(Type* lhs, Type* rhs);
-  Type* ModulusRanger(RangeType* lhs, RangeType* rhs);
 
-  Zone* zone() { return zone_; }
+  Zone* zone() const { return zone_; }
 
-  Zone* zone_;
+  Zone* const zone_;
   TypeCache const& cache_;
 
+  Type* infinity_;
+  Type* minus_infinity_;
   Type* singleton_false_;
   Type* singleton_true_;
   Type* singleton_the_hole_;
+  Type* signed32ish_;
+  Type* unsigned32ish_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 43b0076..68d884d 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -20,24 +20,45 @@
 
 
 // static
-int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
+bool OperatorProperties::HasFrameStateInput(const Operator* op) {
   switch (op->opcode()) {
     case IrOpcode::kCheckpoint:
     case IrOpcode::kFrameState:
-      return 1;
+      return true;
     case IrOpcode::kJSCallRuntime: {
       const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
-      return Linkage::NeedsFrameStateInput(p.id()) ? 1 : 0;
+      return Linkage::NeedsFrameStateInput(p.id());
     }
 
     // Strict equality cannot lazily deoptimize.
     case IrOpcode::kJSStrictEqual:
     case IrOpcode::kJSStrictNotEqual:
-      return 0;
+      return false;
+
+    // Binary operations
+    case IrOpcode::kJSAdd:
+    case IrOpcode::kJSSubtract:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSModulus:
+
+    // Bitwise operations
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSBitwiseAnd:
+
+    // Shift operations
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
 
     // Compare operations
     case IrOpcode::kJSEqual:
     case IrOpcode::kJSNotEqual:
+    case IrOpcode::kJSGreaterThan:
+    case IrOpcode::kJSGreaterThanOrEqual:
+    case IrOpcode::kJSLessThan:
+    case IrOpcode::kJSLessThanOrEqual:
     case IrOpcode::kJSHasProperty:
     case IrOpcode::kJSInstanceOf:
 
@@ -78,35 +99,10 @@
     case IrOpcode::kJSForInNext:
     case IrOpcode::kJSForInPrepare:
     case IrOpcode::kJSStackCheck:
-      return 1;
-
-    // Binary operators that can deopt in the middle the operation (e.g.,
-    // as a result of lazy deopt in ToNumber conversion) need a second frame
-    // state so that we can resume before the operation.
-    case IrOpcode::kJSMultiply:
-    case IrOpcode::kJSAdd:
-    case IrOpcode::kJSBitwiseAnd:
-    case IrOpcode::kJSBitwiseOr:
-    case IrOpcode::kJSBitwiseXor:
-    case IrOpcode::kJSDivide:
-    case IrOpcode::kJSModulus:
-    case IrOpcode::kJSShiftLeft:
-    case IrOpcode::kJSShiftRight:
-    case IrOpcode::kJSShiftRightLogical:
-    case IrOpcode::kJSSubtract:
-      return 2;
-
-    // Compare operators that can deopt in the middle the operation (e.g.,
-    // as a result of lazy deopt in ToNumber conversion) need a second frame
-    // state so that we can resume before the operation.
-    case IrOpcode::kJSGreaterThan:
-    case IrOpcode::kJSGreaterThanOrEqual:
-    case IrOpcode::kJSLessThan:
-    case IrOpcode::kJSLessThanOrEqual:
-      return 2;
+      return true;
 
     default:
-      return 0;
+      return false;
   }
 }
 
diff --git a/src/compiler/operator-properties.h b/src/compiler/operator-properties.h
index e7ecd93..4fe5f59 100644
--- a/src/compiler/operator-properties.h
+++ b/src/compiler/operator-properties.h
@@ -14,14 +14,17 @@
 // Forward declarations.
 class Operator;
 
-
 class OperatorProperties final {
  public:
   static bool HasContextInput(const Operator* op);
   static int GetContextInputCount(const Operator* op) {
     return HasContextInput(op) ? 1 : 0;
   }
-  static int GetFrameStateInputCount(const Operator* op);
+
+  static bool HasFrameStateInput(const Operator* op);
+  static int GetFrameStateInputCount(const Operator* op) {
+    return HasFrameStateInput(op) ? 1 : 0;
+  }
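+
+  // Note: the count is now always 0 or 1; the former special case in which
+  // binary and compare operators took a second frame state (so execution
+  // could resume before the operation) is gone.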
 
   static int GetTotalInputCount(const Operator* op);
 
diff --git a/src/compiler/operator.cc b/src/compiler/operator.cc
index ae10348..fa1b2d8 100644
--- a/src/compiler/operator.cc
+++ b/src/compiler/operator.cc
@@ -44,8 +44,22 @@
   return os;
 }
 
+void Operator::PrintToImpl(std::ostream& os, PrintVerbosity verbose) const {
+  os << mnemonic();
+}
 
-void Operator::PrintTo(std::ostream& os) const { os << mnemonic(); }
+void Operator::PrintPropsTo(std::ostream& os) const {
+  std::string separator = "";
+
+#define PRINT_PROP_IF_SET(name)         \
+  if (HasProperty(Operator::k##name)) { \
+    os << separator;                    \
+    os << #name;                        \
+    separator = ", ";                   \
+  }
+  OPERATOR_PROPERTY_LIST(PRINT_PROP_IF_SET)
+#undef PRINT_PROP_IF_SET
+}
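+
+// Sketch of the resulting output: an operator carrying kCommutative and
+// kNoThrow prints as "Commutative, NoThrow"; an operator with no properties
+// set prints nothing, since the separator is only emitted between set bits.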
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
index 8f288cb..b6ec2c6 100644
--- a/src/compiler/operator.h
+++ b/src/compiler/operator.h
@@ -49,7 +49,14 @@
     kEliminatable = kNoDeopt | kNoWrite | kNoThrow,
     kPure = kNoDeopt | kNoRead | kNoWrite | kNoThrow | kIdempotent
   };
+
+// List of all bits, for the visualizer.
+#define OPERATOR_PROPERTY_LIST(V) \
+  V(Commutative)                  \
+  V(Associative) V(Idempotent) V(NoRead) V(NoWrite) V(NoThrow) V(NoDeopt)
+
   typedef base::Flags<Property, uint8_t> Properties;
+  enum class PrintVerbosity { kVerbose, kSilent };
 
   // Constructor.
   Operator(Opcode opcode, Properties properties, const char* mnemonic,
@@ -111,11 +118,20 @@
   }
 
   // TODO(titzer): API for input and output types, for typechecking graph.
- protected:
+
   // Print the full operator into the given stream, including any
   // static parameters. Useful for debugging and visualizing the IR.
-  virtual void PrintTo(std::ostream& os) const;
-  friend std::ostream& operator<<(std::ostream& os, const Operator& op);
+  void PrintTo(std::ostream& os,
+               PrintVerbosity verbose = PrintVerbosity::kVerbose) const {
+    // We cannot make PrintTo virtual, because default arguments to virtual
+    // methods are banned in the style guide.
+    return PrintToImpl(os, verbose);
+  }
+
+  void PrintPropsTo(std::ostream& os) const;
+
+ protected:
+  virtual void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const;
 
  private:
   Opcode opcode_;
@@ -172,14 +188,19 @@
   size_t HashCode() const final {
     return base::hash_combine(this->opcode(), this->hash_(this->parameter()));
   }
-  virtual void PrintParameter(std::ostream& os) const {
-    os << "[" << this->parameter() << "]";
+  // For most parameter types, we have only a verbose way to print them, namely
+  // ostream << parameter. But for some types it is particularly useful to have
+  // a shorter way to print them for the node labels in Turbolizer. The
+// following method can be overridden to provide both concise and verbose
+// printing of a parameter.
+
+  virtual void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
+    os << "[" << parameter() << "]";
   }
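+  // A parameter type that wants a shorter Turbolizer label could override
+  // the default above along these lines (a sketch, not part of this change):
+  //
+  //   void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
+  //     if (verbose == PrintVerbosity::kVerbose) {
+  //       os << "[" << parameter() << "]";
+  //     }  // Under kSilent, print nothing and keep just the mnemonic.
+  //   }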
 
- protected:
-  void PrintTo(std::ostream& os) const final {
+  virtual void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const {
     os << mnemonic();
-    PrintParameter(os);
+    PrintParameter(os, verbose);
   }
 
  private:
diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc
index 55431c2..187e612 100644
--- a/src/compiler/osr.cc
+++ b/src/compiler/osr.cc
@@ -24,10 +24,16 @@
 namespace compiler {
 
 OsrHelper::OsrHelper(CompilationInfo* info)
-    : parameter_count_(info->scope()->num_parameters()),
-      stack_slot_count_(info->scope()->num_stack_slots() +
-                        info->osr_expr_stack_height()) {}
-
+    : parameter_count_(
+          info->is_optimizing_from_bytecode()
+              ? info->shared_info()->bytecode_array()->parameter_count()
+              : info->scope()->num_parameters()),
+      stack_slot_count_(
+          info->is_optimizing_from_bytecode()
+              ? info->shared_info()->bytecode_array()->register_count() +
+                    InterpreterFrameConstants::kExtraSlotCount
+              : info->scope()->num_stack_slots() +
+                    info->osr_expr_stack_height()) {}
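+// For bytecode OSR the frame shape is dictated by the interpreter: one stack
+// slot per interpreter register plus the fixed interpreter-frame overhead.
+// The AST path still sizes the frame from the scope's slots plus the OSR
+// expression stack height.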
 
 #ifdef DEBUG
 #define TRACE_COND (FLAG_trace_turbo_graph && FLAG_trace_osr)
@@ -78,8 +84,8 @@
     }
 
     // Copy all nodes.
-    for (size_t i = 0; i < all.live.size(); i++) {
-      Node* orig = all.live[i];
+    for (size_t i = 0; i < all.reachable.size(); i++) {
+      Node* orig = all.reachable[i];
       Node* copy = mapping->at(orig->id());
       if (copy != sentinel) {
         // Mapping already exists.
@@ -107,7 +113,7 @@
     }
 
     // Fix missing inputs.
-    for (Node* orig : all.live) {
+    for (Node* orig : all.reachable) {
       Node* copy = mapping->at(orig->id());
       for (int j = 0; j < copy->InputCount(); j++) {
         if (copy->InputAt(j) == sentinel) {
diff --git a/src/compiler/pipeline-statistics.cc b/src/compiler/pipeline-statistics.cc
index b98f837..5b97abe 100644
--- a/src/compiler/pipeline-statistics.cc
+++ b/src/compiler/pipeline-statistics.cc
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <memory>
+
 #include "src/compiler.h"
 #include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/zone-pool.h"
@@ -12,8 +14,8 @@
 
 void PipelineStatistics::CommonStats::Begin(
     PipelineStatistics* pipeline_stats) {
-  DCHECK(scope_.is_empty());
-  scope_.Reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
+  DCHECK(!scope_);
+  scope_.reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
   timer_.Start();
   outer_zone_initial_size_ = pipeline_stats->OuterZoneSize();
   allocated_bytes_at_start_ =
@@ -26,7 +28,7 @@
 void PipelineStatistics::CommonStats::End(
     PipelineStatistics* pipeline_stats,
     CompilationStatistics::BasicStats* diff) {
-  DCHECK(!scope_.is_empty());
+  DCHECK(scope_);
   diff->function_name_ = pipeline_stats->function_name_;
   diff->delta_ = timer_.Elapsed();
   size_t outer_zone_diff =
@@ -36,7 +38,7 @@
       diff->max_allocated_bytes_ + allocated_bytes_at_start_;
   diff->total_allocated_bytes_ =
       outer_zone_diff + scope_->GetTotalAllocatedBytes();
-  scope_.Reset(nullptr);
+  scope_.reset();
   timer_.Stop();
 }
 
@@ -52,7 +54,7 @@
       phase_name_(nullptr) {
   if (info->has_shared_info()) {
     source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
-    base::SmartArrayPointer<char> name =
+    std::unique_ptr<char[]> name =
         info->shared_info()->DebugName()->ToCString();
     function_name_ = name.get();
   }
diff --git a/src/compiler/pipeline-statistics.h b/src/compiler/pipeline-statistics.h
index c52c61c..a9931eb 100644
--- a/src/compiler/pipeline-statistics.h
+++ b/src/compiler/pipeline-statistics.h
@@ -5,10 +5,10 @@
 #ifndef V8_COMPILER_PIPELINE_STATISTICS_H_
 #define V8_COMPILER_PIPELINE_STATISTICS_H_
 
+#include <memory>
 #include <string>
 
 #include "src/base/platform/elapsed-timer.h"
-#include "src/base/smart-pointers.h"
 #include "src/compilation-statistics.h"
 #include "src/compiler/zone-pool.h"
 
@@ -39,16 +39,19 @@
     void End(PipelineStatistics* pipeline_stats,
              CompilationStatistics::BasicStats* diff);
 
-    base::SmartPointer<ZonePool::StatsScope> scope_;
+    std::unique_ptr<ZonePool::StatsScope> scope_;
     base::ElapsedTimer timer_;
     size_t outer_zone_initial_size_;
     size_t allocated_bytes_at_start_;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(CommonStats);
   };
 
-  bool InPhaseKind() { return !phase_kind_stats_.scope_.is_empty(); }
+  bool InPhaseKind() { return !!phase_kind_stats_.scope_; }
 
   friend class PhaseScope;
-  bool InPhase() { return !phase_stats_.scope_.is_empty(); }
+  bool InPhase() { return !!phase_stats_.scope_; }
   void BeginPhase(const char* name);
   void EndPhase();
 
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index d592000..ba7aa96 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/pipeline.h"
 
 #include <fstream>  // NOLINT(readability/streams)
+#include <memory>
 #include <sstream>
 
 #include "src/base/adapters.h"
@@ -44,6 +45,7 @@
 #include "src/compiler/load-elimination.h"
 #include "src/compiler/loop-analysis.h"
 #include "src/compiler/loop-peeling.h"
+#include "src/compiler/loop-variable-optimizer.h"
 #include "src/compiler/machine-operator-reducer.h"
 #include "src/compiler/memory-optimizer.h"
 #include "src/compiler/move-optimizer.h"
@@ -67,7 +69,7 @@
 #include "src/compiler/zone-pool.h"
 #include "src/isolate-inl.h"
 #include "src/ostreams.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parse-info.h"
 #include "src/register-configuration.h"
 #include "src/type-info.h"
 #include "src/utils.h"
@@ -99,7 +101,8 @@
     simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
     machine_ = new (graph_zone_) MachineOperatorBuilder(
         graph_zone_, MachineType::PointerRepresentation(),
-        InstructionSelector::SupportedMachineOperatorFlags());
+        InstructionSelector::SupportedMachineOperatorFlags(),
+        InstructionSelector::AlignmentRequirements());
     common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
     javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
     jsgraph_ = new (graph_zone_)
@@ -304,7 +307,7 @@
  private:
   Isolate* const isolate_;
   CompilationInfo* const info_;
-  base::SmartArrayPointer<char> debug_name_;
+  std::unique_ptr<char[]> debug_name_;
   Zone* outer_zone_ = nullptr;
   ZonePool* const zone_pool_;
   PipelineStatistics* pipeline_statistics_ = nullptr;
@@ -531,7 +534,7 @@
   if (FLAG_trace_turbo) {
     TurboJsonFile json_of(info, std::ios_base::trunc);
     Handle<Script> script = info->script();
-    base::SmartArrayPointer<char> function_name = info->GetDebugName();
+    std::unique_ptr<char[]> function_name = info->GetDebugName();
     int pos = info->shared_info()->start_position();
     json_of << "{\"function\":\"" << function_name.get()
             << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
@@ -570,22 +573,24 @@
         linkage_(nullptr) {}
 
  protected:
-  Status CreateGraphImpl() final;
-  Status OptimizeGraphImpl() final;
-  Status GenerateCodeImpl() final;
+  Status PrepareJobImpl() final;
+  Status ExecuteJobImpl() final;
+  Status FinalizeJobImpl() final;
 
  private:
   Zone zone_;
   ZonePool zone_pool_;
   ParseInfo parse_info_;
   CompilationInfo info_;
-  base::SmartPointer<PipelineStatistics> pipeline_statistics_;
+  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
   PipelineData data_;
   PipelineImpl pipeline_;
   Linkage* linkage_;
+
+  DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
 };
 
-PipelineCompilationJob::Status PipelineCompilationJob::CreateGraphImpl() {
+PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
   if (info()->shared_info()->asm_function()) {
     if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
     info()->MarkAsFunctionContextSpecializing();
@@ -601,12 +606,23 @@
     info()->MarkAsDeoptimizationEnabled();
   }
   if (!info()->is_optimizing_from_bytecode()) {
+    if (FLAG_inline_accessors) {
+      info()->MarkAsAccessorInliningEnabled();
+    }
     if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
       info()->MarkAsTypeFeedbackEnabled();
     }
     if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
   }
 
+  // TODO(mstarzinger): Hack to ensure that certain call descriptors are
+  // initialized on the main thread, since they are needed off-thread by the
+  // effect control linearizer.
+  CodeFactory::CopyFastSmiOrObjectElements(info()->isolate());
+  CodeFactory::GrowFastDoubleElements(info()->isolate());
+  CodeFactory::GrowFastSmiOrObjectElements(info()->isolate());
+  CodeFactory::ToNumber(info()->isolate());
+
   linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
 
   if (!pipeline_.CreateGraph()) {
@@ -617,12 +633,12 @@
   return SUCCEEDED;
 }
 
-PipelineCompilationJob::Status PipelineCompilationJob::OptimizeGraphImpl() {
+PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
   if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
   return SUCCEEDED;
 }
 
-PipelineCompilationJob::Status PipelineCompilationJob::GenerateCodeImpl() {
+PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
   Handle<Code> code = pipeline_.GenerateCode(linkage_);
   if (code.is_null()) {
     if (info()->bailout_reason() == kNoReason) {
@@ -644,16 +660,16 @@
   explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
                                       CallDescriptor* descriptor,
                                       SourcePositionTable* source_positions)
-      : CompilationJob(info, "TurboFan"),
+      : CompilationJob(info, "TurboFan", State::kReadyToExecute),
         zone_pool_(info->isolate()->allocator()),
         data_(&zone_pool_, info, graph, source_positions),
         pipeline_(&data_),
         linkage_(descriptor) {}
 
  protected:
-  Status CreateGraphImpl() final;
-  Status OptimizeGraphImpl() final;
-  Status GenerateCodeImpl() final;
+  Status PrepareJobImpl() final;
+  Status ExecuteJobImpl() final;
+  Status FinalizeJobImpl() final;
 
  private:
   ZonePool zone_pool_;
@@ -663,12 +679,13 @@
 };
 
 PipelineWasmCompilationJob::Status
-PipelineWasmCompilationJob::CreateGraphImpl() {
+PipelineWasmCompilationJob::PrepareJobImpl() {
+  UNREACHABLE();  // Prepare should always be skipped for WasmCompilationJob.
   return SUCCEEDED;
 }
 
 PipelineWasmCompilationJob::Status
-PipelineWasmCompilationJob::OptimizeGraphImpl() {
+PipelineWasmCompilationJob::ExecuteJobImpl() {
   if (FLAG_trace_turbo) {
     TurboJsonFile json_of(info(), std::ios_base::trunc);
     json_of << "{\"function\":\"" << info()->GetDebugName().get()
@@ -682,7 +699,7 @@
 }
 
 PipelineWasmCompilationJob::Status
-PipelineWasmCompilationJob::GenerateCodeImpl() {
+PipelineWasmCompilationJob::FinalizeJobImpl() {
   pipeline_.GenerateCode(&linkage_);
   return SUCCEEDED;
 }
@@ -786,6 +803,9 @@
         data->info()->dependencies());
     JSNativeContextSpecialization::Flags flags =
         JSNativeContextSpecialization::kNoFlags;
+    if (data->info()->is_accessor_inlining_enabled()) {
+      flags |= JSNativeContextSpecialization::kAccessorInliningEnabled;
+    }
     if (data->info()->is_bailout_on_uninitialized()) {
       flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
     }
@@ -800,6 +820,11 @@
                                      ? JSInliningHeuristic::kGeneralInlining
                                      : JSInliningHeuristic::kRestrictedInlining,
                                  temp_zone, data->info(), data->jsgraph());
+    JSIntrinsicLowering intrinsic_lowering(
+        &graph_reducer, data->jsgraph(),
+        data->info()->is_deoptimization_enabled()
+            ? JSIntrinsicLowering::kDeoptimizationEnabled
+            : JSIntrinsicLowering::kDeoptimizationDisabled);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
     if (data->info()->is_frame_specializing()) {
@@ -810,6 +835,7 @@
     }
     AddReducer(data, &graph_reducer, &native_context_specialization);
     AddReducer(data, &graph_reducer, &context_specialization);
+    AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &call_reducer);
     if (!data->info()->is_optimizing_from_bytecode()) {
       AddReducer(data, &graph_reducer, &inlining);
@@ -825,7 +851,10 @@
   void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
     NodeVector roots(temp_zone);
     data->jsgraph()->GetCachedNodes(&roots);
-    typer->Run(roots);
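+    // The induction variable analysis runs first (guarded by the
+    // --turbo-loop-variable flag) so that the typer can use the discovered
+    // loop variable bounds when typing loop phis.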
+    LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
+                                         data->common(), temp_zone);
+    if (FLAG_turbo_loop_variable) induction_vars.Run();
+    typer->Run(roots, &induction_vars);
   }
 };
 
@@ -872,9 +901,12 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
-    LoadElimination load_elimination(&graph_reducer, data->graph(),
-                                     data->jsgraph()->simplified());
-    JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+    JSBuiltinReducer builtin_reducer(
+        &graph_reducer, data->jsgraph(),
+        data->info()->is_deoptimization_enabled()
+            ? JSBuiltinReducer::kDeoptimizationEnabled
+            : JSBuiltinReducer::kNoFlags,
+        data->info()->dependencies());
     MaybeHandle<LiteralsArray> literals_array =
         data->info()->is_native_context_specializing()
             ? handle(data->info()->closure()->literals(), data->isolate())
@@ -886,20 +918,9 @@
     if (data->info()->is_deoptimization_enabled()) {
       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
     }
-    if (data->info()->shared_info()->HasBytecodeArray()) {
-      typed_lowering_flags |= JSTypedLowering::kDisableBinaryOpReduction;
-    }
-    if (data->info()->is_type_feedback_enabled()) {
-      typed_lowering_flags |= JSTypedLowering::kTypeFeedbackEnabled;
-    }
     JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
                                    typed_lowering_flags, data->jsgraph(),
                                    temp_zone);
-    JSIntrinsicLowering intrinsic_lowering(
-        &graph_reducer, data->jsgraph(),
-        data->info()->is_deoptimization_enabled()
-            ? JSIntrinsicLowering::kDeoptimizationEnabled
-            : JSIntrinsicLowering::kDeoptimizationDisabled);
     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
     CheckpointElimination checkpoint_elimination(&graph_reducer);
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -910,8 +931,6 @@
       AddReducer(data, &graph_reducer, &create_lowering);
     }
     AddReducer(data, &graph_reducer, &typed_lowering);
-    AddReducer(data, &graph_reducer, &intrinsic_lowering);
-    AddReducer(data, &graph_reducer, &load_elimination);
     AddReducer(data, &graph_reducer, &simple_reducer);
     AddReducer(data, &graph_reducer, &checkpoint_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
@@ -920,22 +939,6 @@
 };
 
 
-struct BranchEliminationPhase {
-  static const char* phase_name() { return "branch condition elimination"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
-    BranchElimination branch_condition_elimination(&graph_reducer,
-                                                   data->jsgraph(), temp_zone);
-    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                              data->common());
-    AddReducer(data, &graph_reducer, &branch_condition_elimination);
-    AddReducer(data, &graph_reducer, &dead_code_elimination);
-    graph_reducer.ReduceGraph();
-  }
-};
-
-
 struct EscapeAnalysisPhase {
   static const char* phase_name() { return "escape analysis"; }
 
@@ -956,34 +959,63 @@
   static const char* phase_name() { return "representation selection"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    SimplifiedLowering::Flags flags =
-        data->info()->is_type_feedback_enabled()
-            ? SimplifiedLowering::kTypeFeedbackEnabled
-            : SimplifiedLowering::kNoFlag;
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
-                                data->source_positions(), flags);
+                                data->source_positions());
     lowering.LowerAllNodes();
   }
 };
 
+struct LoopPeelingPhase {
+  static const char* phase_name() { return "loop peeling"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    GraphTrimmer trimmer(temp_zone, data->graph());
+    NodeVector roots(temp_zone);
+    data->jsgraph()->GetCachedNodes(&roots);
+    trimmer.TrimGraph(roots.begin(), roots.end());
+
+    LoopTree* loop_tree =
+        LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
+    LoopPeeler::PeelInnerLoopsOfTree(data->graph(), data->common(), loop_tree,
+                                     temp_zone);
+  }
+};
+
+struct LoopExitEliminationPhase {
+  static const char* phase_name() { return "loop exit elimination"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
+  }
+};
+
+struct GenericLoweringPhase {
+  static const char* phase_name() { return "generic lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+    JSGenericLowering generic_lowering(data->jsgraph());
+    AddReducer(data, &graph_reducer, &generic_lowering);
+    graph_reducer.ReduceGraph();
+  }
+};
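+// Generic lowering was previously folded into the early optimization pass;
+// as a dedicated phase it now runs right after representation selection,
+// while early optimization moves into the block building phase (see below).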
+
 struct EarlyOptimizationPhase {
   static const char* phase_name() { return "early optimization"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
-    JSGenericLowering generic_lowering(data->jsgraph());
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
-    ValueNumberingReducer value_numbering(temp_zone);
+    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
     MachineOperatorReducer machine_reducer(data->jsgraph());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &simple_reducer);
     AddReducer(data, &graph_reducer, &redundancy_elimination);
-    AddReducer(data, &graph_reducer, &generic_lowering);
     AddReducer(data, &graph_reducer, &value_numbering);
     AddReducer(data, &graph_reducer, &machine_reducer);
     AddReducer(data, &graph_reducer, &common_reducer);
@@ -1032,12 +1064,61 @@
   }
 };
 
-struct StoreStoreEliminationPhase {
-  static const char* phase_name() { return "Store-store elimination"; }
+// Store-store elimination benefits greatly from running a common operator
+// reducer just before it, which eliminates conditional deopts that have a
+// constant condition.
+
+struct DeadCodeEliminationPhase {
+  static const char* phase_name() { return "common operator reducer"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    StoreStoreElimination store_store_elimination(data->jsgraph(), temp_zone);
-    store_store_elimination.Run();
+    // Run the common operator reducer.
+    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+                                              data->common());
+    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+                                         data->common(), data->machine());
+    AddReducer(data, &graph_reducer, &dead_code_elimination);
+    AddReducer(data, &graph_reducer, &common_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+struct StoreStoreEliminationPhase {
+  static const char* phase_name() { return "store-store elimination"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    GraphTrimmer trimmer(temp_zone, data->graph());
+    NodeVector roots(temp_zone);
+    data->jsgraph()->GetCachedNodes(&roots);
+    trimmer.TrimGraph(roots.begin(), roots.end());
+
+    StoreStoreElimination::Run(data->jsgraph(), temp_zone);
+  }
+};
+
+struct LoadEliminationPhase {
+  static const char* phase_name() { return "load elimination"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+    BranchElimination branch_condition_elimination(&graph_reducer,
+                                                   data->jsgraph(), temp_zone);
+    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+                                              data->common());
+    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
+    LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+                                     temp_zone);
+    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+                                         data->common(), data->machine());
+    AddReducer(data, &graph_reducer, &branch_condition_elimination);
+    AddReducer(data, &graph_reducer, &dead_code_elimination);
+    AddReducer(data, &graph_reducer, &redundancy_elimination);
+    AddReducer(data, &graph_reducer, &load_elimination);
+    AddReducer(data, &graph_reducer, &value_numbering);
+    AddReducer(data, &graph_reducer, &common_reducer);
+    graph_reducer.ReduceGraph();
   }
 };
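+// Load elimination previously ran as part of typed lowering; as its own phase
+// it is combined with branch elimination, redundancy elimination and value
+// numbering, all driven to a fixpoint together by the graph reducer.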
 
@@ -1062,15 +1143,18 @@
 
   void Run(PipelineData* data, Zone* temp_zone) {
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+    BranchElimination branch_condition_elimination(&graph_reducer,
+                                                   data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
-    ValueNumberingReducer value_numbering(temp_zone);
+    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
     MachineOperatorReducer machine_reducer(data->jsgraph());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     SelectLowering select_lowering(data->jsgraph()->graph(),
                                    data->jsgraph()->common());
     TailCallOptimization tco(data->common(), data->graph());
+    AddReducer(data, &graph_reducer, &branch_condition_elimination);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &value_numbering);
     AddReducer(data, &graph_reducer, &machine_reducer);
@@ -1422,10 +1506,7 @@
     // Type the graph and keep the Typer running on newly created nodes within
     // this scope; the Typer is automatically unlinked from the Graph once we
     // leave this scope below.
-    Typer typer(isolate(), data->graph(), info()->is_deoptimization_enabled()
-                                              ? Typer::kDeoptimizationEnabled
-                                              : Typer::kNoFlags,
-                info()->dependencies());
+    Typer typer(isolate(), data->graph());
     Run<TyperPhase>(&typer);
     RunPrintAndVerify("Typed");
 
@@ -1435,6 +1516,14 @@
     Run<TypedLoweringPhase>();
     RunPrintAndVerify("Lowered typed");
 
+    if (FLAG_turbo_loop_peeling) {
+      Run<LoopPeelingPhase>();
+      RunPrintAndVerify("Loops peeled", true);
+    } else {
+      Run<LoopExitEliminationPhase>();
+      RunPrintAndVerify("Loop exits eliminated", true);
+    }
+
     if (FLAG_turbo_stress_loop_peeling) {
       Run<StressLoopPeelingPhase>();
       RunPrintAndVerify("Loop peeled");
@@ -1445,11 +1534,18 @@
       RunPrintAndVerify("Escape Analysed");
     }
 
-    // Select representations.
-    Run<RepresentationSelectionPhase>();
-    RunPrintAndVerify("Representations selected", true);
+    if (!info()->shared_info()->asm_function() && FLAG_turbo_load_elimination) {
+      Run<LoadEliminationPhase>();
+      RunPrintAndVerify("Load eliminated");
+    }
   }
 
+  // Select representations. This has to run without the Typer decorator,
+  // because we cannot compute meaningful types anyway, and the computed
+  // types might even conflict with the representation/truncation logic.
+  Run<RepresentationSelectionPhase>();
+  RunPrintAndVerify("Representations selected", true);
+
 #ifdef DEBUG
   // From now on it is invalid to look at types on the nodes, because:
   //
@@ -1467,9 +1563,9 @@
   RunPrintAndVerify("Untyped", true);
 #endif
 
-  // Run early optimization pass.
-  Run<EarlyOptimizationPhase>();
-  RunPrintAndVerify("Early optimized", true);
+  // Run generic lowering pass.
+  Run<GenericLoweringPhase>();
+  RunPrintAndVerify("Generic lowering", true);
 
   data->EndPhaseKind();
 
@@ -1481,17 +1577,21 @@
 
   data->BeginPhaseKind("block building");
 
+  // Run early optimization pass.
+  Run<EarlyOptimizationPhase>();
+  RunPrintAndVerify("Early optimized", true);
+
   Run<EffectControlLinearizationPhase>();
   RunPrintAndVerify("Effect and control linearized", true);
 
+  Run<DeadCodeEliminationPhase>();
+  RunPrintAndVerify("Common operator reducer", true);
+
   if (FLAG_turbo_store_elimination) {
     Run<StoreStoreEliminationPhase>();
     RunPrintAndVerify("Store-store elimination", true);
   }
 
-  Run<BranchEliminationPhase>();
-  RunPrintAndVerify("Branch conditions eliminated", true);
-
   // Optimize control flow.
   if (FLAG_turbo_cf_optimization) {
     Run<ControlFlowOptimizationPhase>();
@@ -1527,9 +1627,9 @@
   // Construct a pipeline for scheduling and code generation.
   ZonePool zone_pool(isolate->allocator());
   PipelineData data(&zone_pool, &info, graph, schedule);
-  base::SmartPointer<PipelineStatistics> pipeline_statistics;
+  std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
-    pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
+    pipeline_statistics.reset(new PipelineStatistics(&info, &zone_pool));
     pipeline_statistics->BeginPhaseKind("stub codegen");
   }
 
@@ -1552,7 +1652,7 @@
 // static
 Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
   ZonePool zone_pool(info->isolate()->allocator());
-  base::SmartPointer<PipelineStatistics> pipeline_statistics(
+  std::unique_ptr<PipelineStatistics> pipeline_statistics(
       CreatePipelineStatistics(info, &zone_pool));
   PipelineData data(&zone_pool, info, pipeline_statistics.get());
   PipelineImpl pipeline(&data);
@@ -1581,9 +1681,9 @@
   // Construct a pipeline for scheduling and code generation.
   ZonePool zone_pool(info->isolate()->allocator());
   PipelineData data(&zone_pool, info, graph, schedule);
-  base::SmartPointer<PipelineStatistics> pipeline_statistics;
+  std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
-    pipeline_statistics.Reset(new PipelineStatistics(info, &zone_pool));
+    pipeline_statistics.reset(new PipelineStatistics(info, &zone_pool));
     pipeline_statistics->BeginPhaseKind("test codegen");
   }
 
@@ -1750,10 +1850,10 @@
                                      bool run_verifier) {
   PipelineData* data = this->data_;
   // Don't track usage for this zone in compiler stats.
-  base::SmartPointer<Zone> verifier_zone;
+  std::unique_ptr<Zone> verifier_zone;
   RegisterAllocatorVerifier* verifier = nullptr;
   if (run_verifier) {
-    verifier_zone.Reset(new Zone(isolate()->allocator()));
+    verifier_zone.reset(new Zone(isolate()->allocator()));
     verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
         verifier_zone.get(), config, data->sequence());
   }
@@ -1766,6 +1866,7 @@
 
   data->InitializeRegisterAllocationData(config, descriptor);
   if (info()->is_osr()) {
+    AllowHandleDereference allow_deref;
     OsrHelper osr_helper(info());
     osr_helper.SetupFrame(data->frame());
   }
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 4909414..9db36b4 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -470,19 +470,88 @@
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
   } while (0)
 
-#define ASSEMBLE_FLOAT_MAX(scratch_reg)                                       \
+#define ASSEMBLE_FLOAT_MAX()                                                  \
   do {                                                                        \
-    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
-    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(0),  \
-            i.InputDoubleRegister(1));                                        \
-  } while (0)
+    DoubleRegister left_reg = i.InputDoubleRegister(0);                       \
+    DoubleRegister right_reg = i.InputDoubleRegister(1);                      \
+    DoubleRegister result_reg = i.OutputDoubleRegister();                     \
+    Label check_nan_left, check_zero, return_left, return_right, done;        \
+    __ fcmpu(left_reg, right_reg);                                            \
+    __ bunordered(&check_nan_left);                                           \
+    __ beq(&check_zero);                                                      \
+    __ bge(&return_left);                                                     \
+    __ b(&return_right);                                                      \
+                                                                              \
+    __ bind(&check_zero);                                                     \
+    __ fcmpu(left_reg, kDoubleRegZero);                                       \
+    /* left == right != 0. */                                                 \
+    __ bne(&return_left);                                                     \
+    /* At this point, both left and right are either 0 or -0. */              \
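+    /* IEEE 754: (+0) + (-0) = +0, so fadd yields the correct maximum. */    \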
+    __ fadd(result_reg, left_reg, right_reg);                                 \
+    __ b(&done);                                                              \
+                                                                              \
+    __ bind(&check_nan_left);                                                 \
+    __ fcmpu(left_reg, left_reg);                                             \
+    /* left == NaN. */                                                        \
+    __ bunordered(&return_left);                                              \
+    __ bind(&return_right);                                                   \
+    if (!right_reg.is(result_reg)) {                                          \
+      __ fmr(result_reg, right_reg);                                          \
+    }                                                                         \
+    __ b(&done);                                                              \
+                                                                              \
+    __ bind(&return_left);                                                    \
+    if (!left_reg.is(result_reg)) {                                           \
+      __ fmr(result_reg, left_reg);                                           \
+    }                                                                         \
+    __ bind(&done);                                                           \
+  } while (0)
 
 
-#define ASSEMBLE_FLOAT_MIN(scratch_reg)                                       \
-  do {                                                                        \
-    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
-    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(1),  \
-            i.InputDoubleRegister(0));                                        \
+#define ASSEMBLE_FLOAT_MIN()                                                   \
+  do {                                                                         \
+    DoubleRegister left_reg = i.InputDoubleRegister(0);                        \
+    DoubleRegister right_reg = i.InputDoubleRegister(1);                       \
+    DoubleRegister result_reg = i.OutputDoubleRegister();                      \
+    Label check_nan_left, check_zero, return_left, return_right, done;         \
+    __ fcmpu(left_reg, right_reg);                                             \
+    __ bunordered(&check_nan_left);                                            \
+    __ beq(&check_zero);                                                       \
+    __ ble(&return_left);                                                      \
+    __ b(&return_right);                                                       \
+                                                                               \
+    __ bind(&check_zero);                                                      \
+    __ fcmpu(left_reg, kDoubleRegZero);                                        \
+    /* left == right != 0. */                                                  \
+    __ bne(&return_left);                                                      \
+    /* At this point, both left and right are either 0 or -0. */               \
+    /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R being */\
+    /* different registers is most efficiently expressed as -((-L) - R). */    \
+    __ fneg(left_reg, left_reg);                                               \
+    if (left_reg.is(right_reg)) {                                              \
+      __ fadd(result_reg, left_reg, right_reg);                                \
+    } else {                                                                   \
+      __ fsub(result_reg, left_reg, right_reg);                                \
+    }                                                                          \
+    __ fneg(result_reg, result_reg);                                           \
+    __ b(&done);                                                               \
+                                                                               \
+    __ bind(&check_nan_left);                                                  \
+    __ fcmpu(left_reg, left_reg);                                              \
+    /* left == NaN. */                                                         \
+    __ bunordered(&return_left);                                               \
+                                                                               \
+    __ bind(&return_right);                                                    \
+    if (!right_reg.is(result_reg)) {                                           \
+      __ fmr(result_reg, right_reg);                                           \
+    }                                                                          \
+    __ b(&done);                                                               \
+                                                                               \
+    __ bind(&return_left);                                                     \
+    if (!left_reg.is(result_reg)) {                                            \
+      __ fmr(result_reg, left_reg);                                            \
+    }                                                                          \
+    __ bind(&done);                                                            \
   } while (0)
 
 
@@ -728,21 +797,7 @@
   __ LeaveFrame(StackFrame::MANUAL);
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ RestoreFrameStateForTailCall();
   }
@@ -774,6 +829,116 @@
   __ bind(&done);
 }
 
+namespace {
+
+void FlushPendingPushRegisters(MacroAssembler* masm,
+                               FrameAccessState* frame_access_state,
+                               ZoneVector<Register>* pending_pushes) {
+  switch (pending_pushes->size()) {
+    case 0:
+      break;
+    case 1:
+      masm->Push((*pending_pushes)[0]);
+      break;
+    case 2:
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+      break;
+    case 3:
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+                 (*pending_pushes)[2]);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  frame_access_state->IncreaseSPDelta(pending_pushes->size());
+  pending_pushes->resize(0);
+}
+
+void AddPendingPushRegister(MacroAssembler* masm,
+                            FrameAccessState* frame_access_state,
+                            ZoneVector<Register>* pending_pushes,
+                            Register reg) {
+  pending_pushes->push_back(reg);
+  if (pending_pushes->size() == 3 || reg.is(ip)) {
+    FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+  }
+}
+
+void AdjustStackPointerForTailCall(
+    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+    ZoneVector<Register>* pending_pushes = nullptr,
+    bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    if (pending_pushes != nullptr) {
+      FlushPendingPushRegisters(masm, state, pending_pushes);
+    }
+    masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    if (pending_pushes != nullptr) {
+      FlushPendingPushRegisters(masm, state, pending_pushes);
+    }
+    masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
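+// The helpers above batch at most three registers per MacroAssembler::Push so
+// that adjacent pushes collapse into one multi-register push; ip is flushed
+// eagerly because it doubles as the scratch register for the loads in
+// AssembleTailCallBeforeGap below.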
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+  ZoneVector<MoveOperands*> pushes(zone());
+  GetPushCompatibleMoves(instr, flags, &pushes);
+
+  if (!pushes.empty() &&
+      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+       first_unused_stack_slot)) {
+    PPCOperandConverter g(this, instr);
+    ZoneVector<Register> pending_pushes(zone());
+    for (auto move : pushes) {
+      LocationOperand destination_location(
+          LocationOperand::cast(move->destination()));
+      InstructionOperand source(move->source());
+      AdjustStackPointerForTailCall(
+          masm(), frame_access_state(),
+          destination_location.index() - pending_pushes.size(),
+          &pending_pushes);
+      if (source.IsStackSlot()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               ip);
+      } else if (source.IsRegister()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               source_location.GetRegister());
+      } else if (source.IsImmediate()) {
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               ip);
+      } else {
+        // Pushes of non-scalar data types are not supported.
+        UNIMPLEMENTED();
+      }
+      move->Eliminate();
+    }
+    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+  }
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, nullptr, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -800,8 +965,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -820,14 +983,14 @@
       }
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!instr->InputAt(0)->IsImmediate());
       __ Jump(i.InputRegister(0));
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -859,8 +1022,6 @@
         __ cmp(cp, kScratchReg);
         __ Assert(eq, kWrongFunctionContext);
       }
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -870,6 +1031,7 @@
       __ Jump(ip);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction: {
@@ -880,8 +1042,13 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       if (instr->InputAt(0)->IsImmediate()) {
@@ -910,6 +1077,9 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -1207,6 +1377,24 @@
                LeaveOE, i.OutputRCBit());
       break;
 #endif
+
+    case kPPC_Mul32WithHigh32:
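+      // mullw and mulhw each read both inputs, so when an output register
+      // aliases an input, compute the low word into kScratchReg first and
+      // move it into place once the high word has been produced.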
+      if (i.OutputRegister(0).is(i.InputRegister(0)) ||
+          i.OutputRegister(0).is(i.InputRegister(1)) ||
+          i.OutputRegister(1).is(i.InputRegister(0)) ||
+          i.OutputRegister(1).is(i.InputRegister(1))) {
+        __ mullw(kScratchReg,
+                 i.InputRegister(0), i.InputRegister(1));  // low
+        __ mulhw(i.OutputRegister(1),
+                 i.InputRegister(0), i.InputRegister(1));  // high
+        __ mr(i.OutputRegister(0), kScratchReg);
+      } else {
+        __ mullw(i.OutputRegister(0),
+                 i.InputRegister(0), i.InputRegister(1));  // low
+        __ mulhw(i.OutputRegister(1),
+                 i.InputRegister(0), i.InputRegister(1));  // high
+      }
+      break;
     case kPPC_MulHigh32:
       __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
@@ -1262,33 +1450,54 @@
       // and generate a CallAddress instruction instead.
       ASSEMBLE_FLOAT_MODULO();
       break;
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
     case kIeee754Float64Atan2:
       ASSEMBLE_IEEE754_BINOP(atan2);
       break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kIeee754Float64Cbrt:
       ASSEMBLE_IEEE754_UNOP(cbrt);
       break;
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Exp:
       ASSEMBLE_IEEE754_UNOP(exp);
       break;
     case kIeee754Float64Expm1:
       ASSEMBLE_IEEE754_UNOP(expm1);
       break;
-    case kIeee754Float64Atanh:
-      ASSEMBLE_IEEE754_UNOP(atanh);
-      break;
     case kIeee754Float64Log:
       ASSEMBLE_IEEE754_UNOP(log);
       break;
@@ -1301,14 +1510,20 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
+    case kIeee754Float64Pow: {
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
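+      // The stub leaves its result in d3; move it to d1, where the other
+      // kIeee754 cases above produce theirs.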
+      __ Move(d1, d3);
+      break;
+    }
     case kPPC_Neg:
       __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
       break;
     case kPPC_MaxDouble:
-      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
+      ASSEMBLE_FLOAT_MAX();
       break;
     case kPPC_MinDouble:
-      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
+      ASSEMBLE_FLOAT_MIN();
       break;
     case kPPC_AbsDouble:
       ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
@@ -1863,6 +2078,9 @@
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2059,10 +2277,7 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int slot;
-          if (IsMaterializableFromFrame(src_object, &slot)) {
-            __ LoadP(dst, g.SlotToMemOperand(slot));
-          } else if (IsMaterializableFromRoot(src_object, &index)) {
+          if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
             __ Move(dst, src_object);
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index d697da3..9198bcb 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -42,6 +42,7 @@
   V(PPC_SubPair)                   \
   V(PPC_SubDouble)                 \
   V(PPC_Mul32)                     \
+  V(PPC_Mul32WithHigh32)           \
   V(PPC_Mul64)                     \
   V(PPC_MulHigh32)                 \
   V(PPC_MulHighU32)                \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index f41900d..dee8494 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -44,6 +44,7 @@
     case kPPC_SubPair:
     case kPPC_SubDouble:
     case kPPC_Mul32:
+    case kPPC_Mul32WithHigh32:
     case kPPC_Mul64:
     case kPPC_MulHigh32:
     case kPPC_MulHighU32:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index b724001..bad8ded 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -134,7 +134,14 @@
     inputs[input_count++] = g.Label(cont->false_block());
   }
 
-  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsDeoptimize()) {
+    // If we can deoptimize as a result of the binop, we need to make sure that
+    // the deopt inputs are not overwritten by the binop result. One way
+    // to achieve that is to declare the output register as same-as-first.
+    outputs[output_count++] = g.DefineSameAsFirst(node);
+  } else {
+    outputs[output_count++] = g.DefineAsRegister(node);
+  }
   if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
@@ -147,7 +154,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -187,12 +194,16 @@
       opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
       break;
 #if !V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
 #endif
     case MachineRepresentation::kWord32:
       opcode = kPPC_LoadWordU32;
       break;
 #if V8_TARGET_ARCH_PPC64
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord64:
       opcode = kPPC_LoadWord64;
@@ -288,12 +299,16 @@
         opcode = kPPC_StoreWord16;
         break;
 #if !V8_TARGET_ARCH_PPC64
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
 #endif
       case MachineRepresentation::kWord32:
         opcode = kPPC_StoreWord32;
         break;
 #if V8_TARGET_ARCH_PPC64
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
       case MachineRepresentation::kWord64:
         opcode = kPPC_StoreWord64;
@@ -320,6 +335,11 @@
   }
 }
 
+// The architecture supports unaligned access, so VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, so VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -350,6 +370,8 @@
       opcode = kCheckedLoadFloat64;
       break;
     case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
 #if !V8_TARGET_ARCH_PPC64
     case MachineRepresentation::kWord64:  // Fall through.
@@ -396,6 +418,8 @@
       opcode = kCheckedStoreFloat64;
       break;
     case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
 #if !V8_TARGET_ARCH_PPC64
     case MachineRepresentation::kWord64:  // Fall through.
@@ -865,7 +889,8 @@
       m.right().Is(32)) {
     // Just load and sign-extend the interesting 4 bytes instead. This happens,
     // for example, when we're loading and untagging SMIs.
-    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
+                                                AddressOption::kAllowAll);
     if (mleft.matches() && mleft.index() == nullptr) {
       int64_t offset = 0;
       Node* displacement = mleft.displacement();
@@ -947,6 +972,9 @@
 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
 #endif
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitInt32Add(Node* node) {
   VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
@@ -982,6 +1010,36 @@
 }
 #endif
 
+namespace {
+
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand left, InstructionOperand right,
+                  FlagsContinuation* cont);
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+                              FlagsContinuation* cont) {
+  PPCOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand result_operand = g.DefineAsRegister(node);
+  InstructionOperand high32_operand = g.TempRegister();
+  InstructionOperand temp_operand = g.TempRegister();
+  {
+    InstructionOperand outputs[] = {result_operand, high32_operand};
+    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
+                                   g.UseRegister(m.right().node())};
+    selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs);
+  }
+  {
+    InstructionOperand shift_31 = g.UseImmediate(31);
+    InstructionOperand outputs[] = {temp_operand};
+    InstructionOperand inputs[] = {result_operand, shift_31};
+    selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs);
+  }
+
+  VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont);
+}
+
+}  // namespace
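A standalone C++ sketch of the overflow test EmitInt32MulWithOverflow builds
above (plain C++, not V8 code; two's-complement wrap and an arithmetic right
shift are assumed, matching kPPC_ShiftRightAlg32):

    #include <cstdint>

    bool Int32MulOverflows(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      int32_t low = static_cast<int32_t>(product);         // kPPC_Mul32WithHigh32, output 0
      int32_t high = static_cast<int32_t>(product >> 32);  // kPPC_Mul32WithHigh32, output 1
      // The product fits in 32 bits iff the high word equals the
      // sign-extension of the low word; kNotEqual therefore means overflow.
      return high != (low >> 31);
    }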
+
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
   VisitRRR(this, kPPC_Mul32, node);
@@ -1216,47 +1274,11 @@
 
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
-  PPCOperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    Emit(kPPC_NegDouble | MiscField::encode(1), g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
-    return;
-  }
-  VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
-}
-
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  PPCOperandGenerator g(this);
   VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   // TODO(mbrandy): detect multiply-subtract
-  PPCOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
-      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
-        Float64BinopMatcher mright0(m.right().InputAt(0));
-        if (mright0.left().IsMinusZero()) {
-          // -floor(-x) = ceil(x)
-          Emit(kPPC_CeilDouble, g.DefineAsRegister(node),
-               g.UseRegister(mright0.right().node()));
-          return;
-        }
-      }
-    }
-    Emit(kPPC_NegDouble, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
-    return;
-  }
-  VisitRRR(this, kPPC_SubDouble, node);
-}
-
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
   VisitRRR(this, kPPC_SubDouble, node);
 }
 
@@ -1288,21 +1310,26 @@
        g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
 }
 
+void InstructionSelector::VisitFloat32Max(Node* node) {
+  VisitRRR(this, kPPC_MaxDouble | MiscField::encode(1), node);
+}
 
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+  VisitRRR(this, kPPC_MaxDouble, node);
+}
 
 
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
-
 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
   VisitRR(this, kPPC_Float64SilenceNaN, node);
 }
 
+void InstructionSelector::VisitFloat32Min(Node* node) {
+  VisitRRR(this, kPPC_MinDouble | MiscField::encode(1), node);
+}
 
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+  VisitRRR(this, kPPC_MinDouble, node);
+}
 
 
 void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -1382,9 +1409,13 @@
   UNREACHABLE();
 }
 
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitRR(this, kPPC_NegDouble, node);
+}
 
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitRR(this, kPPC_NegDouble, node);
+}
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -1461,7 +1492,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1619,6 +1650,9 @@
                 return VisitBinop<Int32BinopMatcher>(selector, node,
                                                      kPPC_SubWithOverflow32,
                                                      kInt16Imm_Negate, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kNotEqual);
+                return EmitInt32MulWithOverflow(selector, node, cont);
 #if V8_TARGET_ARCH_PPC64
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1701,14 +1735,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1813,6 +1847,15 @@
 }
 #endif
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+    return EmitInt32MulWithOverflow(this, node, &cont);
+  }
+  FlagsContinuation cont;
+  EmitInt32MulWithOverflow(this, node, &cont);
+}
+
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -1860,7 +1903,7 @@
   // Prepare for C function call.
   if (descriptor->IsCFunctionCall()) {
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr);
 
     // Poke any stack arguments.
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index ef23bc4..ae40f55 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -13,14 +13,14 @@
 namespace internal {
 namespace compiler {
 
-RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
-                                         CallDescriptor* call_descriptor,
-                                         MachineRepresentation word,
-                                         MachineOperatorBuilder::Flags flags)
+RawMachineAssembler::RawMachineAssembler(
+    Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
+    MachineRepresentation word, MachineOperatorBuilder::Flags flags,
+    MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
     : isolate_(isolate),
       graph_(graph),
       schedule_(new (zone()) Schedule(zone())),
-      machine_(zone(), word, flags),
+      machine_(zone(), word, flags, alignment_requirements),
       common_(zone()),
       call_descriptor_(call_descriptor),
       parameters_(parameter_count(), zone()),
@@ -85,9 +85,16 @@
   current_block_ = nullptr;
 }
 
+void RawMachineAssembler::Continuations(Node* call, RawMachineLabel* if_success,
+                                        RawMachineLabel* if_exception) {
+  DCHECK_NOT_NULL(schedule_);
+  DCHECK_NOT_NULL(current_block_);
+  schedule()->AddCall(CurrentBlock(), call, Use(if_success), Use(if_exception));
+  current_block_ = nullptr;
+}
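A hypothetical usage sketch of Continuations (the assembler m and the nodes
desc, function, args, and frame_state are assumed to already exist):

    RawMachineLabel if_success, if_exception;
    Node* call = m.CallNWithFrameState(desc, function, args, frame_state);
    // The call may throw, so it terminates the current block; both
    // successor blocks are then bound and filled in separately.
    m.Continuations(call, &if_success, &if_exception);
    m.Bind(&if_success);
    // ... normal path ...
    m.Bind(&if_exception);
    // ... exception handler path ...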
 
 void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
-                                 int32_t* case_values,
+                                 const int32_t* case_values,
                                  RawMachineLabel** case_labels,
                                  size_t case_count) {
   DCHECK_NE(schedule()->end(), current_block_);
@@ -112,7 +119,6 @@
   current_block_ = nullptr;
 }
 
-
 void RawMachineAssembler::Return(Node* value) {
   Node* ret = MakeNode(common()->Return(), 1, &value);
   schedule()->AddReturn(CurrentBlock(), ret);
@@ -143,8 +149,7 @@
 
 Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
                                  Node** args) {
-  int param_count =
-      static_cast<int>(desc->GetMachineSignature()->parameter_count());
+  int param_count = static_cast<int>(desc->ParameterCount());
   int input_count = param_count + 1;
   Node** buffer = zone()->NewArray<Node*>(input_count);
   int index = 0;
@@ -160,8 +165,7 @@
                                                Node* function, Node** args,
                                                Node* frame_state) {
   DCHECK(desc->NeedsFrameState());
-  int param_count =
-      static_cast<int>(desc->GetMachineSignature()->parameter_count());
+  int param_count = static_cast<int>(desc->ParameterCount());
   int input_count = param_count + 2;
   Node** buffer = zone()->NewArray<Node*>(input_count);
   int index = 0;
@@ -252,8 +256,7 @@
 
 Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
                                      Node** args) {
-  int param_count =
-      static_cast<int>(desc->GetMachineSignature()->parameter_count());
+  int param_count = static_cast<int>(desc->ParameterCount());
   int input_count = param_count + 1;
   Node** buffer = zone()->NewArray<Node*>(input_count);
   int index = 0;
@@ -376,6 +379,29 @@
   return tail_call;
 }
 
+Node* RawMachineAssembler::TailCallRuntime5(Runtime::FunctionId function,
+                                            Node* arg1, Node* arg2, Node* arg3,
+                                            Node* arg4, Node* arg5,
+                                            Node* context) {
+  const int kArity = 5;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
+  Node* nodes[] = {centry, arg1, arg2, arg3, arg4, arg5, ref, arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
+
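Hypothetical usage of the new five-argument helper (the runtime function id
and the argument nodes are placeholders, not real identifiers):

    // Builds the node layout {centry, a1..a5, ref, arity, context} shown
    // above and ends the current block with a tail call.
    m.TailCallRuntime5(Runtime::kSomePlaceholderId, a1, a2, a3, a4, a5,
                       context);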
 Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
                                           Node* function) {
   MachineSignature::Builder builder(zone(), 1, 0);
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 387e961..c7d4236 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -40,7 +40,10 @@
       Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
       MachineRepresentation word = MachineType::PointerRepresentation(),
       MachineOperatorBuilder::Flags flags =
-          MachineOperatorBuilder::Flag::kNoFlags);
+          MachineOperatorBuilder::Flag::kNoFlags,
+      MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
+          MachineOperatorBuilder::AlignmentRequirements::
+              FullUnalignedAccessSupport());
   ~RawMachineAssembler() {}
 
   Isolate* isolate() const { return isolate_; }
@@ -133,6 +136,34 @@
                    base, index, value);
   }
 
+  // Unaligned memory operations.
+  Node* UnalignedLoad(MachineType rep, Node* base) {
+    return UnalignedLoad(rep, base, IntPtrConstant(0));
+  }
+  Node* UnalignedLoad(MachineType rep, Node* base, Node* index) {
+    if (machine()->UnalignedLoadSupported(rep, 1)) {
+      return AddNode(machine()->Load(rep), base, index);
+    } else {
+      return AddNode(machine()->UnalignedLoad(rep), base, index);
+    }
+  }
+  Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* value) {
+    return UnalignedStore(rep, base, IntPtrConstant(0), value);
+  }
+  Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* index,
+                       Node* value) {
+    MachineType t = MachineType::TypeForRepresentation(rep);
+    if (machine()->UnalignedStoreSupported(t, 1)) {
+      return AddNode(machine()->Store(StoreRepresentation(
+                         rep, WriteBarrierKind::kNoWriteBarrier)),
+                     base, index, value);
+    } else {
+      return AddNode(
+          machine()->UnalignedStore(UnalignedStoreRepresentation(rep)), base,
+          index, value);
+    }
+  }
+
   // Atomic memory operations.
   Node* AtomicLoad(MachineType rep, Node* base, Node* index) {
     return AddNode(machine()->AtomicLoad(rep), base, index);
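A hypothetical usage sketch of the unaligned helpers added above (the
assembler m and the base node are assumptions): on targets that report full
unaligned-access support these lower to ordinary Load/Store nodes, otherwise
to the dedicated UnalignedLoad/UnalignedStore operators.

    // Read and rewrite a Float64 at a potentially misaligned byte offset.
    Node* value = m.UnalignedLoad(MachineType::Float64(), base,
                                  m.IntPtrConstant(3));
    m.UnalignedStore(MachineRepresentation::kFloat64, base,
                     m.IntPtrConstant(3), value);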
@@ -255,6 +286,9 @@
   Node* Int32MulHigh(Node* a, Node* b) {
     return AddNode(machine()->Int32MulHigh(), a, b);
   }
+  Node* Int32MulWithOverflow(Node* a, Node* b) {
+    return AddNode(machine()->Int32MulWithOverflow(), a, b);
+  }
   Node* Int32Div(Node* a, Node* b) {
     return AddNode(machine()->Int32Div(), a, b);
   }
@@ -399,23 +433,14 @@
   Node* Float32Sub(Node* a, Node* b) {
     return AddNode(machine()->Float32Sub(), a, b);
   }
-  Node* Float32SubPreserveNan(Node* a, Node* b) {
-    return AddNode(machine()->Float32SubPreserveNan(), a, b);
-  }
   Node* Float32Mul(Node* a, Node* b) {
     return AddNode(machine()->Float32Mul(), a, b);
   }
   Node* Float32Div(Node* a, Node* b) {
     return AddNode(machine()->Float32Div(), a, b);
   }
-  Node* Float32Max(Node* a, Node* b) {
-    return AddNode(machine()->Float32Max().op(), a, b);
-  }
-  Node* Float32Min(Node* a, Node* b) {
-    return AddNode(machine()->Float32Min().op(), a, b);
-  }
   Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
-  Node* Float32Neg(Node* a) { return Float32Sub(Float32Constant(-0.0f), a); }
+  Node* Float32Neg(Node* a) { return AddNode(machine()->Float32Neg(), a); }
   Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
   Node* Float32Equal(Node* a, Node* b) {
     return AddNode(machine()->Float32Equal(), a, b);
@@ -433,16 +458,18 @@
   Node* Float32GreaterThanOrEqual(Node* a, Node* b) {
     return Float32LessThanOrEqual(b, a);
   }
-
+  Node* Float32Max(Node* a, Node* b) {
+    return AddNode(machine()->Float32Max(), a, b);
+  }
+  Node* Float32Min(Node* a, Node* b) {
+    return AddNode(machine()->Float32Min(), a, b);
+  }
   Node* Float64Add(Node* a, Node* b) {
     return AddNode(machine()->Float64Add(), a, b);
   }
   Node* Float64Sub(Node* a, Node* b) {
     return AddNode(machine()->Float64Sub(), a, b);
   }
-  Node* Float64SubPreserveNan(Node* a, Node* b) {
-    return AddNode(machine()->Float64SubPreserveNan(), a, b);
-  }
   Node* Float64Mul(Node* a, Node* b) {
     return AddNode(machine()->Float64Mul(), a, b);
   }
@@ -453,29 +480,39 @@
     return AddNode(machine()->Float64Mod(), a, b);
   }
   Node* Float64Max(Node* a, Node* b) {
-    return AddNode(machine()->Float64Max().op(), a, b);
+    return AddNode(machine()->Float64Max(), a, b);
   }
   Node* Float64Min(Node* a, Node* b) {
-    return AddNode(machine()->Float64Min().op(), a, b);
+    return AddNode(machine()->Float64Min(), a, b);
   }
   Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
-  Node* Float64Neg(Node* a) { return Float64Sub(Float64Constant(-0.0), a); }
+  Node* Float64Neg(Node* a) { return AddNode(machine()->Float64Neg(), a); }
+  Node* Float64Acos(Node* a) { return AddNode(machine()->Float64Acos(), a); }
+  Node* Float64Acosh(Node* a) { return AddNode(machine()->Float64Acosh(), a); }
+  Node* Float64Asin(Node* a) { return AddNode(machine()->Float64Asin(), a); }
+  Node* Float64Asinh(Node* a) { return AddNode(machine()->Float64Asinh(), a); }
   Node* Float64Atan(Node* a) { return AddNode(machine()->Float64Atan(), a); }
+  Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
   Node* Float64Atan2(Node* a, Node* b) {
     return AddNode(machine()->Float64Atan2(), a, b);
   }
-  Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
   Node* Float64Cbrt(Node* a) { return AddNode(machine()->Float64Cbrt(), a); }
   Node* Float64Cos(Node* a) { return AddNode(machine()->Float64Cos(), a); }
+  Node* Float64Cosh(Node* a) { return AddNode(machine()->Float64Cosh(), a); }
   Node* Float64Exp(Node* a) { return AddNode(machine()->Float64Exp(), a); }
   Node* Float64Expm1(Node* a) { return AddNode(machine()->Float64Expm1(), a); }
   Node* Float64Log(Node* a) { return AddNode(machine()->Float64Log(), a); }
   Node* Float64Log1p(Node* a) { return AddNode(machine()->Float64Log1p(), a); }
   Node* Float64Log10(Node* a) { return AddNode(machine()->Float64Log10(), a); }
   Node* Float64Log2(Node* a) { return AddNode(machine()->Float64Log2(), a); }
+  Node* Float64Pow(Node* a, Node* b) {
+    return AddNode(machine()->Float64Pow(), a, b);
+  }
   Node* Float64Sin(Node* a) { return AddNode(machine()->Float64Sin(), a); }
+  Node* Float64Sinh(Node* a) { return AddNode(machine()->Float64Sinh(), a); }
   Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
   Node* Float64Tan(Node* a) { return AddNode(machine()->Float64Tan(), a); }
+  Node* Float64Tanh(Node* a) { return AddNode(machine()->Float64Tanh(), a); }
   Node* Float64Equal(Node* a, Node* b) {
     return AddNode(machine()->Float64Equal(), a, b);
   }
@@ -640,6 +677,14 @@
   Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
     return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
   }
+  Node* UnalignedLoadFromPointer(void* address, MachineType rep,
+                                 int32_t offset = 0) {
+    return UnalignedLoad(rep, PointerConstant(address), Int32Constant(offset));
+  }
+  Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
+                                Node* node) {
+    return UnalignedStore(rep, PointerConstant(address), node);
+  }
   Node* StringConstant(const char* string) {
     return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
   }
@@ -696,6 +741,9 @@
   // Tail call to a runtime function with four arguments.
   Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
                          Node* arg3, Node* arg4, Node* context);
+  // Tail call to a runtime function with five arguments.
+  Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                         Node* arg3, Node* arg4, Node* arg5, Node* context);
 
   // ===========================================================================
   // The following utility methods deal with control flow, hence might switch
@@ -705,8 +753,9 @@
   void Goto(RawMachineLabel* label);
   void Branch(Node* condition, RawMachineLabel* true_val,
               RawMachineLabel* false_val);
-  void Switch(Node* index, RawMachineLabel* default_label, int32_t* case_values,
-              RawMachineLabel** case_labels, size_t case_count);
+  void Switch(Node* index, RawMachineLabel* default_label,
+              const int32_t* case_values, RawMachineLabel** case_labels,
+              size_t case_count);
   void Return(Node* value);
   void Return(Node* v1, Node* v2);
   void Return(Node* v1, Node* v2, Node* v3);
@@ -715,6 +764,11 @@
   void DebugBreak();
   void Comment(const char* msg);
 
+  // Adds success / exception successor blocks and ends the current block,
+  // which must end in a potentially throwing call node.
+  void Continuations(Node* call, RawMachineLabel* if_success,
+                     RawMachineLabel* if_exception);
+
   // Variables.
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
     return AddNode(common()->Phi(rep, 2), n1, n2, graph()->start());
@@ -752,10 +806,7 @@
   BasicBlock* CurrentBlock();
 
   Schedule* schedule() { return schedule_; }
-  size_t parameter_count() const { return machine_sig()->parameter_count(); }
-  const MachineSignature* machine_sig() const {
-    return call_descriptor_->GetMachineSignature();
-  }
+  size_t parameter_count() const { return call_descriptor_->ParameterCount(); }
 
   Isolate* isolate_;
   Graph* graph_;
diff --git a/src/compiler/redundancy-elimination.cc b/src/compiler/redundancy-elimination.cc
index ae87349..c671fc2 100644
--- a/src/compiler/redundancy-elimination.cc
+++ b/src/compiler/redundancy-elimination.cc
@@ -17,14 +17,22 @@
 
 Reduction RedundancyElimination::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kCheckBounds:
     case IrOpcode::kCheckFloat64Hole:
+    case IrOpcode::kCheckIf:
+    case IrOpcode::kCheckNumber:
+    case IrOpcode::kCheckString:
     case IrOpcode::kCheckTaggedHole:
     case IrOpcode::kCheckTaggedPointer:
     case IrOpcode::kCheckTaggedSigned:
     case IrOpcode::kCheckedFloat64ToInt32:
     case IrOpcode::kCheckedInt32Add:
     case IrOpcode::kCheckedInt32Sub:
+    case IrOpcode::kCheckedInt32Div:
+    case IrOpcode::kCheckedInt32Mod:
+    case IrOpcode::kCheckedInt32Mul:
     case IrOpcode::kCheckedTaggedToFloat64:
+    case IrOpcode::kCheckedTaggedSignedToInt32:
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedUint32ToInt32:
       return ReduceCheckNode(node);
@@ -53,6 +61,19 @@
   return new (zone->New(sizeof(EffectPathChecks))) EffectPathChecks(nullptr, 0);
 }
 
+bool RedundancyElimination::EffectPathChecks::Equals(
+    EffectPathChecks const* that) const {
+  if (this->size_ != that->size_) return false;
+  Check* this_head = this->head_;
+  Check* that_head = that->head_;
+  while (this_head != that_head) {
+    if (this_head->node != that_head->node) return false;
+    this_head = this_head->next;
+    that_head = that_head->next;
+  }
+  return true;
+}
+
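Equals relies on the persistent, shared-tail structure of the check lists:
once the two cursors alias the same Check node, the remaining suffix is
identical by construction. A self-contained sketch of the same walk:

    #include <cstddef>

    struct Check {
      const void* node;
      const Check* next;
    };

    bool ChecksEqual(const Check* a, size_t size_a, const Check* b,
                     size_t size_b) {
      if (size_a != size_b) return false;
      while (a != b) {  // pointer equality implies an equal shared tail
        if (a->node != b->node) return false;
        a = a->next;
        b = b->next;
      }
      return true;
    }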
 void RedundancyElimination::EffectPathChecks::Merge(
     EffectPathChecks const* that) {
   // Change the current check list to a longest common tail of this check
@@ -205,8 +226,10 @@
   // Only signal that the {node} has Changed, if the information about {checks}
   // has changed wrt. the {original}.
   if (checks != original) {
-    node_checks_.Set(node, checks);
-    return Changed(node);
+    if (original == nullptr || !checks->Equals(original)) {
+      node_checks_.Set(node, checks);
+      return Changed(node);
+    }
   }
   return NoChange();
 }
diff --git a/src/compiler/redundancy-elimination.h b/src/compiler/redundancy-elimination.h
index a4886e4..88f9032 100644
--- a/src/compiler/redundancy-elimination.h
+++ b/src/compiler/redundancy-elimination.h
@@ -29,6 +29,7 @@
    public:
     static EffectPathChecks* Copy(Zone* zone, EffectPathChecks const* checks);
     static EffectPathChecks const* Empty(Zone* zone);
+    bool Equals(EffectPathChecks const* that) const;
     void Merge(EffectPathChecks const* that);
 
     EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 2d10de0..cefd04a 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -160,7 +160,7 @@
     int vreg = unallocated->virtual_register();
     constraint->virtual_register_ = vreg;
     if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
-      constraint->type_ = sequence()->IsFP(vreg) ? kFPSlot : kSlot;
+      constraint->type_ = kFixedSlot;
       constraint->value_ = unallocated->fixed_slot_index();
     } else {
       switch (unallocated->extended_policy()) {
@@ -193,7 +193,9 @@
           }
           break;
         case UnallocatedOperand::MUST_HAVE_SLOT:
-          constraint->type_ = sequence()->IsFP(vreg) ? kFPSlot : kSlot;
+          constraint->type_ = kSlot;
+          constraint->value_ =
+              ElementSizeLog2Of(sequence()->GetRepresentation(vreg));
           break;
         case UnallocatedOperand::SAME_AS_FIRST_INPUT:
           constraint->type_ = kSameAsFirst;
@@ -239,14 +241,13 @@
       CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
       return;
     case kFixedSlot:
-      CHECK(op->IsStackSlot());
+      CHECK(op->IsStackSlot() || op->IsFPStackSlot());
       CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
       return;
     case kSlot:
-      CHECK(op->IsStackSlot());
-      return;
-    case kFPSlot:
-      CHECK(op->IsFPStackSlot());
+      CHECK(op->IsStackSlot() || op->IsFPStackSlot());
+      CHECK_EQ(ElementSizeLog2Of(LocationOperand::cast(op)->representation()),
+               constraint->value_);
       return;
     case kNone:
       CHECK(op->IsRegister() || op->IsStackSlot());
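The kSlot constraint now encodes the slot width as the base-2 log of the
representation's byte width, so a slot satisfies the constraint only when
its representation has the same size. A small sketch of the encoding (byte
widths are illustrative):

    // 4-byte slots -> 2, 8-byte -> 3, 16-byte (SIMD) -> 4.
    int SlotSizeLog2(int byte_width) {
      int log2 = 0;
      while ((1 << log2) < byte_width) ++log2;
      return log2;
    }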
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 72e6e06..2db8af5 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -178,7 +178,6 @@
     kFPRegister,
     kFixedFPRegister,
     kSlot,
-    kFPSlot,
     kFixedSlot,
     kNone,
     kNoneFP,
@@ -189,7 +188,9 @@
 
   struct OperandConstraint {
     ConstraintType type_;
-    int value_;  // subkind index when relevant
+    // Constant or immediate value, register code, slot index, or slot size
+    // when relevant.
+    int value_;
     int spilled_slot_;
     int virtual_register_;
   };
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 9c8d999..5b55b02 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -33,7 +33,7 @@
 
 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
                                 RegisterKind kind) {
-  return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
+  return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
                               : cfg->num_allocatable_general_registers();
 }
 
@@ -64,37 +64,6 @@
   return code->InstructionAt(block->last_instruction_index());
 }
 
-bool IsOutputRegisterOf(Instruction* instr, int code) {
-  for (size_t i = 0; i < instr->OutputCount(); i++) {
-    InstructionOperand* output = instr->OutputAt(i);
-    if (output->IsRegister() &&
-        LocationOperand::cast(output)->register_code() == code) {
-      return true;
-    }
-  }
-  return false;
-}
-
-bool IsOutputFPRegisterOf(Instruction* instr, MachineRepresentation rep,
-                          int code) {
-  for (size_t i = 0; i < instr->OutputCount(); i++) {
-    InstructionOperand* output = instr->OutputAt(i);
-    if (output->IsFPRegister()) {
-      const LocationOperand* op = LocationOperand::cast(output);
-      if (kSimpleFPAliasing) {
-        if (op->register_code() == code) return true;
-      } else {
-        if (RegisterConfiguration::Turbofan()->AreAliases(
-                op->representation(), op->register_code(), rep, code)) {
-          return true;
-        }
-      }
-    }
-  }
-  return false;
-}
-
-
 // TODO(dcarney): fix frame to allow frame accesses to half size location.
 int GetByteWidth(MachineRepresentation rep) {
   switch (rep) {
@@ -102,14 +71,22 @@
     case MachineRepresentation::kWord8:
     case MachineRepresentation::kWord16:
     case MachineRepresentation::kWord32:
+    case MachineRepresentation::kTaggedSigned:
+    case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
       return kPointerSize;
     case MachineRepresentation::kFloat32:
+// TODO(bbudge) Eliminate this when FP register aliasing works.
+#if V8_TARGET_ARCH_ARM
+      return kDoubleSize;
+#else
+      return kPointerSize;
+#endif
     case MachineRepresentation::kWord64:
     case MachineRepresentation::kFloat64:
-      return 8;
+      return kDoubleSize;
     case MachineRepresentation::kSimd128:
-      return 16;
+      return kSimd128Size;
     case MachineRepresentation::kNone:
       break;
   }
@@ -1222,12 +1199,10 @@
   return os;
 }
 
-
 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
     : live_ranges_(zone),
       assigned_slot_(kUnassignedSlot),
-      byte_width_(GetByteWidth(parent->representation())),
-      kind_(parent->kind()) {
+      byte_width_(GetByteWidth(parent->representation())) {
   // Spill ranges are created for top level, non-splintered ranges. This is so
   // that, when merging decisions are made, we consider the full extent of the
   // virtual register, and avoid clobbering it.
@@ -1266,11 +1241,8 @@
 
 bool SpillRange::TryMerge(SpillRange* other) {
   if (HasSlot() || other->HasSlot()) return false;
-  // TODO(dcarney): byte widths should be compared here not kinds.
-  if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
-      IsIntersectingWith(other)) {
+  if (byte_width() != other->byte_width() || IsIntersectingWith(other))
     return false;
-  }
 
   LifetimePosition max = LifetimePosition::MaxPosition();
   if (End() < other->End() && other->End() != max) {
@@ -1371,8 +1343,6 @@
                    allocation_zone()),
       fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
                          allocation_zone()),
-      fixed_float_live_ranges_(this->config()->num_float_registers(), nullptr,
-                               allocation_zone()),
       fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
                                 allocation_zone()),
       spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
@@ -1552,19 +1522,8 @@
                                            int index) {
   switch (rep) {
     case MachineRepresentation::kFloat32:
-      if (kSimpleFPAliasing) {
-        assigned_double_registers_->Add(index);
-      } else {
-        int alias_base_index = -1;
-        int aliases = config()->GetAliases(
-            rep, index, MachineRepresentation::kFloat64, &alias_base_index);
-        while (aliases--) {
-          int aliased_reg = alias_base_index + aliases;
-          assigned_double_registers_->Add(aliased_reg);
-        }
-      }
-      break;
     case MachineRepresentation::kFloat64:
+    case MachineRepresentation::kSimd128:
       assigned_double_registers_->Add(index);
       break;
     default:
@@ -1888,17 +1847,18 @@
 }
 
 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
+  int result = -index - 1;
   switch (rep) {
+    case MachineRepresentation::kSimd128:
     case MachineRepresentation::kFloat32:
-      return -index - 1 - config()->num_general_registers();
     case MachineRepresentation::kFloat64:
-      return -index - 1 - config()->num_general_registers() -
-             config()->num_float_registers();
+      result -= config()->num_general_registers();
+      break;
     default:
+      UNREACHABLE();
       break;
   }
-  UNREACHABLE();
-  return 0;
+  return result;
 }
 
 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
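After this change all FP representations share a single fixed-range ID
space. A sketch of the resulting layout (register counts are illustrative
assumptions):

    // General register i                      -> ID -i - 1
    // FP register j (float32/float64/simd128) -> ID -j - 1 - num_general
    int FixedGeneralID(int i) { return -i - 1; }
    int FixedFPID(int j, int num_general_registers) {
      return -j - 1 - num_general_registers;
    }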
@@ -1918,27 +1878,23 @@
 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
     int index, MachineRepresentation rep) {
   TopLevelLiveRange* result = nullptr;
-  if (rep == MachineRepresentation::kFloat64) {
-    DCHECK(index < config()->num_double_registers());
-    result = data()->fixed_double_live_ranges()[index];
-    if (result == nullptr) {
-      result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
-      DCHECK(result->IsFixed());
-      result->set_assigned_register(index);
-      data()->MarkAllocated(rep, index);
-      data()->fixed_double_live_ranges()[index] = result;
-    }
-  } else {
-    DCHECK(rep == MachineRepresentation::kFloat32);
-    DCHECK(index < config()->num_float_registers());
-    result = data()->fixed_float_live_ranges()[index];
-    if (result == nullptr) {
-      result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
-      DCHECK(result->IsFixed());
-      result->set_assigned_register(index);
-      data()->MarkAllocated(rep, index);
-      data()->fixed_float_live_ranges()[index] = result;
-    }
+  switch (rep) {
+    case MachineRepresentation::kFloat32:
+    case MachineRepresentation::kFloat64:
+    case MachineRepresentation::kSimd128:
+      DCHECK(index < config()->num_double_registers());
+      result = data()->fixed_double_live_ranges()[index];
+      if (result == nullptr) {
+        result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+        DCHECK(result->IsFixed());
+        result->set_assigned_register(index);
+        data()->MarkAllocated(rep, index);
+        data()->fixed_double_live_ranges()[index] = result;
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
   }
   return result;
 }
@@ -2050,38 +2006,27 @@
 
     if (instr->ClobbersRegisters()) {
       for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
+        // Create a UseInterval at this instruction for all fixed registers
+        // (including the instruction outputs). Adding another UseInterval here
+        // is OK because AddUseInterval will just merge it with the existing
+        // one at the end of the range.
         int code = config()->GetAllocatableGeneralCode(i);
-        if (!IsOutputRegisterOf(instr, code)) {
-          TopLevelLiveRange* range = FixedLiveRangeFor(code);
-          range->AddUseInterval(curr_position, curr_position.End(),
-                                allocation_zone());
-        }
+        TopLevelLiveRange* range = FixedLiveRangeFor(code);
+        range->AddUseInterval(curr_position, curr_position.End(),
+                              allocation_zone());
       }
     }
 
     if (instr->ClobbersDoubleRegisters()) {
-      for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
+      for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
+           ++i) {
+        // Add a UseInterval for all DoubleRegisters. See comment above for
+        // general registers.
         int code = config()->GetAllocatableDoubleCode(i);
-        if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat64,
-                                  code)) {
-          TopLevelLiveRange* range =
-              FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
-          range->AddUseInterval(curr_position, curr_position.End(),
-                                allocation_zone());
-        }
-      }
-      // Preserve fixed float registers on archs with non-simple aliasing.
-      if (!kSimpleFPAliasing) {
-        for (int i = 0; i < config()->num_allocatable_float_registers(); ++i) {
-          int code = config()->GetAllocatableFloatCode(i);
-          if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat32,
-                                    code)) {
-            TopLevelLiveRange* range =
-                FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
-            range->AddUseInterval(curr_position, curr_position.End(),
-                                  allocation_zone());
-          }
-        }
+        TopLevelLiveRange* range =
+            FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
+        range->AddUseInterval(curr_position, curr_position.End(),
+                              allocation_zone());
       }
     }
 
@@ -2646,9 +2591,6 @@
       if (current != nullptr) AddToInactive(current);
     }
   } else {
-    for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
-      if (current != nullptr) AddToInactive(current);
-    }
     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
       if (current != nullptr) AddToInactive(current);
     }
@@ -2821,12 +2763,7 @@
   int num_regs = num_registers();
   int num_codes = num_allocatable_registers();
   const int* codes = allocatable_register_codes();
-  if (!kSimpleFPAliasing &&
-      (current->representation() == MachineRepresentation::kFloat32)) {
-    num_regs = data()->config()->num_float_registers();
-    num_codes = data()->config()->num_allocatable_float_registers();
-    codes = data()->config()->allocatable_float_codes();
-  }
+
   LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
   for (int i = 0; i < num_regs; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
@@ -2834,21 +2771,9 @@
 
   for (LiveRange* cur_active : active_live_ranges()) {
     int cur_reg = cur_active->assigned_register();
-    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
-      free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
-      TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
-            LifetimePosition::GapFromInstructionIndex(0).value());
-    } else {
-      int alias_base_index = -1;
-      int aliases = data()->config()->GetAliases(
-          cur_active->representation(), cur_reg, current->representation(),
-          &alias_base_index);
-      while (aliases--) {
-        int aliased_reg = alias_base_index + aliases;
-        free_until_pos[aliased_reg] =
-            LifetimePosition::GapFromInstructionIndex(0);
-      }
-    }
+    free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+    TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
+          LifetimePosition::GapFromInstructionIndex(0).value());
   }
 
   for (LiveRange* cur_inactive : inactive_live_ranges()) {
@@ -2857,21 +2782,9 @@
         cur_inactive->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = cur_inactive->assigned_register();
-    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
-      free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
-      TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
-            Min(free_until_pos[cur_reg], next_intersection).value());
-    } else {
-      int alias_base_index = -1;
-      int aliases = data()->config()->GetAliases(
-          cur_inactive->representation(), cur_reg, current->representation(),
-          &alias_base_index);
-      while (aliases--) {
-        int aliased_reg = alias_base_index + aliases;
-        free_until_pos[aliased_reg] =
-            Min(free_until_pos[aliased_reg], next_intersection);
-      }
-    }
+    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+    TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+          Min(free_until_pos[cur_reg], next_intersection).value());
   }
 
   int hint_register;
@@ -2938,12 +2851,6 @@
   int num_regs = num_registers();
   int num_codes = num_allocatable_registers();
   const int* codes = allocatable_register_codes();
-  if (!kSimpleFPAliasing &&
-      (current->representation() == MachineRepresentation::kFloat32)) {
-    num_regs = data()->config()->num_float_registers();
-    num_codes = data()->config()->num_allocatable_float_registers();
-    codes = data()->config()->allocatable_float_codes();
-  }
 
   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
@@ -2955,38 +2862,16 @@
     int cur_reg = range->assigned_register();
     bool is_fixed_or_cant_spill =
         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
-    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
-      if (is_fixed_or_cant_spill) {
-        block_pos[cur_reg] = use_pos[cur_reg] =
-            LifetimePosition::GapFromInstructionIndex(0);
-      } else {
-        UsePosition* next_use =
-            range->NextUsePositionRegisterIsBeneficial(current->Start());
-        if (next_use == nullptr) {
-          use_pos[cur_reg] = range->End();
-        } else {
-          use_pos[cur_reg] = next_use->pos();
-        }
-      }
+    if (is_fixed_or_cant_spill) {
+      block_pos[cur_reg] = use_pos[cur_reg] =
+          LifetimePosition::GapFromInstructionIndex(0);
     } else {
-      int alias_base_index = -1;
-      int aliases = data()->config()->GetAliases(
-          range->representation(), cur_reg, current->representation(),
-          &alias_base_index);
-      while (aliases--) {
-        int aliased_reg = alias_base_index + aliases;
-        if (is_fixed_or_cant_spill) {
-          block_pos[aliased_reg] = use_pos[aliased_reg] =
-              LifetimePosition::GapFromInstructionIndex(0);
-        } else {
-          UsePosition* next_use =
-              range->NextUsePositionRegisterIsBeneficial(current->Start());
-          if (next_use == nullptr) {
-            use_pos[aliased_reg] = range->End();
-          } else {
-            use_pos[aliased_reg] = next_use->pos();
-          }
-        }
+      UsePosition* next_use =
+          range->NextUsePositionRegisterIsBeneficial(current->Start());
+      if (next_use == nullptr) {
+        use_pos[cur_reg] = range->End();
+      } else {
+        use_pos[cur_reg] = next_use->pos();
       }
     }
   }
@@ -2997,29 +2882,11 @@
     if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
     bool is_fixed = range->TopLevel()->IsFixed();
-    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
-      if (is_fixed) {
-        block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
-        use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
-      } else {
-        use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
-      }
+    if (is_fixed) {
+      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
     } else {
-      int alias_base_index = -1;
-      int aliases = data()->config()->GetAliases(
-          range->representation(), cur_reg, current->representation(),
-          &alias_base_index);
-      while (aliases--) {
-        int aliased_reg = alias_base_index + aliases;
-        if (is_fixed) {
-          block_pos[aliased_reg] =
-              Min(block_pos[aliased_reg], next_intersection);
-          use_pos[aliased_reg] =
-              Min(block_pos[aliased_reg], use_pos[aliased_reg]);
-        } else {
-          use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
-        }
-      }
+      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
     }
   }
 
@@ -3071,15 +2938,7 @@
   LifetimePosition split_pos = current->Start();
   for (size_t i = 0; i < active_live_ranges().size(); ++i) {
     LiveRange* range = active_live_ranges()[i];
-    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
-      if (range->assigned_register() != reg) continue;
-    } else {
-      if (!data()->config()->AreAliases(current->representation(), reg,
-                                        range->representation(),
-                                        range->assigned_register())) {
-        continue;
-      }
-    }
+    if (range->assigned_register() != reg) continue;
 
     UsePosition* next_pos = range->NextRegisterPosition(current->Start());
     LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
@@ -3106,14 +2965,7 @@
     LiveRange* range = inactive_live_ranges()[i];
     DCHECK(range->End() > current->Start());
     if (range->TopLevel()->IsFixed()) continue;
-    if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
-      if (range->assigned_register() != reg) continue;
-    } else {
-      if (!data()->config()->AreAliases(current->representation(), reg,
-                                        range->representation(),
-                                        range->assigned_register()))
-        continue;
-    }
+    if (range->assigned_register() != reg) continue;
 
     LifetimePosition next_intersection = range->FirstIntersection(current);
     if (next_intersection.IsValid()) {
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index caadcba..6bfc6c4 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -695,8 +695,8 @@
     return live_ranges_;
   }
   ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
+  // Spill slots can be 4, 8, or 16 bytes wide.
   int byte_width() const { return byte_width_; }
-  RegisterKind kind() const { return kind_; }
   void Print() const;
 
  private:
@@ -710,7 +710,6 @@
   LifetimePosition end_position_;
   int assigned_slot_;
   int byte_width_;
-  RegisterKind kind_;
 
   DISALLOW_COPY_AND_ASSIGN(SpillRange);
 };
@@ -767,12 +766,6 @@
   ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
     return fixed_live_ranges_;
   }
-  ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() {
-    return fixed_float_live_ranges_;
-  }
-  const ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() const {
-    return fixed_float_live_ranges_;
-  }
   ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
     return fixed_double_live_ranges_;
   }
@@ -840,7 +833,6 @@
   ZoneVector<BitVector*> live_out_sets_;
   ZoneVector<TopLevelLiveRange*> live_ranges_;
   ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
-  ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
   ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
   ZoneVector<SpillRange*> spill_ranges_;
   DelayedReferences delayed_references_;
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index d1aa5af..5427bdb 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -63,6 +63,11 @@
       LessGeneral(rep2, TruncationKind::kFloat64)) {
     return TruncationKind::kFloat64;
   }
+  // Handle the generalization of any-representable values.
+  if (LessGeneral(rep1, TruncationKind::kAny) &&
+      LessGeneral(rep2, TruncationKind::kAny)) {
+    return TruncationKind::kAny;
+  }
   // All other combinations are illegal.
   FATAL("Tried to combine incompatible truncations");
   return TruncationKind::kNone;
@@ -112,8 +117,10 @@
 Node* RepresentationChanger::GetRepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type,
     Node* use_node, UseInfo use_info) {
-  if (output_rep == MachineRepresentation::kNone) {
-    // The output representation should be set.
+  if (output_rep == MachineRepresentation::kNone &&
+      output_type->IsInhabited()) {
+    // The output representation should be set if the type is inhabited
+    // (i.e., if a value of this type can actually occur).
     return TypeError(node, output_rep, output_type, use_info.representation());
   }
 
@@ -134,6 +141,8 @@
   }
 
   switch (use_info.representation()) {
+    case MachineRepresentation::kTaggedSigned:
+    case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
       DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetTaggedRepresentationFor(node, output_rep, output_type);
@@ -179,7 +188,7 @@
       } else if (output_type->Is(Type::Unsigned32())) {
         uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
         return jsgraph()->Constant(static_cast<double>(value));
-      } else if (output_rep == MachineRepresentation::kBit) {
+      } else if (output_type->Is(Type::Boolean())) {
         return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
                                                : jsgraph()->TrueConstant();
       } else {
@@ -195,8 +204,17 @@
   }
   // Select the correct X -> Tagged operator.
   const Operator* op;
-  if (output_rep == MachineRepresentation::kBit) {
-    op = simplified()->ChangeBitToTagged();
+  if (output_rep == MachineRepresentation::kNone) {
+    // We should only assign this representation if the type is empty.
+    CHECK(!output_type->IsInhabited());
+    op = machine()->ImpossibleToTagged();
+  } else if (output_rep == MachineRepresentation::kBit) {
+    if (output_type->Is(Type::Boolean())) {
+      op = simplified()->ChangeBitToTagged();
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTagged);
+    }
   } else if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed31())) {
       op = simplified()->ChangeInt31ToTaggedSigned();
@@ -211,8 +229,10 @@
   } else if (output_rep ==
              MachineRepresentation::kFloat32) {  // float32 -> float64 -> tagged
     node = InsertChangeFloat32ToFloat64(node);
-    // TODO(bmeurer): Pass -0 hint to ChangeFloat64ToTagged.
-    op = simplified()->ChangeFloat64ToTagged();
+    op = simplified()->ChangeFloat64ToTagged(
+        output_type->Maybe(Type::MinusZero())
+            ? CheckForMinusZeroMode::kCheckForMinusZero
+            : CheckForMinusZeroMode::kDontCheckForMinusZero);
   } else if (output_rep == MachineRepresentation::kFloat64) {
     if (output_type->Is(Type::Signed31())) {  // float64 -> int32 -> tagged
       node = InsertChangeFloat64ToInt32(node);
@@ -226,8 +246,10 @@
       node = InsertChangeFloat64ToUint32(node);
       op = simplified()->ChangeUint32ToTagged();
     } else {
-      // TODO(bmeurer): Pass -0 hint to ChangeFloat64ToTagged.
-      op = simplified()->ChangeFloat64ToTagged();
+      op = simplified()->ChangeFloat64ToTagged(
+          output_type->Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero);
     }
   } else {
     return TypeError(node, output_rep, output_type,
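The CheckForMinusZero mode matters because a Smi 0 cannot encode -0.0: a
float64 that may be -0 has to be boxed as a HeapNumber rather than converted
to a Smi. A minimal sketch of the test the mode implies (plain C++):

    #include <cmath>

    bool IsMinusZero(double value) {
      // -0.0 compares equal to 0.0, so inspect the sign bit directly.
      return value == 0.0 && std::signbit(value);
    }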
@@ -261,14 +283,18 @@
   }
   // Select the correct X -> Float32 operator.
   const Operator* op = nullptr;
-  if (IsWord(output_rep)) {
+  if (output_rep == MachineRepresentation::kNone) {
+    // We should only use kNone representation if the type is empty.
+    CHECK(!output_type->IsInhabited());
+    op = machine()->ImpossibleToFloat32();
+  } else if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
       // int32 -> float64 -> float32
       op = machine()->ChangeInt32ToFloat64();
       node = jsgraph()->graph()->NewNode(op, node);
       op = machine()->TruncateFloat64ToFloat32();
     } else if (output_type->Is(Type::Unsigned32()) ||
-               truncation.TruncatesToWord32()) {
+               truncation.IsUsedAsWord32()) {
       // Either the output is uint32 or the uses only care about the
       // low 32 bits (so we can pick uint32 safely).
 
@@ -278,7 +304,7 @@
       op = machine()->TruncateFloat64ToFloat32();
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::NumberOrUndefined())) {
+    if (output_type->Is(Type::NumberOrOddball())) {
       // tagged -> float64 -> float32
       if (output_type->Is(Type::Number())) {
         op = simplified()->ChangeTaggedToFloat64();
@@ -326,15 +352,21 @@
   }
   // Select the correct X -> Float64 operator.
   const Operator* op = nullptr;
-  if (IsWord(output_rep)) {
+  if (output_rep == MachineRepresentation::kNone) {
+    // We should only use kNone representation if the type is empty.
+    CHECK(!output_type->IsInhabited());
+    op = machine()->ImpossibleToFloat64();
+  } else if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeInt32ToFloat64();
     } else if (output_type->Is(Type::Unsigned32()) ||
-               use_info.truncation().TruncatesToWord32()) {
+               use_info.truncation().IsUsedAsWord32()) {
       // Either the output is uint32 or the uses only care about the
       // low 32 bits (so we can pick uint32 safely).
       op = machine()->ChangeUint32ToFloat64();
     }
+  } else if (output_rep == MachineRepresentation::kBit) {
+    op = machine()->ChangeUint32ToFloat64();
   } else if (output_rep == MachineRepresentation::kTagged) {
     if (output_type->Is(Type::Undefined())) {
       return jsgraph()->Float64Constant(
@@ -344,11 +376,16 @@
       op = machine()->ChangeInt32ToFloat64();
     } else if (output_type->Is(Type::Number())) {
       op = simplified()->ChangeTaggedToFloat64();
-    } else if (output_type->Is(Type::NumberOrUndefined())) {
+    } else if (output_type->Is(Type::NumberOrOddball())) {
       // TODO(jarin) Here we should check that truncation is Number.
       op = simplified()->TruncateTaggedToFloat64();
-    } else if (use_info.type_check() == TypeCheckKind::kNumberOrUndefined) {
-      op = simplified()->CheckedTaggedToFloat64();
+    } else if (use_info.type_check() == TypeCheckKind::kNumber ||
+               (use_info.type_check() == TypeCheckKind::kNumberOrOddball &&
+                !output_type->Maybe(Type::BooleanOrNullOrNumber()))) {
+      op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber);
+    } else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
+      op = simplified()->CheckedTaggedToFloat64(
+          CheckTaggedInputMode::kNumberOrOddball);
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     op = machine()->ChangeFloat32ToFloat64();
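
The two CheckTaggedInputMode values chosen above differ only in what the
deopt check admits: kNumber accepts numbers only, while kNumberOrOddball also
lets oddballs through via their cached numeric value. A toy model of the
check (the Tagged struct here is invented for illustration):

#include <optional>

enum class CheckTaggedInputMode { kNumber, kNumberOrOddball };

// Invented stand-in for a tagged value; the real code inspects heap maps.
struct Tagged {
  bool is_number;
  bool is_oddball;
  double number_value;       // valid when is_number
  double oddball_to_number;  // cached conversion, e.g. NaN for undefined
};

// nullopt marks where the lowered code would deoptimize.
std::optional<double> CheckedTaggedToFloat64Sketch(const Tagged& v,
                                                   CheckTaggedInputMode mode) {
  if (v.is_number) return v.number_value;
  if (mode == CheckTaggedInputMode::kNumberOrOddball && v.is_oddball) {
    return v.oddball_to_number;
  }
  return std::nullopt;
}
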
@@ -374,7 +411,8 @@
     case IrOpcode::kFloat32Constant: {
       float const fv = OpParameter<float>(node);
       if (use_info.type_check() == TypeCheckKind::kNone ||
-          (use_info.type_check() == TypeCheckKind::kSigned32 &&
+          ((use_info.type_check() == TypeCheckKind::kSignedSmall ||
+            use_info.type_check() == TypeCheckKind::kSigned32) &&
            IsInt32Double(fv))) {
         return MakeTruncatedInt32Constant(fv);
       }
@@ -384,7 +422,8 @@
     case IrOpcode::kFloat64Constant: {
       double const fv = OpParameter<double>(node);
       if (use_info.type_check() == TypeCheckKind::kNone ||
-          (use_info.type_check() == TypeCheckKind::kSigned32 &&
+          ((use_info.type_check() == TypeCheckKind::kSignedSmall ||
+            use_info.type_check() == TypeCheckKind::kSigned32) &&
            IsInt32Double(fv))) {
         return MakeTruncatedInt32Constant(fv);
       }
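
Both constant cases fold only when the float is exactly representable as an
int32, so the checked conversion could never deopt on it anyway. A predicate
with that meaning (a sketch; V8's actual IsInt32Double additionally excludes
-0.0):

#include <cstdint>
#include <limits>

// True when the double round-trips through int32 exactly.
bool IsInt32DoubleSketch(double value) {
  if (value != value) return false;  // NaN
  if (value < std::numeric_limits<int32_t>::min() ||
      value > std::numeric_limits<int32_t>::max()) {
    return false;
  }
  int32_t truncated = static_cast<int32_t>(value);
  return static_cast<double>(truncated) == value;
}
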
@@ -396,17 +435,25 @@
 
   // Select the correct X -> Word32 operator.
   const Operator* op = nullptr;
-  if (output_rep == MachineRepresentation::kBit) {
+  if (output_rep == MachineRepresentation::kNone) {
+    // We should only use kNone representation if the type is empty.
+    CHECK(!output_type->IsInhabited());
+    op = machine()->ImpossibleToWord32();
+  } else if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word32
   } else if (output_rep == MachineRepresentation::kFloat64) {
     if (output_type->Is(Type::Unsigned32())) {
       op = machine()->ChangeFloat64ToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (use_info.truncation().TruncatesToWord32()) {
+    } else if (use_info.truncation().IsUsedAsWord32()) {
       op = machine()->TruncateFloat64ToWord32();
-    } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
-      op = simplified()->CheckedFloat64ToInt32();
+    } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
+               use_info.type_check() == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedFloat64ToInt32(
+          output_type->Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero);
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
@@ -414,10 +461,14 @@
       op = machine()->ChangeFloat64ToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (use_info.truncation().TruncatesToWord32()) {
+    } else if (use_info.truncation().IsUsedAsWord32()) {
       op = machine()->TruncateFloat64ToWord32();
-    } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
-      op = simplified()->CheckedFloat64ToInt32();
+    } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
+               use_info.type_check() == TypeCheckKind::kSigned32) {
+      op = simplified()->CheckedFloat64ToInt32(
+          output_type->Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero);
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
     if (output_type->Is(Type::TaggedSigned())) {
@@ -426,24 +477,39 @@
       op = simplified()->ChangeTaggedToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
-    } else if (use_info.truncation().TruncatesToWord32()) {
-      op = simplified()->TruncateTaggedToWord32();
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      if (use_info.type_check() != TypeCheckKind::kNone) {
+        op = simplified()->CheckedTruncateTaggedToWord32();
+      } else {
+        op = simplified()->TruncateTaggedToWord32();
+      }
+    } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      op = simplified()->CheckedTaggedSignedToInt32();
     } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
-      op = simplified()->CheckedTaggedToInt32();
+      op = simplified()->CheckedTaggedToInt32(
+          output_type->Maybe(Type::MinusZero())
+              ? CheckForMinusZeroMode::kCheckForMinusZero
+              : CheckForMinusZeroMode::kDontCheckForMinusZero);
     }
   } else if (output_rep == MachineRepresentation::kWord32) {
     // Only the checked case should get here; the non-checked case is
     // handled in GetRepresentationFor.
-    DCHECK(use_info.type_check() == TypeCheckKind::kSigned32);
-    if (output_type->Is(Type::Signed32())) {
+    if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
+        use_info.type_check() == TypeCheckKind::kSigned32) {
+      if (output_type->Is(Type::Signed32())) {
+        return node;
+      } else if (output_type->Is(Type::Unsigned32())) {
+        op = simplified()->CheckedUint32ToInt32();
+      }
+    } else {
+      DCHECK_EQ(TypeCheckKind::kNumberOrOddball, use_info.type_check());
       return node;
-    } else if (output_type->Is(Type::Unsigned32())) {
-      op = simplified()->CheckedUint32ToInt32();
     }
   } else if (output_rep == MachineRepresentation::kWord8 ||
              output_rep == MachineRepresentation::kWord16) {
     DCHECK(use_info.representation() == MachineRepresentation::kWord32);
-    DCHECK(use_info.type_check() == TypeCheckKind::kSigned32);
+    DCHECK(use_info.type_check() == TypeCheckKind::kSignedSmall ||
+           use_info.type_check() == TypeCheckKind::kSigned32);
     return node;
   }
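
On the kSignedSmall/kSigned32 distinction used throughout this function: a
SignedSmall check only admits values in Smi range, which depends on pointer
width. A sketch, assuming the usual V8 Smi tagging scheme (31 payload bits on
32-bit targets, 32 on 64-bit targets):

#include <cstdint>

constexpr int kSmiValueBits = sizeof(void*) == 8 ? 32 : 31;

// True when the value fits in a Smi and needs no heap number.
bool IsInSmiRangeSketch(int64_t value) {
  const int64_t limit = int64_t{1} << (kSmiValueBits - 1);
  return value >= -limit && value < limit;
}
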
 
@@ -485,7 +551,11 @@
   }
   // Select the correct X -> Bit operator.
   const Operator* op;
-  if (output_rep == MachineRepresentation::kTagged) {
+  if (output_rep == MachineRepresentation::kNone) {
+    // We should only use kNone representation if the type is empty.
+    CHECK(!output_type->IsInhabited());
+    op = machine()->ImpossibleToBit();
+  } else if (output_rep == MachineRepresentation::kTagged) {
     op = simplified()->ChangeTaggedToBit();
   } else {
     return TypeError(node, output_rep, output_type,
@@ -496,7 +566,11 @@
 
 Node* RepresentationChanger::GetWord64RepresentationFor(
     Node* node, MachineRepresentation output_rep, Type* output_type) {
-  if (output_rep == MachineRepresentation::kBit) {
+  if (output_rep == MachineRepresentation::kNone) {
+    // We should only use kNone representation if the type is empty.
+    CHECK(!output_type->IsInhabited());
+    return jsgraph()->graph()->NewNode(machine()->ImpossibleToFloat64(), node);
+  } else if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word64
   }
   // Can't really convert Word64 to anything else. Purported to be internal.
@@ -504,74 +578,6 @@
                    MachineRepresentation::kWord64);
 }
 
-Node* RepresentationChanger::GetCheckedWord32RepresentationFor(
-    Node* node, MachineRepresentation output_rep, Type* output_type,
-    Node* use_node, Truncation truncation, TypeCheckKind check) {
-  // TODO(jarin) Eagerly fold constants (or insert hard deopt if the constant
-  // does not pass the check).
-
-  // If the input is already Signed32 in Word32 representation, we do not
-  // have to do anything. (We could fold this into the big if below, but
-  // it feels nicer to have the shortcut return first).
-  if (output_rep == MachineRepresentation::kWord32 ||
-      output_type->Is(Type::Signed32())) {
-    return node;
-  }
-
-  // Select the correct X -> Word32 operator.
-  const Operator* op = nullptr;
-  if (output_rep == MachineRepresentation::kWord32) {
-    if (output_type->Is(Type::Unsigned32())) {
-      op = simplified()->CheckedUint32ToInt32();
-    }
-  } else if (output_rep == MachineRepresentation::kBit) {
-    return node;  // Sloppy comparison -> word32
-  } else if (output_rep == MachineRepresentation::kFloat64) {
-    if (output_type->Is(Type::Unsigned32())) {
-      op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
-      op = machine()->ChangeFloat64ToInt32();
-    } else if (truncation.TruncatesToWord32()) {
-      op = machine()->TruncateFloat64ToWord32();
-    } else if (check == TypeCheckKind::kSigned32) {
-      op = simplified()->CheckedFloat64ToInt32();
-    }
-  } else if (output_rep == MachineRepresentation::kFloat32) {
-    node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
-    if (output_type->Is(Type::Unsigned32())) {
-      op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
-      op = machine()->ChangeFloat64ToInt32();
-    } else if (truncation.TruncatesToWord32()) {
-      op = machine()->TruncateFloat64ToWord32();
-    } else if (check == TypeCheckKind::kSigned32) {
-      op = simplified()->CheckedFloat64ToInt32();
-    }
-  } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::TaggedSigned())) {
-      op = simplified()->ChangeTaggedSignedToInt32();
-    } else if (output_type->Is(Type::Unsigned32())) {
-      op = simplified()->ChangeTaggedToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
-      op = simplified()->ChangeTaggedToInt32();
-    } else if (truncation.TruncatesToWord32()) {
-      op = simplified()->TruncateTaggedToWord32();
-    } else if (check == TypeCheckKind::kSigned32) {
-      op = simplified()->CheckedTaggedToInt32();
-    }
-  }
-  if (op == nullptr) {
-    return TypeError(node, output_rep, output_type,
-                     MachineRepresentation::kWord32);
-  }
-  if (op->ControlInputCount() > 0) {
-    // If the operator can deoptimize (which means it has control
-    // input), we need to connect it to the effect and control chains.
-    UNIMPLEMENTED();
-  }
-  return jsgraph()->graph()->NewNode(op, node);
-}
-
 const Operator* RepresentationChanger::Int32OperatorFor(
     IrOpcode::Value opcode) {
   switch (opcode) {
@@ -590,10 +596,13 @@
     case IrOpcode::kSpeculativeNumberModulus:
     case IrOpcode::kNumberModulus:
       return machine()->Int32Mod();
+    case IrOpcode::kSpeculativeNumberBitwiseOr:  // Fall through.
     case IrOpcode::kNumberBitwiseOr:
       return machine()->Word32Or();
+    case IrOpcode::kSpeculativeNumberBitwiseXor:  // Fall through.
     case IrOpcode::kNumberBitwiseXor:
       return machine()->Word32Xor();
+    case IrOpcode::kSpeculativeNumberBitwiseAnd:  // Fall through.
     case IrOpcode::kNumberBitwiseAnd:
       return machine()->Word32And();
     case IrOpcode::kNumberEqual:
@@ -614,10 +623,14 @@
 const Operator* RepresentationChanger::Int32OverflowOperatorFor(
     IrOpcode::Value opcode) {
   switch (opcode) {
-    case IrOpcode::kSpeculativeNumberAdd:  // Fall through.
+    case IrOpcode::kSpeculativeNumberAdd:
       return simplified()->CheckedInt32Add();
-    case IrOpcode::kSpeculativeNumberSubtract:  // Fall through.
+    case IrOpcode::kSpeculativeNumberSubtract:
       return simplified()->CheckedInt32Sub();
+    case IrOpcode::kSpeculativeNumberDivide:
+      return simplified()->CheckedInt32Div();
+    case IrOpcode::kSpeculativeNumberModulus:
+      return simplified()->CheckedInt32Mod();
     default:
       UNREACHABLE();
       return nullptr;
@@ -659,6 +672,18 @@
   }
 }
 
+const Operator* RepresentationChanger::Uint32OverflowOperatorFor(
+    IrOpcode::Value opcode) {
+  switch (opcode) {
+    case IrOpcode::kSpeculativeNumberDivide:
+      return simplified()->CheckedUint32Div();
+    case IrOpcode::kSpeculativeNumberModulus:
+      return simplified()->CheckedUint32Mod();
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+}
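
The new checked unsigned operators exist because uint32 division has no quiet
failure mode. A sketch of the semantics, under the assumption that the
speculative op must reproduce the exact JS number result (nullopt standing in
for a deoptimization):

#include <cstdint>
#include <optional>

std::optional<uint32_t> CheckedUint32DivSketch(uint32_t lhs, uint32_t rhs) {
  if (rhs == 0) return std::nullopt;        // x / 0 is NaN or Infinity in JS
  if (lhs % rhs != 0) return std::nullopt;  // quotient is not a uint32
  return lhs / rhs;
}
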
 
 const Operator* RepresentationChanger::Float64OperatorFor(
     IrOpcode::Value opcode) {
@@ -689,18 +714,36 @@
       return machine()->Float64LessThanOrEqual();
     case IrOpcode::kNumberAbs:
       return machine()->Float64Abs();
+    case IrOpcode::kNumberAcos:
+      return machine()->Float64Acos();
+    case IrOpcode::kNumberAcosh:
+      return machine()->Float64Acosh();
+    case IrOpcode::kNumberAsin:
+      return machine()->Float64Asin();
+    case IrOpcode::kNumberAsinh:
+      return machine()->Float64Asinh();
     case IrOpcode::kNumberAtan:
       return machine()->Float64Atan();
-    case IrOpcode::kNumberAtan2:
-      return machine()->Float64Atan2();
-    case IrOpcode::kNumberCos:
-      return machine()->Float64Cos();
-    case IrOpcode::kNumberExp:
-      return machine()->Float64Exp();
-    case IrOpcode::kNumberFround:
-      return machine()->TruncateFloat64ToFloat32();
     case IrOpcode::kNumberAtanh:
       return machine()->Float64Atanh();
+    case IrOpcode::kNumberAtan2:
+      return machine()->Float64Atan2();
+    case IrOpcode::kNumberCbrt:
+      return machine()->Float64Cbrt();
+    case IrOpcode::kNumberCeil:
+      return machine()->Float64RoundUp().placeholder();
+    case IrOpcode::kNumberCos:
+      return machine()->Float64Cos();
+    case IrOpcode::kNumberCosh:
+      return machine()->Float64Cosh();
+    case IrOpcode::kNumberExp:
+      return machine()->Float64Exp();
+    case IrOpcode::kNumberExpm1:
+      return machine()->Float64Expm1();
+    case IrOpcode::kNumberFloor:
+      return machine()->Float64RoundDown().placeholder();
+    case IrOpcode::kNumberFround:
+      return machine()->TruncateFloat64ToFloat32();
     case IrOpcode::kNumberLog:
       return machine()->Float64Log();
     case IrOpcode::kNumberLog1p:
@@ -709,16 +752,24 @@
       return machine()->Float64Log2();
     case IrOpcode::kNumberLog10:
       return machine()->Float64Log10();
+    case IrOpcode::kNumberMax:
+      return machine()->Float64Max();
+    case IrOpcode::kNumberMin:
+      return machine()->Float64Min();
+    case IrOpcode::kNumberPow:
+      return machine()->Float64Pow();
     case IrOpcode::kNumberSin:
       return machine()->Float64Sin();
-    case IrOpcode::kNumberTan:
-      return machine()->Float64Tan();
+    case IrOpcode::kNumberSinh:
+      return machine()->Float64Sinh();
     case IrOpcode::kNumberSqrt:
       return machine()->Float64Sqrt();
-    case IrOpcode::kNumberCbrt:
-      return machine()->Float64Cbrt();
-    case IrOpcode::kNumberExpm1:
-      return machine()->Float64Expm1();
+    case IrOpcode::kNumberTan:
+      return machine()->Float64Tan();
+    case IrOpcode::kNumberTanh:
+      return machine()->Float64Tanh();
+    case IrOpcode::kNumberTrunc:
+      return machine()->Float64RoundTruncate().placeholder();
     case IrOpcode::kNumberSilenceNaN:
       return machine()->Float64SilenceNaN();
     default:
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 8a38644..fac3280 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -28,17 +28,18 @@
   }
 
   // Queries.
-  bool TruncatesToWord32() const {
+  bool IsUnused() const { return kind_ == TruncationKind::kNone; }
+  bool IsUsedAsWord32() const {
     return LessGeneral(kind_, TruncationKind::kWord32);
   }
-  bool TruncatesToFloat64() const {
+  bool IsUsedAsFloat64() const {
     return LessGeneral(kind_, TruncationKind::kFloat64);
   }
-  bool TruncatesNaNToZero() {
+  bool IdentifiesNaNAndZero() {
     return LessGeneral(kind_, TruncationKind::kWord32) ||
            LessGeneral(kind_, TruncationKind::kBool);
   }
-  bool TruncatesUndefinedToZeroOrNaN() {
+  bool IdentifiesUndefinedAndNaNAndZero() {
     return LessGeneral(kind_, TruncationKind::kFloat64) ||
            LessGeneral(kind_, TruncationKind::kWord64);
   }
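
All four renamed queries reduce to LessGeneral over a small lattice of
truncation kinds, with kNone (the value is unused) at the bottom and kAny at
the top. A heavily simplified model of that order (the real lattice in V8 has
more points and more edges, e.g. kWord32 below kWord64):

// Toy lattice: kNone <= everything <= kAny; other kinds are only
// comparable to themselves here.
enum class TruncationKindSketch { kNone, kBool, kWord32, kWord64, kFloat64, kAny };

bool LessGeneralSketch(TruncationKindSketch a, TruncationKindSketch b) {
  if (a == b) return true;
  if (a == TruncationKindSketch::kNone) return true;
  if (b == TruncationKindSketch::kAny) return true;
  return false;
}
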
@@ -75,11 +76,29 @@
 
 enum class TypeCheckKind : uint8_t {
   kNone,
+  kSignedSmall,
   kSigned32,
-  kNumberOrUndefined,
-  kNumber
+  kNumber,
+  kNumberOrOddball
 };
 
+inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
+  switch (type_check) {
+    case TypeCheckKind::kNone:
+      return os << "None";
+    case TypeCheckKind::kSignedSmall:
+      return os << "SignedSmall";
+    case TypeCheckKind::kSigned32:
+      return os << "Signed32";
+    case TypeCheckKind::kNumber:
+      return os << "Number";
+    case TypeCheckKind::kNumberOrOddball:
+      return os << "NumberOrOddball";
+  }
+  UNREACHABLE();
+  return os;
+}
+
 // The {UseInfo} class is used to describe a use of an input of a node.
 //
 // This information is used in two different ways, based on the phase:
@@ -122,13 +141,29 @@
   }
 
   // Possibly deoptimizing conversions.
+  static UseInfo CheckedSignedSmallAsWord32() {
+    return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
+                   TypeCheckKind::kSignedSmall);
+  }
   static UseInfo CheckedSigned32AsWord32() {
     return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
                    TypeCheckKind::kSigned32);
   }
-  static UseInfo CheckedNumberOrUndefinedAsFloat64() {
+  static UseInfo CheckedNumberAsFloat64() {
+    return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
+                   TypeCheckKind::kNumber);
+  }
+  static UseInfo CheckedNumberAsWord32() {
+    return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
+                   TypeCheckKind::kNumber);
+  }
+  static UseInfo CheckedNumberOrOddballAsFloat64() {
     return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
-                   TypeCheckKind::kNumberOrUndefined);
+                   TypeCheckKind::kNumberOrOddball);
+  }
+  static UseInfo CheckedNumberOrOddballAsWord32() {
+    return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
+                   TypeCheckKind::kNumberOrOddball);
   }
 
   // Undetermined representation.
@@ -175,6 +210,7 @@
   const Operator* Int32OperatorFor(IrOpcode::Value opcode);
   const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
   const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
+  const Operator* Uint32OverflowOperatorFor(IrOpcode::Value opcode);
   const Operator* Float64OperatorFor(IrOpcode::Value opcode);
 
   MachineType TypeForBasePointer(const FieldAccess& access) {
@@ -212,11 +248,6 @@
                                 Type* output_type);
   Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
                                    Type* output_type);
-  Node* GetCheckedWord32RepresentationFor(Node* node,
-                                          MachineRepresentation output_rep,
-                                          Type* output_type, Node* use_node,
-                                          Truncation truncation,
-                                          TypeCheckKind check);
   Node* TypeError(Node* node, MachineRepresentation output_rep,
                   Type* output_type, MachineRepresentation use);
   Node* MakeTruncatedInt32Constant(double value);
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index ac24529..e69a7ac 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -27,6 +27,16 @@
 
   size_t OutputCount() { return instr_->OutputCount(); }
 
+  bool Is64BitOperand(int index) {
+    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
+           MachineRepresentation::kWord64;
+  }
+
+  bool Is32BitOperand(int index) {
+    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
+           MachineRepresentation::kWord32;
+  }
+
   bool CompareLogical() const {
     switch (instr_->flags_condition()) {
       case kUnsignedLessThan:
@@ -71,12 +81,19 @@
     switch (AddressingModeField::decode(instr_->opcode())) {
       case kMode_None:
         break;
+      case kMode_MR:
+        *first_index += 1;
+        return MemOperand(InputRegister(index + 0), 0);
       case kMode_MRI:
         *first_index += 2;
         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
       case kMode_MRR:
         *first_index += 2;
         return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+      case kMode_MRRI:
+        *first_index += 3;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+                          InputInt32(index + 2));
     }
     UNREACHABLE();
     return MemOperand(r0);
@@ -97,12 +114,25 @@
     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
+
+  MemOperand InputStackSlot(size_t index) {
+    InstructionOperand* op = instr_->InputAt(index);
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
 };
 
 static inline bool HasRegisterInput(Instruction* instr, int index) {
   return instr->InputAt(index)->IsRegister();
 }
 
+static inline bool HasImmediateInput(Instruction* instr, size_t index) {
+  return instr->InputAt(index)->IsImmediate();
+}
+
+static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
+  return instr->InputAt(index)->IsStackSlot();
+}
+
 namespace {
 
 class OutOfLineLoadNAN32 final : public OutOfLineCode {
@@ -235,26 +265,22 @@
     case kOverflow:
       // Overflow checked for AddP/SubP only.
       switch (op) {
-#if V8_TARGET_ARCH_S390X
-        case kS390_Add:
-        case kS390_Sub:
-#endif
-        case kS390_AddWithOverflow32:
-        case kS390_SubWithOverflow32:
-          return lt;
+        case kS390_Add32:
+        case kS390_Add64:
+        case kS390_Sub32:
+        case kS390_Sub64:
+          return overflow;
         default:
           break;
       }
       break;
     case kNotOverflow:
       switch (op) {
-#if V8_TARGET_ARCH_S390X
-        case kS390_Add:
-        case kS390_Sub:
-#endif
-        case kS390_AddWithOverflow32:
-        case kS390_SubWithOverflow32:
-          return ge;
+        case kS390_Add32:
+        case kS390_Add64:
+        case kS390_Sub32:
+        case kS390_Sub64:
+          return nooverflow;
         default:
           break;
       }
@@ -279,67 +305,19 @@
                  i.InputDoubleRegister(1));                          \
   } while (0)
 
-#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
-  do {                                                         \
-    if (HasRegisterInput(instr, 1)) {                          \
-      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
-                       i.InputRegister(1));                    \
-    } else {                                                   \
-      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
-                       i.InputImmediate(1));                   \
-    }                                                          \
+#define ASSEMBLE_BINOP(asm_instr)                          \
+  do {                                                     \
+    if (HasRegisterInput(instr, 1)) {                      \
+      __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
+                   i.InputRegister(1));                    \
+    } else if (HasImmediateInput(instr, 1)) {              \
+      __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
+                   i.InputImmediate(1));                   \
+    } else {                                               \
+      UNIMPLEMENTED();                                     \
+    }                                                      \
   } while (0)
 
-#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm)       \
-  do {                                                         \
-    if (HasRegisterInput(instr, 1)) {                          \
-      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
-                       i.InputRegister(1));                    \
-    } else {                                                   \
-      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
-                       i.InputInt32(1));                       \
-    }                                                          \
-  } while (0)
-
-#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
-  do {                                                                  \
-    if (HasRegisterInput(instr, 1)) {                                   \
-      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
-                                i.InputRegister(1), kScratchReg, r0);   \
-    } else {                                                            \
-      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
-                                i.InputInt32(1), kScratchReg, r0);      \
-    }                                                                   \
-  } while (0)
-
-#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
-  do {                                                                  \
-    if (HasRegisterInput(instr, 1)) {                                   \
-      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
-                                i.InputRegister(1), kScratchReg, r0);   \
-    } else {                                                            \
-      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
-                                -i.InputInt32(1), kScratchReg, r0);     \
-    }                                                                   \
-  } while (0)
-
-#if V8_TARGET_ARCH_S390X
-#define ASSEMBLE_ADD_WITH_OVERFLOW32()                   \
-  do {                                                   \
-    ASSEMBLE_ADD_WITH_OVERFLOW();                        \
-    __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
-  } while (0)
-
-#define ASSEMBLE_SUB_WITH_OVERFLOW32()                   \
-  do {                                                   \
-    ASSEMBLE_SUB_WITH_OVERFLOW();                        \
-    __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
-  } while (0)
-#else
-#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
-#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
-#endif
-
 #define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                 \
   do {                                                          \
     if (HasRegisterInput(instr, 1)) {                           \
@@ -412,30 +390,185 @@
     __ MovFromFloatResult(i.OutputDoubleRegister());                           \
   } while (0)
 
-#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
-  do {                                                              \
-    Label ge, done;                                                 \
-    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
-    __ bge(&ge, Label::kNear);                                      \
-    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
-    __ b(&done, Label::kNear);                                      \
-    __ bind(&ge);                                                   \
-    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
-    __ bind(&done);                                                 \
+#define ASSEMBLE_DOUBLE_MAX()                                          \
+  do {                                                                 \
+    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
+    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
+    DoubleRegister result_reg = i.OutputDoubleRegister();              \
+    Label check_nan_left, check_zero, return_left, return_right, done; \
+    __ cdbr(left_reg, right_reg);                                      \
+    __ bunordered(&check_nan_left, Label::kNear);                      \
+    __ beq(&check_zero);                                               \
+    __ bge(&return_left, Label::kNear);                                \
+    __ b(&return_right, Label::kNear);                                 \
+                                                                       \
+    __ bind(&check_zero);                                              \
+    __ lzdr(kDoubleRegZero);                                           \
+    __ cdbr(left_reg, kDoubleRegZero);                                 \
+    /* left == right != 0. */                                          \
+    __ bne(&return_left, Label::kNear);                                \
+    /* At this point, both left and right are either 0 or -0. */       \
+    /* N.B. The following works because +0 + -0 == +0 */               \
+    /* For max we want logical-and of sign bit: (L + R) */             \
+    __ ldr(result_reg, left_reg);                                      \
+    __ adbr(result_reg, right_reg);                                    \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&check_nan_left);                                          \
+    __ cdbr(left_reg, left_reg);                                       \
+    /* left == NaN. */                                                 \
+    __ bunordered(&return_left, Label::kNear);                         \
+                                                                       \
+    __ bind(&return_right);                                            \
+    if (!right_reg.is(result_reg)) {                                   \
+      __ ldr(result_reg, right_reg);                                   \
+    }                                                                  \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&return_left);                                             \
+    if (!left_reg.is(result_reg)) {                                    \
+      __ ldr(result_reg, left_reg);                                    \
+    }                                                                  \
+    __ bind(&done);                                                    \
   } while (0)
 
-#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
-  do {                                                              \
-    Label ge, done;                                                 \
-    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
-    __ bge(&ge, Label::kNear);                                      \
-    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
-    __ b(&done, Label::kNear);                                      \
-    __ bind(&ge);                                                   \
-    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
-    __ bind(&done);                                                 \
+#define ASSEMBLE_DOUBLE_MIN()                                          \
+  do {                                                                 \
+    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
+    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
+    DoubleRegister result_reg = i.OutputDoubleRegister();              \
+    Label check_nan_left, check_zero, return_left, return_right, done; \
+    __ cdbr(left_reg, right_reg);                                      \
+    __ bunordered(&check_nan_left, Label::kNear);                      \
+    __ beq(&check_zero);                                               \
+    __ ble(&return_left, Label::kNear);                                \
+    __ b(&return_right, Label::kNear);                                 \
+                                                                       \
+    __ bind(&check_zero);                                              \
+    __ lzdr(kDoubleRegZero);                                           \
+    __ cdbr(left_reg, kDoubleRegZero);                                 \
+    /* left == right != 0. */                                          \
+    __ bne(&return_left, Label::kNear);                                \
+    /* At this point, both left and right are either 0 or -0. */       \
+    /* N.B. The following works because +0 + -0 == +0 */               \
+    /* For min we want logical-or of sign bit: -(-L + -R) */           \
+    __ lcdbr(left_reg, left_reg);                                      \
+    __ ldr(result_reg, left_reg);                                      \
+    if (left_reg.is(right_reg)) {                                      \
+      __ adbr(result_reg, right_reg);                                  \
+    } else {                                                           \
+      __ sdbr(result_reg, right_reg);                                  \
+    }                                                                  \
+    __ lcdbr(result_reg, result_reg);                                  \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&check_nan_left);                                          \
+    __ cdbr(left_reg, left_reg);                                       \
+    /* left == NaN. */                                                 \
+    __ bunordered(&return_left, Label::kNear);                         \
+                                                                       \
+    __ bind(&return_right);                                            \
+    if (!right_reg.is(result_reg)) {                                   \
+      __ ldr(result_reg, right_reg);                                   \
+    }                                                                  \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&return_left);                                             \
+    if (!left_reg.is(result_reg)) {                                    \
+      __ ldr(result_reg, left_reg);                                    \
+    }                                                                  \
+    __ bind(&done);                                                    \
   } while (0)
 
+#define ASSEMBLE_FLOAT_MAX()                                           \
+  do {                                                                 \
+    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
+    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
+    DoubleRegister result_reg = i.OutputDoubleRegister();              \
+    Label check_nan_left, check_zero, return_left, return_right, done; \
+    __ cebr(left_reg, right_reg);                                      \
+    __ bunordered(&check_nan_left, Label::kNear);                      \
+    __ beq(&check_zero);                                               \
+    __ bge(&return_left, Label::kNear);                                \
+    __ b(&return_right, Label::kNear);                                 \
+                                                                       \
+    __ bind(&check_zero);                                              \
+    __ lzdr(kDoubleRegZero);                                           \
+    __ cebr(left_reg, kDoubleRegZero);                                 \
+    /* left == right != 0. */                                          \
+    __ bne(&return_left, Label::kNear);                                \
+    /* At this point, both left and right are either 0 or -0. */       \
+    /* N.B. The following works because +0 + -0 == +0 */               \
+    /* For max we want logical-and of sign bit: (L + R) */             \
+    __ ldr(result_reg, left_reg);                                      \
+    __ aebr(result_reg, right_reg);                                    \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&check_nan_left);                                          \
+    __ cebr(left_reg, left_reg);                                       \
+    /* left == NaN. */                                                 \
+    __ bunordered(&return_left, Label::kNear);                         \
+                                                                       \
+    __ bind(&return_right);                                            \
+    if (!right_reg.is(result_reg)) {                                   \
+      __ ldr(result_reg, right_reg);                                   \
+    }                                                                  \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&return_left);                                             \
+    if (!left_reg.is(result_reg)) {                                    \
+      __ ldr(result_reg, left_reg);                                    \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_MIN()                                           \
+  do {                                                                 \
+    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
+    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
+    DoubleRegister result_reg = i.OutputDoubleRegister();              \
+    Label check_nan_left, check_zero, return_left, return_right, done; \
+    __ cebr(left_reg, right_reg);                                      \
+    __ bunordered(&check_nan_left, Label::kNear);                      \
+    __ beq(&check_zero);                                               \
+    __ ble(&return_left, Label::kNear);                                \
+    __ b(&return_right, Label::kNear);                                 \
+                                                                       \
+    __ bind(&check_zero);                                              \
+    __ lzdr(kDoubleRegZero);                                           \
+    __ cebr(left_reg, kDoubleRegZero);                                 \
+    /* left == right != 0. */                                          \
+    __ bne(&return_left, Label::kNear);                                \
+    /* At this point, both left and right are either 0 or -0. */       \
+    /* N.B. The following works because +0 + -0 == +0 */               \
+    /* For min we want logical-or of sign bit: -(-L + -R) */           \
+    __ lcebr(left_reg, left_reg);                                      \
+    __ ldr(result_reg, left_reg);                                      \
+    if (left_reg.is(right_reg)) {                                      \
+      __ aebr(result_reg, right_reg);                                  \
+    } else {                                                           \
+      __ sebr(result_reg, right_reg);                                  \
+    }                                                                  \
+    __ lcebr(result_reg, result_reg);                                  \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&check_nan_left);                                          \
+    __ cebr(left_reg, left_reg);                                       \
+    /* left == NaN. */                                                 \
+    __ bunordered(&return_left, Label::kNear);                         \
+                                                                       \
+    __ bind(&return_right);                                            \
+    if (!right_reg.is(result_reg)) {                                   \
+      __ ldr(result_reg, right_reg);                                   \
+    }                                                                  \
+    __ b(&done, Label::kNear);                                         \
+                                                                       \
+    __ bind(&return_left);                                             \
+    if (!left_reg.is(result_reg)) {                                    \
+      __ ldr(result_reg, left_reg);                                    \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
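
The +0/-0 arithmetic in the four min/max macros above is the classic
sign-bit trick: because +0 + -0 == +0, adding the operands computes the
logical AND of the sign bits (right for max), and negating the sum of the
negations computes the OR (right for min). The identity, checked on the host:

#include <cassert>
#include <cmath>

int main() {
  double pz = 0.0, nz = -0.0;
  // max(+0, -0) must be +0: the sum is -0 only when both inputs are -0.
  assert(!std::signbit(pz + nz));
  assert(std::signbit(nz + nz));
  // min(+0, -0) must be -0: -(-L + -R) is -0 unless both inputs are +0.
  assert(std::signbit(-(-pz + -nz)));
  assert(!std::signbit(-(-pz + -pz)));
  return 0;
}
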
 // Only the MRI addressing mode is available for these instructions
 #define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
   do {                                                \
@@ -580,20 +713,7 @@
   __ LeaveFrame(StackFrame::MANUAL);
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ RestoreFrameStateForTailCall();
   }
@@ -625,6 +745,114 @@
   __ bind(&done);
 }
 
+namespace {
+
+void FlushPendingPushRegisters(MacroAssembler* masm,
+                               FrameAccessState* frame_access_state,
+                               ZoneVector<Register>* pending_pushes) {
+  switch (pending_pushes->size()) {
+    case 0:
+      break;
+    case 1:
+      masm->Push((*pending_pushes)[0]);
+      break;
+    case 2:
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+      break;
+    case 3:
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+                 (*pending_pushes)[2]);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  frame_access_state->IncreaseSPDelta(pending_pushes->size());
+  pending_pushes->resize(0);
+}
+
+void AddPendingPushRegister(MacroAssembler* masm,
+                            FrameAccessState* frame_access_state,
+                            ZoneVector<Register>* pending_pushes,
+                            Register reg) {
+  pending_pushes->push_back(reg);
+  if (pending_pushes->size() == 3 || reg.is(ip)) {
+    FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+  }
+}
+
+void AdjustStackPointerForTailCall(
+    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+    ZoneVector<Register>* pending_pushes = nullptr,
+    bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    if (pending_pushes != nullptr) {
+      FlushPendingPushRegisters(masm, state, pending_pushes);
+    }
+    masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    if (pending_pushes != nullptr) {
+      FlushPendingPushRegisters(masm, state, pending_pushes);
+    }
+    masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
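
FlushPendingPushRegisters and AddPendingPushRegister amount to a tiny
batching queue: registers accumulate until three are pending (the widest
Push overload used here) or the scratch register ip is about to be reused,
then they are emitted as a single multi-register push. The grouping logic in
isolation (stand-in types; MacroAssembler details elided):

#include <cstdio>
#include <vector>

void FlushSketch(std::vector<int>* pending) {
  if (pending->empty()) return;
  std::printf("emit one push of %zu registers\n", pending->size());
  pending->clear();
}

void AddPendingSketch(std::vector<int>* pending, int reg, int scratch) {
  pending->push_back(reg);
  // Flush at the batch limit, or before the scratch register would be
  // clobbered by the next value materialized into it.
  if (pending->size() == 3 || reg == scratch) FlushSketch(pending);
}
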
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+  ZoneVector<MoveOperands*> pushes(zone());
+  GetPushCompatibleMoves(instr, flags, &pushes);
+
+  if (!pushes.empty() &&
+      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+       first_unused_stack_slot)) {
+    S390OperandConverter g(this, instr);
+    ZoneVector<Register> pending_pushes(zone());
+    for (auto move : pushes) {
+      LocationOperand destination_location(
+          LocationOperand::cast(move->destination()));
+      InstructionOperand source(move->source());
+      AdjustStackPointerForTailCall(
+          masm(), frame_access_state(),
+          destination_location.index() - pending_pushes.size(),
+          &pending_pushes);
+      if (source.IsStackSlot()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               ip);
+      } else if (source.IsRegister()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               source_location.GetRegister());
+      } else if (source.IsImmediate()) {
+        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+                               ip);
+      } else {
+        // Pushes of non-scalar data types are not supported.
+        UNIMPLEMENTED();
+      }
+      move->Eliminate();
+    }
+    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+  }
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, nullptr, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -632,6 +860,11 @@
   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
 
   switch (opcode) {
+    case kArchComment: {
+      Address comment_string = i.InputExternalReference(0).address();
+      __ RecordComment(reinterpret_cast<const char*>(comment_string));
+      break;
+    }
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (HasRegisterInput(instr, 0)) {
@@ -648,8 +881,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -667,14 +898,14 @@
                 RelocInfo::CODE_TARGET);
       }
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!instr->InputAt(0)->IsImmediate());
       __ Jump(i.InputRegister(0));
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -703,8 +934,6 @@
         __ CmpP(cp, kScratchReg);
         __ Assert(eq, kWrongFunctionContext);
       }
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -713,6 +942,7 @@
       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction: {
@@ -723,7 +953,7 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
@@ -750,6 +980,9 @@
     case kArchDebugBreak:
       __ stop("kArchDebugBreak");
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -820,22 +1053,22 @@
               Operand(offset.offset()));
       break;
     }
-    case kS390_And:
-      ASSEMBLE_BINOP(AndP, AndP);
+    case kS390_And32:
+      ASSEMBLE_BINOP(And);
       break;
-    case kS390_AndComplement:
-      __ NotP(i.InputRegister(1));
-      __ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+    case kS390_And64:
+      ASSEMBLE_BINOP(AndP);
       break;
-    case kS390_Or:
-      ASSEMBLE_BINOP(OrP, OrP);
+    case kS390_Or32:
+      ASSEMBLE_BINOP(Or);
+      break;
+    case kS390_Or64:
+      ASSEMBLE_BINOP(OrP);
       break;
-    case kS390_OrComplement:
-      __ NotP(i.InputRegister(1));
-      __ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+    case kS390_Xor32:
+      ASSEMBLE_BINOP(Xor);
       break;
-    case kS390_Xor:
-      ASSEMBLE_BINOP(XorP, XorP);
+    case kS390_Xor64:
+      ASSEMBLE_BINOP(XorP);
       break;
     case kS390_ShiftLeft32:
       if (HasRegisterInput(instr, 1)) {
@@ -844,16 +1077,16 @@
           __ LoadRR(kScratchReg, i.InputRegister(1));
           __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
         } else {
-          ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+          ASSEMBLE_BINOP(ShiftLeft);
         }
       } else {
-        ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+        ASSEMBLE_BINOP(ShiftLeft);
       }
       __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftLeft64:
-      ASSEMBLE_BINOP(sllg, sllg);
+      ASSEMBLE_BINOP(sllg);
       break;
 #endif
     case kS390_ShiftRight32:
@@ -863,16 +1096,16 @@
           __ LoadRR(kScratchReg, i.InputRegister(1));
           __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
         } else {
-          ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+          ASSEMBLE_BINOP(ShiftRight);
         }
       } else {
-        ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+        ASSEMBLE_BINOP(ShiftRight);
       }
       __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftRight64:
-      ASSEMBLE_BINOP(srlg, srlg);
+      ASSEMBLE_BINOP(srlg);
       break;
 #endif
     case kS390_ShiftRightArith32:
@@ -883,16 +1116,16 @@
           __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
                              kScratchReg);
         } else {
-          ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+          ASSEMBLE_BINOP(ShiftRightArith);
         }
       } else {
-        ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+        ASSEMBLE_BINOP(ShiftRightArith);
       }
       __ LoadlW(i.OutputRegister(), i.OutputRegister());
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftRightArith64:
-      ASSEMBLE_BINOP(srag, srag);
+      ASSEMBLE_BINOP(srag);
       break;
 #endif
 #if !V8_TARGET_ARCH_S390X
@@ -983,9 +1216,11 @@
       }
       break;
 #endif
-    case kS390_Not:
-      __ LoadRR(i.OutputRegister(), i.InputRegister(0));
-      __ NotP(i.OutputRegister());
+    case kS390_Not32:
+      __ Not32(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_Not64:
+      __ Not64(i.OutputRegister(), i.InputRegister(0));
       break;
     case kS390_RotLeftAndMask32:
       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -1041,19 +1276,12 @@
       }
       break;
 #endif
-    case kS390_Add:
-#if V8_TARGET_ARCH_S390X
-      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
-        ASSEMBLE_ADD_WITH_OVERFLOW();
-      } else {
-#endif
-        ASSEMBLE_BINOP(AddP, AddP);
-#if V8_TARGET_ARCH_S390X
-      }
-#endif
+    case kS390_Add32:
+      ASSEMBLE_BINOP(Add32);
+      __ LoadW(i.OutputRegister(), i.OutputRegister());
       break;
-    case kS390_AddWithOverflow32:
-      ASSEMBLE_ADD_WITH_OVERFLOW32();
+    case kS390_Add64:
+      ASSEMBLE_BINOP(AddP);
       break;
     case kS390_AddFloat:
       // Ensure we don't clobber right/InputReg(1)
@@ -1075,19 +1303,12 @@
         __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
       }
       break;
-    case kS390_Sub:
-#if V8_TARGET_ARCH_S390X
-      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
-        ASSEMBLE_SUB_WITH_OVERFLOW();
-      } else {
-#endif
-        ASSEMBLE_BINOP(SubP, SubP);
-#if V8_TARGET_ARCH_S390X
-      }
-#endif
+    case kS390_Sub32:
+      ASSEMBLE_BINOP(Sub32);
+      __ LoadW(i.OutputRegister(), i.OutputRegister());
       break;
-    case kS390_SubWithOverflow32:
-      ASSEMBLE_SUB_WITH_OVERFLOW32();
+    case kS390_Sub64:
+      ASSEMBLE_BINOP(SubP);
       break;
     case kS390_SubFloat:
       // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
@@ -1116,19 +1337,80 @@
       }
       break;
     case kS390_Mul32:
-#if V8_TARGET_ARCH_S390X
-    case kS390_Mul64:
+      if (HasRegisterInput(instr, 1)) {
+        __ Mul32(i.InputRegister(0), i.InputRegister(1));
+      } else if (HasImmediateInput(instr, 1)) {
+        __ Mul32(i.InputRegister(0), i.InputImmediate(1));
+      } else if (HasStackSlotInput(instr, 1)) {
+#ifdef V8_TARGET_ARCH_S390X
+        // Avoid the endianness issue here:
+        // stg r1, 0(fp)
+        // ...
+        // msy r2, 0(fp) <-- This will read the upper 32 bits
+        __ lg(kScratchReg, i.InputStackSlot(1));
+        __ Mul32(i.InputRegister(0), kScratchReg);
+#else
+        __ Mul32(i.InputRegister(0), i.InputStackSlot(1));
 #endif
-      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
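
The endian comment above is the crux of the stack-slot path: on s390x a
spill writes the full 8-byte slot (stg), so a 4-byte operand load at offset
0 would read the upper half of the value on a big-endian machine. Reloading
through lg sidesteps that. The effect, demonstrated on the host (assuming an
8-byte spill slot):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t slot = 42;  // what an 8-byte spill leaves in memory
  uint32_t first_half;
  std::memcpy(&first_half, &slot, sizeof first_half);  // 4-byte load at +0
  // Big-endian (s390x): prints 0, the upper half. Little-endian: 42.
  std::printf("%u\n", first_half);
  return 0;
}
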
+    case kS390_Mul64:
+      if (HasRegisterInput(instr, 1)) {
+        __ Mul64(i.InputRegister(0), i.InputRegister(1));
+      } else if (HasImmediateInput(instr, 1)) {
+        __ Mul64(i.InputRegister(0), i.InputImmediate(1));
+      } else if (HasStackSlotInput(instr, 1)) {
+        __ Mul64(i.InputRegister(0), i.InputStackSlot(1));
+      } else {
+        UNIMPLEMENTED();
+      }
       break;
     case kS390_MulHigh32:
       __ LoadRR(r1, i.InputRegister(0));
-      __ mr_z(r0, i.InputRegister(1));
+      if (HasRegisterInput(instr, 1)) {
+        __ mr_z(r0, i.InputRegister(1));
+      } else if (HasStackSlotInput(instr, 1)) {
+#ifdef V8_TARGET_ARCH_S390X
+        // Avoid endianness issue here:
+        // stg r1, 0(fp)
+        // ...
+        // mfy r2, 0(fp) <-- This will read the upper 32 bits
+        __ lg(kScratchReg, i.InputStackSlot(1));
+        __ mr_z(r0, kScratchReg);
+#else
+        __ mfy(r0, i.InputStackSlot(1));
+#endif
+      } else {
+        UNIMPLEMENTED();
+      }
       __ LoadW(i.OutputRegister(), r0);
       break;
+    case kS390_Mul32WithHigh32:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ mr_z(r0, i.InputRegister(1));
+      __ LoadW(i.OutputRegister(0), r1);  // low
+      __ LoadW(i.OutputRegister(1), r0);  // high
+      break;
     case kS390_MulHighU32:
       __ LoadRR(r1, i.InputRegister(0));
-      __ mlr(r0, i.InputRegister(1));
+      if (HasRegisterInput(instr, 1)) {
+        __ mlr(r0, i.InputRegister(1));
+      } else if (HasStackSlotInput(instr, 1)) {
+#ifdef V8_TARGET_ARCH_S390X
+        // Avoid endianness issue here:
+        // stg r1, 0(fp)
+        // ...
+        // mfy r2, 0(fp) <-- This will read the upper 32 bits
+        __ lg(kScratchReg, i.InputStackSlot(1));
+        __ mlr(r0, kScratchReg);
+#else
+        __ ml(r0, i.InputStackSlot(1));
+#endif
+      } else {
+        UNIMPLEMENTED();
+      }
       __ LoadlW(i.OutputRegister(), r0);
       break;
     case kS390_MulFloat:
@@ -1246,6 +1528,21 @@
     case kS390_ModDouble:
       ASSEMBLE_FLOAT_MODULO();
       break;
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
@@ -1255,24 +1552,30 @@
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kIeee754Float64Cbrt:
       ASSEMBLE_IEEE754_UNOP(cbrt);
       break;
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Exp:
       ASSEMBLE_IEEE754_UNOP(exp);
       break;
     case kIeee754Float64Expm1:
       ASSEMBLE_IEEE754_UNOP(expm1);
       break;
-    case kIeee754Float64Atanh:
-      ASSEMBLE_IEEE754_UNOP(atanh);
-      break;
     case kIeee754Float64Log:
       ASSEMBLE_IEEE754_UNOP(log);
       break;
@@ -1285,14 +1588,30 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
-    case kS390_Neg:
-      __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
+    case kIeee754Float64Pow: {
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      __ Move(d1, d3);
+      break;
+    }
+    case kS390_Neg32:
+      __ lcr(i.OutputRegister(), i.InputRegister(0));
+      __ LoadW(i.OutputRegister(), i.OutputRegister());
+      break;
+    case kS390_Neg64:
+      __ lcgr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_MaxFloat:
+      ASSEMBLE_FLOAT_MAX();
       break;
     case kS390_MaxDouble:
-      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
+      ASSEMBLE_DOUBLE_MAX();
+      break;
+    case kS390_MinFloat:
+      ASSEMBLE_FLOAT_MIN();
       break;
     case kS390_MinDouble:
-      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
+      ASSEMBLE_DOUBLE_MIN();
       break;
     case kS390_AbsDouble:
       __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -1316,6 +1635,9 @@
       __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
       break;
+    case kS390_NegFloat:
+      ASSEMBLE_FLOAT_UNOP(lcebr);
+      break;
     case kS390_NegDouble:
       ASSEMBLE_FLOAT_UNOP(lcdbr);
       break;
@@ -1501,7 +1823,7 @@
     case kS390_Float32ToInt32: {
       bool check_conversion = (i.OutputCount() > 1);
       __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
-                               kScratchDoubleReg);
+                               kScratchDoubleReg, kRoundToZero);
       if (check_conversion) {
         Label conversion_done;
         __ LoadImmP(i.OutputRegister(1), Operand::Zero());
@@ -1644,6 +1966,25 @@
     case kS390_LoadWordS32:
       ASSEMBLE_LOAD_INTEGER(LoadW);
       break;
+    case kS390_LoadReverse16:
+      ASSEMBLE_LOAD_INTEGER(lrvh);
+      break;
+    case kS390_LoadReverse32:
+      ASSEMBLE_LOAD_INTEGER(lrv);
+      break;
+    case kS390_LoadReverse64:
+      ASSEMBLE_LOAD_INTEGER(lrvg);
+      break;
+    case kS390_LoadReverse16RR:
+      __ lrvr(i.OutputRegister(), i.InputRegister(0));
+      __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
+      break;
+    case kS390_LoadReverse32RR:
+      __ lrvr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_LoadReverse64RR:
+      __ lrvgr(i.OutputRegister(), i.InputRegister(0));
+      break;
 #if V8_TARGET_ARCH_S390X
     case kS390_LoadWord64:
       ASSEMBLE_LOAD_INTEGER(lg);
@@ -1669,6 +2010,15 @@
       ASSEMBLE_STORE_INTEGER(StoreP);
       break;
 #endif
+    case kS390_StoreReverse16:
+      ASSEMBLE_STORE_INTEGER(strvh);
+      break;
+    case kS390_StoreReverse32:
+      ASSEMBLE_STORE_INTEGER(strv);
+      break;
+    case kS390_StoreReverse64:
+      ASSEMBLE_STORE_INTEGER(strvg);
+      break;
     case kS390_StoreFloat32:
       ASSEMBLE_STORE_FLOAT32();
       break;
@@ -1791,63 +2141,29 @@
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                         FlagsCondition condition) {
   S390OperandConverter i(this, instr);
-  Label done;
   ArchOpcode op = instr->arch_opcode();
-  bool check_unordered = (op == kS390_CmpDouble || kS390_CmpFloat);
+  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
 
   // Overflow checked for add/sub only.
   DCHECK((condition != kOverflow && condition != kNotOverflow) ||
-         (op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32) ||
-         (op == kS390_Add || op == kS390_Sub));
+         (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
+          op == kS390_Sub64));
 
   // Materialize a full 32-bit 1 or 0 value. The result register is always the
   // last output of the instruction.
   DCHECK_NE(0u, instr->OutputCount());
   Register reg = i.OutputRegister(instr->OutputCount() - 1);
   Condition cond = FlagsConditionToCondition(condition, op);
-  switch (cond) {
-    case ne:
-    case ge:
-    case gt:
-      if (check_unordered) {
-        __ LoadImmP(reg, Operand(1));
-        __ LoadImmP(kScratchReg, Operand::Zero());
-        __ bunordered(&done);
-        Label cond_true;
-        __ b(cond, &cond_true, Label::kNear);
-        __ LoadRR(reg, kScratchReg);
-        __ bind(&cond_true);
-      } else {
-        Label cond_true, done_here;
-        __ LoadImmP(reg, Operand(1));
-        __ b(cond, &cond_true, Label::kNear);
-        __ LoadImmP(reg, Operand::Zero());
-        __ bind(&cond_true);
-      }
-      break;
-    case eq:
-    case lt:
-    case le:
-      if (check_unordered) {
-        __ LoadImmP(reg, Operand::Zero());
-        __ LoadImmP(kScratchReg, Operand(1));
-        __ bunordered(&done);
-        Label cond_false;
-        __ b(NegateCondition(cond), &cond_false, Label::kNear);
-        __ LoadRR(reg, kScratchReg);
-        __ bind(&cond_false);
-      } else {
-        __ LoadImmP(reg, Operand::Zero());
-        Label cond_false;
-        __ b(NegateCondition(cond), &cond_false, Label::kNear);
-        __ LoadImmP(reg, Operand(1));
-        __ bind(&cond_false);
-      }
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  Label done;
+  if (check_unordered) {
+    __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
+                                                              : Operand(1));
+    __ bunordered(&done);
   }
+  __ LoadImmP(reg, Operand::Zero());
+  __ LoadImmP(kScratchReg, Operand(1));
+  // locr is sufficient since reg's upper 32 bits are guaranteed to be 0
+  __ locr(cond, reg, kScratchReg);
   __ bind(&done);
 }
 
@@ -1886,6 +2202,9 @@
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2063,10 +2382,7 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int slot;
-          if (IsMaterializableFromFrame(src_object, &slot)) {
-            __ LoadP(dst, g.SlotToMemOperand(slot));
-          } else if (IsMaterializableFromRoot(src_object, &index)) {
+          if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
             __ Move(dst, src_object);
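
Background for the "endianness issue" comments in the Mul32/MulHigh32/MulHighU32 stack-slot paths above: on big-endian s390x, a 32-bit memory operand at displacement 0 of an 8-byte spill slot reads the high word, so the patch first reloads the whole doubleword into kScratchReg. A minimal sketch of the offset arithmetic, in plain C++ rather than V8 code:

    #include <cstdint>

    // Big-endian layout of an 8-byte spill slot: byte offset 0 holds bits
    // 63..32. A narrow 32-bit read at +0 (msy/mfy/ml) therefore sees the
    // HIGH word; the low word the 32-bit multiply needs lives at +4.
    int32_t WordSeenAtOffset0(int64_t slot) {
      return static_cast<int32_t>(slot >> 32);  // what the narrow read returns
    }

    int32_t WordWanted(int64_t slot) {
      return static_cast<int32_t>(slot);  // low half, at byte offset 4
    }
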
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
index b53136c..80e1532 100644
--- a/src/compiler/s390/instruction-codes-s390.h
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -12,11 +12,12 @@
 // S390-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
 #define TARGET_ARCH_OPCODE_LIST(V) \
-  V(S390_And)                      \
-  V(S390_AndComplement)            \
-  V(S390_Or)                       \
-  V(S390_OrComplement)             \
-  V(S390_Xor)                      \
+  V(S390_And32)                    \
+  V(S390_And64)                    \
+  V(S390_Or32)                     \
+  V(S390_Or64)                     \
+  V(S390_Xor32)                    \
+  V(S390_Xor64)                    \
   V(S390_ShiftLeft32)              \
   V(S390_ShiftLeft64)              \
   V(S390_ShiftLeftPair)            \
@@ -28,23 +29,25 @@
   V(S390_ShiftRightArithPair)      \
   V(S390_RotRight32)               \
   V(S390_RotRight64)               \
-  V(S390_Not)                      \
+  V(S390_Not32)                    \
+  V(S390_Not64)                    \
   V(S390_RotLeftAndMask32)         \
   V(S390_RotLeftAndClear64)        \
   V(S390_RotLeftAndClearLeft64)    \
   V(S390_RotLeftAndClearRight64)   \
-  V(S390_Add)                      \
-  V(S390_AddWithOverflow32)        \
+  V(S390_Add32)                    \
+  V(S390_Add64)                    \
   V(S390_AddPair)                  \
   V(S390_AddFloat)                 \
   V(S390_AddDouble)                \
-  V(S390_Sub)                      \
-  V(S390_SubWithOverflow32)        \
+  V(S390_Sub32)                    \
+  V(S390_Sub64)                    \
   V(S390_SubFloat)                 \
   V(S390_SubDouble)                \
   V(S390_SubPair)                  \
   V(S390_MulPair)                  \
   V(S390_Mul32)                    \
+  V(S390_Mul32WithHigh32)          \
   V(S390_Mul64)                    \
   V(S390_MulHigh32)                \
   V(S390_MulHighU32)               \
@@ -61,8 +64,10 @@
   V(S390_ModU32)                   \
   V(S390_ModU64)                   \
   V(S390_ModDouble)                \
-  V(S390_Neg)                      \
+  V(S390_Neg32)                    \
+  V(S390_Neg64)                    \
   V(S390_NegDouble)                \
+  V(S390_NegFloat)                 \
   V(S390_SqrtFloat)                \
   V(S390_FloorFloat)               \
   V(S390_CeilFloat)                \
@@ -73,7 +78,9 @@
   V(S390_CeilDouble)               \
   V(S390_TruncateDouble)           \
   V(S390_RoundDouble)              \
+  V(S390_MaxFloat)                 \
   V(S390_MaxDouble)                \
+  V(S390_MinFloat)                 \
   V(S390_MinDouble)                \
   V(S390_AbsDouble)                \
   V(S390_Cntlz32)                  \
@@ -128,6 +135,12 @@
   V(S390_LoadWordU16)              \
   V(S390_LoadWordS32)              \
   V(S390_LoadWordU32)              \
+  V(S390_LoadReverse16RR)          \
+  V(S390_LoadReverse32RR)          \
+  V(S390_LoadReverse64RR)          \
+  V(S390_LoadReverse16)            \
+  V(S390_LoadReverse32)            \
+  V(S390_LoadReverse64)            \
   V(S390_LoadWord64)               \
   V(S390_LoadFloat32)              \
   V(S390_LoadDouble)               \
@@ -135,6 +148,9 @@
   V(S390_StoreWord16)              \
   V(S390_StoreWord32)              \
   V(S390_StoreWord64)              \
+  V(S390_StoreReverse16)           \
+  V(S390_StoreReverse32)           \
+  V(S390_StoreReverse64)           \
   V(S390_StoreFloat32)             \
   V(S390_StoreDouble)
 
@@ -152,8 +168,10 @@
 // MRI = [register + immediate]
 // MRR = [register + register]
 #define TARGET_ADDRESSING_MODE_LIST(V) \
-  V(MRI) /* [%r0 + K] */               \
-  V(MRR) /* [%r0 + %r1] */
+  V(MR)   /* [%r0          ] */        \
+  V(MRI)  /* [%r0       + K] */        \
+  V(MRR)  /* [%r0 + %r1    ] */        \
+  V(MRRI) /* [%r0 + %r1 + K] */
 
 }  // namespace compiler
 }  // namespace internal
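
The new MR and MRRI modes are combined with the opcode the same way the existing ones are, via opcode | AddressingModeField::encode(mode) at emit time. A rough round-trip sketch of that kind of bitfield packing (field widths assumed here, not V8's actual layout):

    #include <cstdint>

    enum AddressingMode : uint32_t { kMode_MR, kMode_MRI, kMode_MRR, kMode_MRRI };

    // Assumed split: low 9 bits hold the arch opcode, the mode sits above it.
    constexpr uint32_t kModeShift = 9;

    constexpr uint32_t Encode(uint32_t arch_opcode, AddressingMode mode) {
      return arch_opcode | (static_cast<uint32_t>(mode) << kModeShift);
    }

    constexpr AddressingMode DecodeMode(uint32_t code) {
      return static_cast<AddressingMode>(code >> kModeShift);
    }

    static_assert(DecodeMode(Encode(42, kMode_MRRI)) == kMode_MRRI,
                  "mode survives the round trip");
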
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
index 5b9722e..5ebe489 100644
--- a/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -13,11 +13,12 @@
 int InstructionScheduler::GetTargetInstructionFlags(
     const Instruction* instr) const {
   switch (instr->arch_opcode()) {
-    case kS390_And:
-    case kS390_AndComplement:
-    case kS390_Or:
-    case kS390_OrComplement:
-    case kS390_Xor:
+    case kS390_And32:
+    case kS390_And64:
+    case kS390_Or32:
+    case kS390_Or64:
+    case kS390_Xor32:
+    case kS390_Xor64:
     case kS390_ShiftLeft32:
     case kS390_ShiftLeft64:
     case kS390_ShiftLeftPair:
@@ -29,23 +30,25 @@
     case kS390_ShiftRightArithPair:
     case kS390_RotRight32:
     case kS390_RotRight64:
-    case kS390_Not:
+    case kS390_Not32:
+    case kS390_Not64:
     case kS390_RotLeftAndMask32:
     case kS390_RotLeftAndClear64:
     case kS390_RotLeftAndClearLeft64:
     case kS390_RotLeftAndClearRight64:
-    case kS390_Add:
-    case kS390_AddWithOverflow32:
+    case kS390_Add32:
+    case kS390_Add64:
     case kS390_AddPair:
     case kS390_AddFloat:
     case kS390_AddDouble:
-    case kS390_Sub:
-    case kS390_SubWithOverflow32:
+    case kS390_Sub32:
+    case kS390_Sub64:
     case kS390_SubPair:
     case kS390_MulPair:
     case kS390_SubFloat:
     case kS390_SubDouble:
     case kS390_Mul32:
+    case kS390_Mul32WithHigh32:
     case kS390_Mul64:
     case kS390_MulHigh32:
     case kS390_MulHighU32:
@@ -62,8 +65,10 @@
     case kS390_ModU32:
     case kS390_ModU64:
     case kS390_ModDouble:
-    case kS390_Neg:
+    case kS390_Neg32:
+    case kS390_Neg64:
     case kS390_NegDouble:
+    case kS390_NegFloat:
     case kS390_SqrtFloat:
     case kS390_FloorFloat:
     case kS390_CeilFloat:
@@ -74,7 +79,9 @@
     case kS390_CeilDouble:
     case kS390_TruncateDouble:
     case kS390_RoundDouble:
+    case kS390_MaxFloat:
     case kS390_MaxDouble:
+    case kS390_MinFloat:
     case kS390_MinDouble:
     case kS390_AbsDouble:
     case kS390_Cntlz32:
@@ -120,6 +127,9 @@
     case kS390_BitcastFloat32ToInt32:
     case kS390_BitcastInt64ToDouble:
     case kS390_BitcastDoubleToInt64:
+    case kS390_LoadReverse16RR:
+    case kS390_LoadReverse32RR:
+    case kS390_LoadReverse64RR:
       return kNoOpcodeFlags;
 
     case kS390_LoadWordS8:
@@ -131,12 +141,18 @@
     case kS390_LoadWord64:
     case kS390_LoadFloat32:
     case kS390_LoadDouble:
+    case kS390_LoadReverse16:
+    case kS390_LoadReverse32:
+    case kS390_LoadReverse64:
       return kIsLoadOperation;
 
     case kS390_StoreWord8:
     case kS390_StoreWord16:
     case kS390_StoreWord32:
     case kS390_StoreWord64:
+    case kS390_StoreReverse16:
+    case kS390_StoreReverse32:
+    case kS390_StoreReverse64:
     case kS390_StoreFloat32:
     case kS390_StoreDouble:
     case kS390_Push:
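
The flag assignments here are the point of the scheduler change: the memory forms kS390_LoadReverse16/32/64 are tagged kIsLoadOperation so they stay ordered against stores, while the register-to-register reversals are pure (kNoOpcodeFlags) and may be scheduled freely. A toy illustration of the hazard the load flag guards against (assumed semantics, not the V8 scheduler):

    #include <cstdint>

    // A register byte swap is pure and freely schedulable.
    uint32_t ByteSwap(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) |
             (v << 24);
    }

    // The load must not be hoisted above the potentially aliasing store, or
    // the observed value changes.
    uint32_t LoadAfterStore(uint32_t* mem, uint32_t value) {
      mem[0] = value;           // store: side effect
      return ByteSwap(mem[0]);  // load: ordered after the store
    }
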
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index 1b1bd2f..6fc8a4d 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -13,12 +13,12 @@
 namespace compiler {
 
 enum ImmediateMode {
-  kInt16Imm,
-  kInt16Imm_Unsigned,
-  kInt16Imm_Negate,
-  kInt16Imm_4ByteAligned,
   kShift32Imm,
   kShift64Imm,
+  kInt32Imm,
+  kInt32Imm_Negate,
+  kUint32Imm,
+  kInt20Imm,
   kNoImmediate
 };
 
@@ -35,6 +35,16 @@
     return UseRegister(node);
   }
 
+  int64_t GetImmediate(Node* node) {
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      return OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      return OpParameter<int64_t>(node);
+    else
+      UNIMPLEMENTED();
+    return 0L;
+  }
+
   bool CanBeImmediate(Node* node, ImmediateMode mode) {
     int64_t value;
     if (node->opcode() == IrOpcode::kInt32Constant)
@@ -48,23 +58,102 @@
 
   bool CanBeImmediate(int64_t value, ImmediateMode mode) {
     switch (mode) {
-      case kInt16Imm:
-        return is_int16(value);
-      case kInt16Imm_Unsigned:
-        return is_uint16(value);
-      case kInt16Imm_Negate:
-        return is_int16(-value);
-      case kInt16Imm_4ByteAligned:
-        return is_int16(value) && !(value & 3);
       case kShift32Imm:
         return 0 <= value && value < 32;
       case kShift64Imm:
         return 0 <= value && value < 64;
+      case kInt32Imm:
+        return is_int32(value);
+      case kInt32Imm_Negate:
+        return is_int32(-value);
+      case kUint32Imm:
+        return is_uint32(value);
+      case kInt20Imm:
+        return is_int20(value);
       case kNoImmediate:
         return false;
     }
     return false;
   }
+
+  AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base,
+                                             Node* displacement,
+                                             DisplacementMode displacement_mode,
+                                             InstructionOperand inputs[],
+                                             size_t* input_count) {
+    AddressingMode mode = kMode_MRI;
+    if (base != nullptr) {
+      inputs[(*input_count)++] = UseRegister(base);
+      if (index != nullptr) {
+        inputs[(*input_count)++] = UseRegister(index);
+        if (displacement != nullptr) {
+          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                         ? UseNegatedImmediate(displacement)
+                                         : UseImmediate(displacement);
+          mode = kMode_MRRI;
+        } else {
+          mode = kMode_MRR;
+        }
+      } else {
+        if (displacement == nullptr) {
+          mode = kMode_MR;
+        } else {
+          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                         ? UseNegatedImmediate(displacement)
+                                         : UseImmediate(displacement);
+          mode = kMode_MRI;
+        }
+      }
+    } else {
+      DCHECK_NOT_NULL(index);
+      inputs[(*input_count)++] = UseRegister(index);
+      if (displacement != nullptr) {
+        inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                       ? UseNegatedImmediate(displacement)
+                                       : UseImmediate(displacement);
+        mode = kMode_MRI;
+      } else {
+        mode = kMode_MR;
+      }
+    }
+    return mode;
+  }
+
+  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
+                                                  InstructionOperand inputs[],
+                                                  size_t* input_count) {
+#if V8_TARGET_ARCH_S390X
+    BaseWithIndexAndDisplacement64Matcher m(operand,
+                                            AddressOption::kAllowInputSwap);
+#else
+    BaseWithIndexAndDisplacement32Matcher m(operand,
+                                            AddressOption::kAllowInputSwap);
+#endif
+    DCHECK(m.matches());
+    if (m.displacement() == nullptr ||
+        CanBeImmediate(m.displacement(), kInt20Imm)) {
+      DCHECK(m.scale() == 0);
+      return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
+                                         m.displacement_mode(), inputs,
+                                         input_count);
+    } else {
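+      // The displacement does not fit a signed 20-bit field; fall back to
+      // plain base + index registers.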
+      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
+      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
+      return kMode_MRR;
+    }
+  }
+
+  bool CanBeBetterLeftOperand(Node* node) const {
+    return !selector()->IsLive(node);
+  }
+
+  MachineRepresentation GetRepresentation(Node* node) {
+    return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
+  }
+
+  bool Is64BitOperand(Node* node) {
+    return MachineRepresentation::kWord64 == GetRepresentation(node);
+  }
 };
 
 namespace {
@@ -115,20 +204,50 @@
                 FlagsContinuation* cont) {
   S390OperandGenerator g(selector);
   Matcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
   InstructionOperand inputs[4];
   size_t input_count = 0;
   InstructionOperand outputs[2];
   size_t output_count = 0;
 
-  inputs[input_count++] = g.UseRegister(m.left().node());
-  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+  // TODO(turbofan): match complex addressing modes.
+  if (left == right) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov rax, [rbp-0x10]
+    //   add rax, [rbp-0x10]
+    //   jo label
+    InstructionOperand const input = g.UseRegister(left);
+    inputs[input_count++] = input;
+    inputs[input_count++] = input;
+  } else if (g.CanBeImmediate(right, operand_mode)) {
+    inputs[input_count++] = g.UseRegister(left);
+    inputs[input_count++] = g.UseImmediate(right);
+  } else {
+    if (node->op()->HasProperty(Operator::kCommutative) &&
+        g.CanBeBetterLeftOperand(right)) {
+      std::swap(left, right);
+    }
+    inputs[input_count++] = g.UseRegister(left);
+    inputs[input_count++] = g.UseRegister(right);
+  }
 
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
   }
 
-  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsDeoptimize()) {
+    // If we can deoptimize as a result of the binop, we need to make sure that
+    // the deopt inputs are not overwritten by the binop result. One way
+    // to achieve that is to declare the output register as same-as-first.
+    outputs[output_count++] = g.DefineSameAsFirst(node);
+  } else {
+    outputs[output_count++] = g.DefineAsRegister(node);
+  }
   if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
@@ -141,7 +260,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -160,10 +279,7 @@
 void InstructionSelector::VisitLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   S390OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* offset = node->InputAt(1);
   ArchOpcode opcode = kArchNop;
-  ImmediateMode mode = kInt16Imm;
   switch (load_rep.representation()) {
     case MachineRepresentation::kFloat32:
       opcode = kS390_LoadFloat32;
@@ -179,16 +295,19 @@
       opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
       break;
 #if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
 #endif
     case MachineRepresentation::kWord32:
       opcode = kS390_LoadWordU32;
       break;
 #if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord64:
       opcode = kS390_LoadWord64;
-      mode = kInt16Imm_4ByteAligned;
       break;
 #else
     case MachineRepresentation::kWord64:    // Fall through.
@@ -198,16 +317,14 @@
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(offset, mode)) {
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
-  } else if (g.CanBeImmediate(base, mode)) {
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
-  } else {
-    Emit(opcode | AddressingModeField::encode(kMode_MRR),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
-  }
+  InstructionOperand outputs[1];
+  outputs[0] = g.DefineAsRegister(node);
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
+  Emit(code, 1, outputs, input_count, inputs);
 }
 
 void InstructionSelector::VisitStore(Node* node) {
@@ -228,11 +345,7 @@
     inputs[input_count++] = g.UseUniqueRegister(base);
     // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
     // for the store itself, so we must check compatibility with both.
-    if (g.CanBeImmediate(offset, kInt16Imm)
-#if V8_TARGET_ARCH_S390X
-        && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
-#endif
-            ) {
+    if (g.CanBeImmediate(offset, kInt20Imm)) {
       inputs[input_count++] = g.UseImmediate(offset);
       addressing_mode = kMode_MRI;
     } else {
@@ -263,7 +376,7 @@
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
     ArchOpcode opcode = kArchNop;
-    ImmediateMode mode = kInt16Imm;
+    NodeMatcher m(value);
     switch (rep) {
       case MachineRepresentation::kFloat32:
         opcode = kS390_StoreFloat32;
@@ -279,16 +392,27 @@
         opcode = kS390_StoreWord16;
         break;
 #if !V8_TARGET_ARCH_S390X
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
 #endif
       case MachineRepresentation::kWord32:
         opcode = kS390_StoreWord32;
+        if (m.IsWord32ReverseBytes()) {
+          opcode = kS390_StoreReverse32;
+          value = value->InputAt(0);
+        }
         break;
 #if V8_TARGET_ARCH_S390X
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
       case MachineRepresentation::kWord64:
         opcode = kS390_StoreWord64;
-        mode = kInt16Imm_4ByteAligned;
+        if (m.IsWord64ReverseBytes()) {
+          opcode = kS390_StoreReverse64;
+          value = value->InputAt(0);
+        }
         break;
 #else
       case MachineRepresentation::kWord64:  // Fall through.
@@ -298,19 +422,25 @@
         UNREACHABLE();
         return;
     }
-    if (g.CanBeImmediate(offset, mode)) {
-      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
-    } else if (g.CanBeImmediate(base, mode)) {
-      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
-    } else {
-      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
-           g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
-    }
+    InstructionOperand inputs[4];
+    size_t input_count = 0;
+    AddressingMode addressing_mode =
+        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+    InstructionCode code =
+        opcode | AddressingModeField::encode(addressing_mode);
+    InstructionOperand value_operand = g.UseRegister(value);
+    inputs[input_count++] = value_operand;
+    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+         inputs);
   }
 }
 
+// Architecture supports unaligned access, therefore VisitLoad is used instead
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// Architecture supports unaligned access, therefore VisitStore is used instead
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
   S390OperandGenerator g(this);
@@ -340,6 +470,8 @@
       opcode = kCheckedLoadFloat64;
       break;
     case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
 #if !V8_TARGET_ARCH_S390X
     case MachineRepresentation::kWord64:  // Fall through.
@@ -352,7 +484,7 @@
   AddressingMode addressingMode = kMode_MRR;
   Emit(opcode | AddressingModeField::encode(addressingMode),
        g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
-       g.UseOperand(length, kInt16Imm_Unsigned));
+       g.UseOperand(length, kUint32Imm));
 }
 
 void InstructionSelector::VisitCheckedStore(Node* node) {
@@ -385,6 +517,8 @@
       opcode = kCheckedStoreFloat64;
       break;
     case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
 #if !V8_TARGET_ARCH_S390X
     case MachineRepresentation::kWord64:  // Fall through.
@@ -397,53 +531,7 @@
   AddressingMode addressingMode = kMode_MRR;
   Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
        g.UseRegister(base), g.UseRegister(offset),
-       g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
-}
-
-template <typename Matcher>
-static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
-                         ArchOpcode opcode, bool left_can_cover,
-                         bool right_can_cover, ImmediateMode imm_mode) {
-  S390OperandGenerator g(selector);
-
-  // Map instruction to equivalent operation with inverted right input.
-  ArchOpcode inv_opcode = opcode;
-  switch (opcode) {
-    case kS390_And:
-      inv_opcode = kS390_AndComplement;
-      break;
-    case kS390_Or:
-      inv_opcode = kS390_OrComplement;
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
-  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
-    Matcher mleft(m->left().node());
-    if (mleft.right().Is(-1)) {
-      selector->Emit(inv_opcode, g.DefineAsRegister(node),
-                     g.UseRegister(m->right().node()),
-                     g.UseRegister(mleft.left().node()));
-      return;
-    }
-  }
-
-  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
-  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
-      right_can_cover) {
-    Matcher mright(m->right().node());
-    if (mright.right().Is(-1)) {
-      // TODO(all): support shifted operand on right.
-      selector->Emit(inv_opcode, g.DefineAsRegister(node),
-                     g.UseRegister(m->left().node()),
-                     g.UseRegister(mright.left().node()));
-      return;
-    }
-  }
-
-  VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+       g.UseOperand(length, kUint32Imm), g.UseRegister(value));
 }
 
 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
@@ -501,9 +589,7 @@
       return;
     }
   }
-  VisitLogical<Int32BinopMatcher>(
-      this, node, &m, kS390_And, CanCover(node, m.left().node()),
-      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -555,25 +641,19 @@
       }
     }
   }
-  VisitLogical<Int64BinopMatcher>(
-      this, node, &m, kS390_And, CanCover(node, m.left().node()),
-      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kUint32Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Or(Node* node) {
   Int32BinopMatcher m(node);
-  VisitLogical<Int32BinopMatcher>(
-      this, node, &m, kS390_Or, CanCover(node, m.left().node()),
-      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_Or32, kUint32Imm);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Or(Node* node) {
   Int64BinopMatcher m(node);
-  VisitLogical<Int64BinopMatcher>(
-      this, node, &m, kS390_Or, CanCover(node, m.left().node()),
-      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64, kUint32Imm);
 }
 #endif
 
@@ -581,9 +661,9 @@
   S390OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+    Emit(kS390_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+    VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor32, kUint32Imm);
   }
 }
 
@@ -592,9 +672,9 @@
   S390OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+    Emit(kS390_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64, kUint32Imm);
   }
 }
 #endif
@@ -880,13 +960,38 @@
 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
 #endif
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+  S390OperandGenerator g(this);
+  NodeMatcher input(node->InputAt(0));
+  if (CanCover(node, input.node()) && input.IsLoad()) {
+    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
+    if (load_rep.representation() == MachineRepresentation::kWord32) {
+      Node* base = input.node()->InputAt(0);
+      Node* offset = input.node()->InputAt(1);
+      Emit(kS390_LoadReverse32 | AddressingModeField::encode(kMode_MRR),
+           // TODO(john.yan): one of the base and offset can be imm.
+           g.DefineAsRegister(node), g.UseRegister(base),
+           g.UseRegister(offset));
+      return;
+    }
+  }
+  Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm);
 }
 #endif
 
@@ -894,9 +999,10 @@
   S390OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+    Emit(kS390_Neg32, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+    VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate);
   }
 }
 
@@ -905,33 +1011,125 @@
   S390OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+    Emit(kS390_Neg64, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate);
   }
 }
 #endif
 
+namespace {
+
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand left, InstructionOperand right,
+                  FlagsContinuation* cont);
+
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+                              FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand result_operand = g.DefineAsRegister(node);
+  InstructionOperand high32_operand = g.TempRegister();
+  InstructionOperand temp_operand = g.TempRegister();
+  {
+    InstructionOperand outputs[] = {result_operand, high32_operand};
+    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
+                                   g.UseRegister(m.right().node())};
+    selector->Emit(kS390_Mul32WithHigh32, 2, outputs, 2, inputs);
+  }
+  {
+    InstructionOperand shift_31 = g.UseImmediate(31);
+    InstructionOperand outputs[] = {temp_operand};
+    InstructionOperand inputs[] = {result_operand, shift_31};
+    selector->Emit(kS390_ShiftRightArith32, 1, outputs, 2, inputs);
+  }
+
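+  // The multiply overflowed int32 iff the high 32 bits of the product differ
+  // from the sign extension (low >> 31) of the low 32 bits.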
+  VisitCompare(selector, kS390_Cmp32, high32_operand, temp_operand, cont);
+}
+
+void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  S390OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  if (g.CanBeImmediate(right, kInt32Imm)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    if (g.CanBeBetterLeftOperand(right)) {
+      std::swap(left, right);
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.Use(right));
+  }
+}
+
+}  // namespace
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+    return EmitInt32MulWithOverflow(this, node, &cont);
+  }
+  VisitMul(this, node, kS390_Mul32);
+}
+
 void InstructionSelector::VisitInt32Mul(Node* node) {
-  VisitRRR(this, kS390_Mul32, node);
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  if (g.CanBeImmediate(right, kInt32Imm) &&
+      base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
+    int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
+    Emit(kS390_ShiftLeft32, g.DefineSameAsFirst(node), g.UseRegister(left),
+         g.UseImmediate(power));
+    return;
+  }
+  VisitMul(this, node, kS390_Mul32);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitInt64Mul(Node* node) {
-  VisitRRR(this, kS390_Mul64, node);
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  if (g.CanBeImmediate(right, kInt32Imm) &&
+      base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
+    int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
+    Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
+         g.UseImmediate(power));
+    return;
+  }
+  VisitMul(this, node, kS390_Mul64);
 }
 #endif
 
 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   S390OperandGenerator g(this);
-  Emit(kS390_MulHigh32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  if (g.CanBeBetterLeftOperand(right)) {
+    std::swap(left, right);
+  }
+  Emit(kS390_MulHigh32, g.DefineAsRegister(node), g.UseRegister(left),
+       g.Use(right));
 }
 
 void InstructionSelector::VisitUint32MulHigh(Node* node) {
   S390OperandGenerator g(this);
-  Emit(kS390_MulHighU32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  if (g.CanBeBetterLeftOperand(right)) {
+    std::swap(left, right);
+  }
+  Emit(kS390_MulHighU32, g.DefineAsRegister(node), g.UseRegister(left),
+       g.Use(right));
 }
 
 void InstructionSelector::VisitInt32Div(Node* node) {
@@ -1107,47 +1305,11 @@
 }
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
-  S390OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    Emit(kS390_NegDouble, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
-    return;
-  }
-  VisitRRR(this, kS390_SubFloat, node);
-}
-
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  S390OperandGenerator g(this);
   VisitRRR(this, kS390_SubFloat, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   // TODO(mbrandy): detect multiply-subtract
-  S390OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
-      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
-        Float64BinopMatcher mright0(m.right().InputAt(0));
-        if (mright0.left().IsMinusZero()) {
-          // -floor(-x) = ceil(x)
-          Emit(kS390_CeilDouble, g.DefineAsRegister(node),
-               g.UseRegister(mright0.right().node()));
-          return;
-        }
-      }
-    }
-    Emit(kS390_NegDouble, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
-    return;
-  }
-  VisitRRR(this, kS390_SubDouble, node);
-}
-
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
   VisitRRR(this, kS390_SubDouble, node);
 }
 
@@ -1175,17 +1337,25 @@
       ->MarkAsCall();
 }
 
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+  VisitRRR(this, kS390_MaxFloat, node);
+}
 
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+  VisitRRR(this, kS390_MaxDouble, node);
+}
 
 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
   VisitRR(this, kS390_Float64SilenceNaN, node);
 }
 
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+  VisitRRR(this, kS390_MinFloat, node);
+}
 
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+  VisitRRR(this, kS390_MinDouble, node);
+}
 
 void InstructionSelector::VisitFloat32Abs(Node* node) {
   VisitRR(this, kS390_AbsFloat, node);
@@ -1202,14 +1372,14 @@
 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                   InstructionCode opcode) {
   S390OperandGenerator g(this);
-  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
       ->MarkAsCall();
 }
 
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   S390OperandGenerator g(this);
-  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
        g.UseFixed(node->InputAt(1), d2))
       ->MarkAsCall();
 }
@@ -1254,51 +1424,55 @@
   UNREACHABLE();
 }
 
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitRR(this, kS390_NegFloat, node);
+}
 
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitRR(this, kS390_NegDouble, node);
+}
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32,
-                                         kInt16Imm, &cont);
+    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm,
+                                         &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32, kInt16Imm,
-                                &cont);
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm, &cont);
 }
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
-                                         kInt16Imm_Negate, &cont);
+    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32,
+                                         kInt32Imm_Negate, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
-                                kInt16Imm_Negate, &cont);
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate,
+                                &cont);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm,
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm,
                                          &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm, &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm, &cont);
 }
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub,
-                                         kInt16Imm_Negate, &cont);
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+                                         kInt32Imm_Negate, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate, &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate,
+                                &cont);
 }
 #endif
 
@@ -1328,7 +1502,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1360,14 +1534,14 @@
 
 void VisitWord32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
-  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
   VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void VisitWord64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
-  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
   VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
 }
 #endif
@@ -1474,21 +1648,23 @@
               case IrOpcode::kInt32AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop<Int32BinopMatcher>(
-                    selector, node, kS390_AddWithOverflow32, kInt16Imm, cont);
+                    selector, node, kS390_Add32, kInt32Imm, cont);
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(selector, node,
-                                                     kS390_SubWithOverflow32,
-                                                     kInt16Imm_Negate, cont);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kS390_Sub32, kInt32Imm_Negate, cont);
+              case IrOpcode::kInt32MulWithOverflow:
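+                // Multiply overflow is detected with an explicit compare of
+                // the product's high word, so the flag to test is kNotEqual
+                // rather than kOverflow.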
+                cont->OverwriteAndNegateIfEqual(kNotEqual);
+                return EmitInt32MulWithOverflow(selector, node, cont);
 #if V8_TARGET_ARCH_S390X
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Add,
-                                                     kInt16Imm, cont);
+                return VisitBinop<Int64BinopMatcher>(
+                    selector, node, kS390_Add64, kInt32Imm, cont);
               case IrOpcode::kInt64SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Sub,
-                                                     kInt16Imm_Negate, cont);
+                return VisitBinop<Int64BinopMatcher>(
+                    selector, node, kS390_Sub64, kInt32Imm_Negate, cont);
 #endif
               default:
                 break;
@@ -1500,7 +1676,7 @@
         return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kWord32And:
         return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
-                                kInt16Imm_Unsigned);
+                                kUint32Imm);
 // TODO(mbrandy): Handle?
 // case IrOpcode::kInt32Add:
 // case IrOpcode::kWord32Or:
@@ -1514,7 +1690,7 @@
         return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kWord64And:
         return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
-                                kInt16Imm_Unsigned);
+                                kUint32Imm);
 // TODO(mbrandy): Handle?
 // case IrOpcode::kInt64Add:
 // case IrOpcode::kWord64Or:
@@ -1557,14 +1733,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1584,7 +1760,7 @@
     InstructionOperand index_operand = value_operand;
     if (sw.min_value) {
       index_operand = g.TempRegister();
-      Emit(kS390_Sub, index_operand, value_operand,
+      Emit(kS390_Sub32, index_operand, value_operand,
            g.TempImmediate(sw.min_value));
     }
     // Generate a table lookup.
@@ -1699,7 +1875,7 @@
   // Prepare for C function call.
   if (descriptor->IsCFunctionCall()) {
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr);
 
     // Poke any stack arguments.
@@ -1842,6 +2018,8 @@
          MachineOperatorBuilder::kFloat64RoundTruncate |
          MachineOperatorBuilder::kFloat64RoundTiesAway |
          MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kWord32ReverseBytes |
+         MachineOperatorBuilder::kWord64ReverseBytes |
          MachineOperatorBuilder::kWord64Popcnt;
 }
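
EmitInt32MulWithOverflow above encodes the classic full-product overflow test: mr_z leaves the 64-bit product in an even/odd register pair, and the multiply overflowed int32 exactly when the high word differs from the sign extension of the low word. A C++ sketch of that check (illustrative only; two's-complement narrowing assumed):

    #include <cstdint>

    bool Int32MulOverflows(int32_t a, int32_t b, int32_t* low_out) {
      int64_t product = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      int32_t low = static_cast<int32_t>(product);         // r1 after mr_z
      int32_t high = static_cast<int32_t>(product >> 32);  // r0 after mr_z
      *low_out = low;
      // kS390_ShiftRightArith32 by 31 reproduces the low word's sign
      // extension; kS390_Cmp32 compares it with the high word.
      return high != (low >> 31);
    }
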
 
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index c56494c..de64de3 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -51,7 +51,10 @@
   //     the next phase can begin.
   PROPAGATE,
 
-  // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
+  // 2.) RETYPE: Propagate types from type feedback forwards.
+  RETYPE,
+
+  // 3.) LOWER: perform lowering for all {Simplified} nodes by replacing some
   //     operators for some nodes, expanding some nodes to multiple nodes, or
   //     removing some (redundant) nodes.
   //     During this phase, use the {RepresentationChanger} to insert
@@ -60,12 +63,65 @@
   LOWER
 };
 
-
 namespace {
 
+MachineRepresentation MachineRepresentationFromArrayType(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalUint8Array:
+    case kExternalUint8ClampedArray:
+    case kExternalInt8Array:
+      return MachineRepresentation::kWord8;
+    case kExternalUint16Array:
+    case kExternalInt16Array:
+      return MachineRepresentation::kWord16;
+    case kExternalUint32Array:
+    case kExternalInt32Array:
+      return MachineRepresentation::kWord32;
+    case kExternalFloat32Array:
+      return MachineRepresentation::kFloat32;
+    case kExternalFloat64Array:
+      return MachineRepresentation::kFloat64;
+  }
+  UNREACHABLE();
+  return MachineRepresentation::kNone;
+}
+
+UseInfo CheckedUseInfoAsWord32FromHint(NumberOperationHint hint) {
+  switch (hint) {
+    case NumberOperationHint::kSignedSmall:
+      return UseInfo::CheckedSignedSmallAsWord32();
+    case NumberOperationHint::kSigned32:
+      return UseInfo::CheckedSigned32AsWord32();
+    case NumberOperationHint::kNumber:
+      return UseInfo::CheckedNumberAsWord32();
+    case NumberOperationHint::kNumberOrOddball:
+      return UseInfo::CheckedNumberOrOddballAsWord32();
+  }
+  UNREACHABLE();
+  return UseInfo::None();
+}
+
+UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
+  switch (hint) {
+    case NumberOperationHint::kSignedSmall:
+    case NumberOperationHint::kSigned32:
+      // Not used currently.
+      UNREACHABLE();
+      break;
+    case NumberOperationHint::kNumber:
+      return UseInfo::CheckedNumberAsFloat64();
+    case NumberOperationHint::kNumberOrOddball:
+      return UseInfo::CheckedNumberOrOddballAsFloat64();
+  }
+  UNREACHABLE();
+  return UseInfo::None();
+}
 
 UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
   switch (rep) {
+    case MachineRepresentation::kTaggedSigned:
+    case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
       return UseInfo::AnyTagged();
     case MachineRepresentation::kFloat64:
@@ -98,57 +154,34 @@
   return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
 }
 
+void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge)) {
+      edge.UpdateTo(control);
+    } else if (NodeProperties::IsEffectEdge(edge)) {
+      edge.UpdateTo(effect);
+    } else {
+      DCHECK(NodeProperties::IsValueEdge(edge));
+    }
+  }
+}
+
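+// Rewires an effectful node's effect and control uses to its own effect and
+// control inputs, then switches it to the pure operator (e.g. when lowering
+// a SpeculativeNumberAdd to the pure Int32Add).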
+void ChangeToPureOp(Node* node, const Operator* new_op) {
+  if (node->op()->EffectInputCount() > 0) {
+    DCHECK_LT(0, node->op()->ControlInputCount());
+    // Disconnect the node from effect and control chains.
+    Node* control = NodeProperties::GetControlInput(node);
+    Node* effect = NodeProperties::GetEffectInput(node);
+    ReplaceEffectControlUses(node, effect, control);
+    node->TrimInputCount(new_op->ValueInputCount());
+  } else {
+    DCHECK_EQ(0, node->op()->ControlInputCount());
+  }
+  NodeProperties::ChangeOp(node, new_op);
+}
 
 #ifdef DEBUG
 // Helpers for monotonicity checking.
-bool MachineRepresentationIsSubtype(MachineRepresentation r1,
-                                    MachineRepresentation r2) {
-  switch (r1) {
-    case MachineRepresentation::kNone:
-      return true;
-    case MachineRepresentation::kBit:
-      return r2 == MachineRepresentation::kBit ||
-             r2 == MachineRepresentation::kTagged;
-    case MachineRepresentation::kWord8:
-      return r2 == MachineRepresentation::kWord8 ||
-             r2 == MachineRepresentation::kWord16 ||
-             r2 == MachineRepresentation::kWord32 ||
-             r2 == MachineRepresentation::kWord64 ||
-             r2 == MachineRepresentation::kFloat32 ||
-             r2 == MachineRepresentation::kFloat64 ||
-             r2 == MachineRepresentation::kTagged;
-    case MachineRepresentation::kWord16:
-      return r2 == MachineRepresentation::kWord16 ||
-             r2 == MachineRepresentation::kWord32 ||
-             r2 == MachineRepresentation::kWord64 ||
-             r2 == MachineRepresentation::kFloat32 ||
-             r2 == MachineRepresentation::kFloat64 ||
-             r2 == MachineRepresentation::kTagged;
-    case MachineRepresentation::kWord32:
-      return r2 == MachineRepresentation::kWord32 ||
-             r2 == MachineRepresentation::kWord64 ||
-             r2 == MachineRepresentation::kFloat64 ||
-             r2 == MachineRepresentation::kTagged;
-    case MachineRepresentation::kWord64:
-      return r2 == MachineRepresentation::kWord64;
-    case MachineRepresentation::kFloat32:
-      return r2 == MachineRepresentation::kFloat32 ||
-             r2 == MachineRepresentation::kFloat64 ||
-             r2 == MachineRepresentation::kTagged;
-    case MachineRepresentation::kFloat64:
-      return r2 == MachineRepresentation::kFloat64 ||
-             r2 == MachineRepresentation::kTagged;
-    case MachineRepresentation::kSimd128:
-      return r2 == MachineRepresentation::kSimd128 ||
-             r2 == MachineRepresentation::kTagged;
-    case MachineRepresentation::kTagged:
-      return r2 == MachineRepresentation::kTagged;
-  }
-  UNREACHABLE();
-  return false;
-}
-
-
 class InputUseInfos {
  public:
   explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
@@ -167,9 +200,7 @@
   ZoneVector<UseInfo> input_use_infos_;
 
   static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
-    return MachineRepresentationIsSubtype(use1.representation(),
-                                          use2.representation()) &&
-           use1.truncation().IsLessGeneralThan(use2.truncation());
+    return use1.truncation().IsLessGeneralThan(use2.truncation());
   }
 };
 
@@ -181,7 +212,7 @@
 class RepresentationSelector {
  public:
   // Information for each node tracked during the fixpoint.
-  class NodeInfo {
+  class NodeInfo final {
    public:
     // Adds new use to the node. Returns true if something has changed
     // and the node has to be requeued.
@@ -205,11 +236,11 @@
 
     // Helpers for feedback typing.
     void set_feedback_type(Type* type) { feedback_type_ = type; }
-    Type* feedback_type() { return feedback_type_; }
+    Type* feedback_type() const { return feedback_type_; }
     void set_weakened() { weakened_ = true; }
-    bool weakened() { return weakened_; }
-    TypeCheckKind type_check() { return type_check_; }
-    void set_type_check(TypeCheckKind type_check) { type_check_ = type_check; }
+    bool weakened() const { return weakened_; }
+    void set_restriction_type(Type* type) { restriction_type_ = type; }
+    Type* restriction_type() const { return restriction_type_; }
 
    private:
     enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued };
@@ -217,8 +248,8 @@
     MachineRepresentation representation_ =
         MachineRepresentation::kNone;             // Output representation.
     Truncation truncation_ = Truncation::None();  // Information about uses.
-    TypeCheckKind type_check_ = TypeCheckKind::kNone;  // Runtime check kind.
 
+    Type* restriction_type_ = Type::Any();
     Type* feedback_type_ = nullptr;
     bool weakened_ = false;
   };
@@ -246,8 +277,12 @@
 
   // Forward propagation of types from type feedback.
   void RunTypePropagationPhase() {
-    DCHECK(typing_stack_.empty());
+    // Run type propagation.
+    TRACE("--{Type propagation phase}--\n");
+    phase_ = RETYPE;
+    ResetNodeInfoState();
 
+    DCHECK(typing_stack_.empty());
     typing_stack_.push({graph()->end(), 0});
     GetInfo(graph()->end())->set_pushed();
     while (!typing_stack_.empty()) {
@@ -274,6 +309,8 @@
       NodeInfo* info = GetInfo(node);
       info->set_visited();
       bool updated = UpdateFeedbackType(node);
+      TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+      VisitNode(node, info->truncation(), nullptr);
       if (updated) {
         for (Node* const user : node->uses()) {
           if (GetInfo(user)->visited()) {
@@ -291,6 +328,8 @@
       NodeInfo* info = GetInfo(node);
       info->set_visited();
       bool updated = UpdateFeedbackType(node);
+      TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+      VisitNode(node, info->truncation(), nullptr);
       if (updated) {
         for (Node* const user : node->uses()) {
           if (GetInfo(user)->visited()) {
@@ -333,23 +372,6 @@
                            FeedbackTypeOf(node->InputAt(2)));
   }
 
-  static Type* TypeOfSpeculativeOp(TypeCheckKind type_check) {
-    switch (type_check) {
-      case TypeCheckKind::kNone:
-        return Type::Any();
-      case TypeCheckKind::kSigned32:
-        return Type::Signed32();
-      case TypeCheckKind::kNumber:
-        return Type::Number();
-      // Unexpected cases.
-      case TypeCheckKind::kNumberOrUndefined:
-        FATAL("Unexpected checked type.");
-        break;
-    }
-    UNREACHABLE();
-    return nullptr;
-  }
-
   bool UpdateFeedbackType(Node* node) {
     if (node->op()->ValueOutputCount() == 0) return false;
 
@@ -357,125 +379,66 @@
     Type* type = info->feedback_type();
     Type* new_type = type;
 
+    // For any non-phi node, just wait until we get all inputs typed. We only
+    // allow untyped inputs for phi nodes because phis are the only places
+    // where cycles need to be broken.
+    if (node->opcode() != IrOpcode::kPhi) {
+      for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+        if (GetInfo(node->InputAt(i))->feedback_type() == nullptr) {
+          return false;
+        }
+      }
+    }
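+    // (Example: in a loop "for (var i = 0; i < n; i = i + 1)" the Phi for
+    // {i} and the SpeculativeNumberAdd computing {i + 1} feed each other, so
+    // the Phi has to be typed while one of its inputs is still untyped.)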
+
     switch (node->opcode()) {
-      case IrOpcode::kSpeculativeNumberAdd: {
-        Type* lhs = FeedbackTypeOf(node->InputAt(0));
-        Type* rhs = FeedbackTypeOf(node->InputAt(1));
-        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
-        // TODO(jarin) The ToNumber conversion is too conservative here,
-        // e.g. it will treat true as 1 even though the number check will
-        // fail on a boolean. OperationTyper should have a function that
-        // computes a more precise type.
-        lhs = op_typer_.ToNumber(lhs);
-        rhs = op_typer_.ToNumber(rhs);
-        Type* static_type = op_typer_.NumericAdd(lhs, rhs);
-        if (info->type_check() == TypeCheckKind::kNone) {
-          new_type = static_type;
-        } else {
-          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
-          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
-        }
-        break;
-      }
+#define DECLARE_CASE(Name)                                       \
+  case IrOpcode::k##Name: {                                      \
+    new_type = op_typer_.Name(FeedbackTypeOf(node->InputAt(0)),  \
+                              FeedbackTypeOf(node->InputAt(1))); \
+    break;                                                       \
+  }
+      SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
 
-      case IrOpcode::kSpeculativeNumberSubtract: {
-        Type* lhs = FeedbackTypeOf(node->InputAt(0));
-        Type* rhs = FeedbackTypeOf(node->InputAt(1));
-        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
-        // TODO(jarin) The ToNumber conversion is too conservative here,
-        // e.g. it will treat true as 1 even though the number check will
-        // fail on a boolean. OperationTyper should have a function that
-        // computes a more precise type.
-        lhs = op_typer_.ToNumber(lhs);
-        rhs = op_typer_.ToNumber(rhs);
-        Type* static_type = op_typer_.NumericSubtract(lhs, rhs);
-        if (info->type_check() == TypeCheckKind::kNone) {
-          new_type = static_type;
-        } else {
-          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
-          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
-        }
-        break;
-      }
+#define DECLARE_CASE(Name)                                                \
+  case IrOpcode::k##Name: {                                               \
+    new_type =                                                            \
+        Type::Intersect(op_typer_.Name(FeedbackTypeOf(node->InputAt(0)),  \
+                                       FeedbackTypeOf(node->InputAt(1))), \
+                        info->restriction_type(), graph_zone());          \
+    break;                                                                \
+  }
+      SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
 
-      case IrOpcode::kSpeculativeNumberMultiply: {
-        Type* lhs = FeedbackTypeOf(node->InputAt(0));
-        Type* rhs = FeedbackTypeOf(node->InputAt(1));
-        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
-        // TODO(jarin) The ToNumber conversion is too conservative here,
-        // e.g. it will treat true as 1 even though the number check will
-        // fail on a boolean. OperationTyper should have a function that
-        // computes a more precise type.
-        lhs = op_typer_.ToNumber(lhs);
-        rhs = op_typer_.ToNumber(rhs);
-        Type* static_type = op_typer_.NumericMultiply(lhs, rhs);
-        if (info->type_check() == TypeCheckKind::kNone) {
-          new_type = static_type;
-        } else {
-          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
-          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
-        }
-        break;
-      }
+#define DECLARE_CASE(Name)                                       \
+  case IrOpcode::k##Name: {                                      \
+    new_type = op_typer_.Name(FeedbackTypeOf(node->InputAt(0))); \
+    break;                                                       \
+  }
+      SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
 
-      case IrOpcode::kSpeculativeNumberDivide: {
-        Type* lhs = FeedbackTypeOf(node->InputAt(0));
-        Type* rhs = FeedbackTypeOf(node->InputAt(1));
-        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
-        // TODO(jarin) The ToNumber conversion is too conservative here,
-        // e.g. it will treat true as 1 even though the number check will
-        // fail on a boolean. OperationTyper should have a function that
-        // computes a more precise type.
-        lhs = op_typer_.ToNumber(lhs);
-        rhs = op_typer_.ToNumber(rhs);
-        Type* static_type = op_typer_.NumericDivide(lhs, rhs);
-        if (info->type_check() == TypeCheckKind::kNone) {
-          new_type = static_type;
-        } else {
-          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
-          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
-        }
+      case IrOpcode::kPlainPrimitiveToNumber:
+        new_type = op_typer_.ToNumber(FeedbackTypeOf(node->InputAt(0)));
         break;
-      }
-
-      case IrOpcode::kSpeculativeNumberModulus: {
-        Type* lhs = FeedbackTypeOf(node->InputAt(0));
-        Type* rhs = FeedbackTypeOf(node->InputAt(1));
-        if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
-        // TODO(jarin) The ToNumber conversion is too conservative here,
-        // e.g. it will treat true as 1 even though the number check will
-        // fail on a boolean. OperationTyper should have a function that
-        // computes a more precise type.
-        lhs = op_typer_.ToNumber(lhs);
-        rhs = op_typer_.ToNumber(rhs);
-        Type* static_type = op_typer_.NumericModulus(lhs, rhs);
-        if (info->type_check() == TypeCheckKind::kNone) {
-          new_type = static_type;
-        } else {
-          Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
-          new_type = Type::Intersect(static_type, feedback_type, graph_zone());
-        }
-        break;
-      }
 
       case IrOpcode::kPhi: {
         new_type = TypePhi(node);
         if (type != nullptr) {
           new_type = Weaken(node, type, new_type);
         }
-        // Recompute the phi representation based on the new type.
-        MachineRepresentation output =
-            GetOutputInfoForPhi(node, GetInfo(node)->truncation(), new_type);
-        ResetOutput(node, output);
+        break;
+      }
+
+      case IrOpcode::kTypeGuard: {
+        new_type = op_typer_.TypeTypeGuard(node->op(),
+                                           FeedbackTypeOf(node->InputAt(0)));
         break;
       }
 
       case IrOpcode::kSelect: {
         new_type = TypeSelect(node);
-        // Recompute representation based on the new type.
-        MachineRepresentation output =
-            GetOutputInfoForPhi(node, GetInfo(node)->truncation(), new_type);
-        ResetOutput(node, output);
         break;
       }
 
@@ -487,6 +450,12 @@
         }
         return false;
     }
+    // We need to guarantee that the feedback type is a subtype of the upper
+    // bound. Naively that should hold, but weakening can actually produce
+    // a bigger type if we are unlucky with ordering of phi typing. To be
+    // really sure, just intersect the upper bound with the feedback type.
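+    // (Example: weakening may widen a loop Phi's feedback type to Number
+    // even though its static upper bound is Signed32; the intersection below
+    // restores the Signed32 bound.)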
+    new_type = Type::Intersect(GetUpperBound(node), new_type, graph_zone());
+
     if (type != nullptr && new_type->Is(type)) return false;
     GetInfo(node)->set_feedback_type(new_type);
     if (FLAG_trace_representation) {
@@ -562,7 +531,8 @@
       NodeInfo* info = GetInfo(node);
       queue_.pop();
       info->set_visited();
-      TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+      TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
+            info->truncation().description());
       VisitNode(node, info->truncation(), nullptr);
       TRACE("  ==> output ");
       PrintOutputInfo(info);
@@ -573,10 +543,7 @@
   void Run(SimplifiedLowering* lowering) {
     RunTruncationPropagationPhase();
 
-    if (lowering->flags() & SimplifiedLowering::kTypeFeedbackEnabled) {
-      ResetNodeInfoState();
-      RunTypePropagationPhase();
-    }
+    RunTypePropagationPhase();
 
     // Run lowering and change insertion phase.
     TRACE("--{Simplified lowering phase}--\n");
@@ -632,12 +599,12 @@
       info->set_queued();
       nodes_.push_back(node);
       queue_.push(node);
-      TRACE("  initial: ");
+      TRACE("  initial #%i: ", node->id());
       info->AddUse(use_info);
       PrintTruncation(info->truncation());
       return;
     }
-    TRACE("   queue?: ");
+    TRACE("   queue #%i?: ", node->id());
     PrintTruncation(info->truncation());
     if (info->AddUse(use_info)) {
       // New usage information for the node is available.
@@ -652,25 +619,37 @@
     }
   }
 
-  bool lower() { return phase_ == LOWER; }
-  bool propagate() { return phase_ == PROPAGATE; }
+  bool lower() const { return phase_ == LOWER; }
+  bool retype() const { return phase_ == RETYPE; }
+  bool propagate() const { return phase_ == PROPAGATE; }
 
   void SetOutput(Node* node, MachineRepresentation representation,
-                 TypeCheckKind type_check = TypeCheckKind::kNone) {
-    DCHECK(MachineRepresentationIsSubtype(GetInfo(node)->representation(),
-                                          representation));
-    ResetOutput(node, representation, type_check);
-  }
-
-  void ResetOutput(Node* node, MachineRepresentation representation,
-                   TypeCheckKind type_check = TypeCheckKind::kNone) {
-    NodeInfo* info = GetInfo(node);
-    info->set_output(representation);
-    info->set_type_check(type_check);
+                 Type* restriction_type = Type::Any()) {
+    NodeInfo* const info = GetInfo(node);
+    switch (phase_) {
+      case PROPAGATE:
+        info->set_restriction_type(restriction_type);
+        break;
+      case RETYPE:
+        DCHECK(info->restriction_type()->Is(restriction_type));
+        DCHECK(restriction_type->Is(info->restriction_type()));
+        info->set_output(representation);
+        break;
+      case LOWER:
+        DCHECK_EQ(info->representation(), representation);
+        DCHECK(info->restriction_type()->Is(restriction_type));
+        DCHECK(restriction_type->Is(info->restriction_type()));
+        break;
+    }
   }
 
   Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
 
+  bool InputCannotBe(Node* node, Type* type) {
+    DCHECK_EQ(1, node->op()->ValueInputCount());
+    return !GetUpperBound(node->InputAt(0))->Maybe(type);
+  }
+
   bool InputIs(Node* node, Type* type) {
     DCHECK_EQ(1, node->op()->ValueInputCount());
     return GetUpperBound(node->InputAt(0))->Is(type);
@@ -690,6 +669,12 @@
            GetUpperBound(node->InputAt(1))->Is(type);
   }
 
+  bool OneInputCannotBe(Node* node, Type* type) {
+    DCHECK_EQ(2, node->op()->ValueInputCount());
+    return !GetUpperBound(node->InputAt(0))->Maybe(type) ||
+           !GetUpperBound(node->InputAt(1))->Maybe(type);
+  }
+
   void ConvertInput(Node* node, int index, UseInfo use) {
     Node* input = node->InputAt(index);
     // In the change phase, insert a change before the use if necessary.
@@ -715,10 +700,15 @@
   }
 
   void ProcessInput(Node* node, int index, UseInfo use) {
-    if (phase_ == PROPAGATE) {
-      EnqueueInput(node, index, use);
-    } else {
-      ConvertInput(node, index, use);
+    switch (phase_) {
+      case PROPAGATE:
+        EnqueueInput(node, index, use);
+        break;
+      case RETYPE:
+        break;
+      case LOWER:
+        ConvertInput(node, index, use);
+        break;
     }
   }
 
@@ -741,34 +731,58 @@
   // values {kTypeAny}.
   void VisitInputs(Node* node) {
     int tagged_count = node->op()->ValueInputCount() +
-                       OperatorProperties::GetContextInputCount(node->op());
-    // Visit value and context inputs as tagged.
+                       OperatorProperties::GetContextInputCount(node->op()) +
+                       OperatorProperties::GetFrameStateInputCount(node->op());
+    // Visit value, context and frame state inputs as tagged.
     for (int i = 0; i < tagged_count; i++) {
       ProcessInput(node, i, UseInfo::AnyTagged());
     }
-    // Only enqueue other inputs (framestates, effects, control).
+    // Only enqueue other inputs (effects, control).
     for (int i = tagged_count; i < node->InputCount(); i++) {
       EnqueueInput(node, i);
     }
   }
 
+  // Helper for an unused node.
+  void VisitUnused(Node* node) {
+    int value_count = node->op()->ValueInputCount() +
+                      OperatorProperties::GetContextInputCount(node->op()) +
+                      OperatorProperties::GetFrameStateInputCount(node->op());
+    for (int i = 0; i < value_count; i++) {
+      ProcessInput(node, i, UseInfo::None());
+    }
+    ProcessRemainingInputs(node, value_count);
+    if (lower()) Kill(node);
+  }
+
   // Helper for binops of the R x L -> O variety.
   void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
                   MachineRepresentation output,
-                  TypeCheckKind type_check = TypeCheckKind::kNone) {
+                  Type* restriction_type = Type::Any()) {
     DCHECK_EQ(2, node->op()->ValueInputCount());
     ProcessInput(node, 0, left_use);
     ProcessInput(node, 1, right_use);
     for (int i = 2; i < node->InputCount(); i++) {
       EnqueueInput(node, i);
     }
-    SetOutput(node, output, type_check);
+    SetOutput(node, output, restriction_type);
   }
 
   // Helper for binops of the I x I -> O variety.
   void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output,
-                  TypeCheckKind type_check = TypeCheckKind::kNone) {
-    VisitBinop(node, input_use, input_use, output, type_check);
+                  Type* restriction_type = Type::Any()) {
+    VisitBinop(node, input_use, input_use, output, restriction_type);
+  }
+
+  void VisitSpeculativeInt32Binop(Node* node) {
+    DCHECK_EQ(2, node->op()->ValueInputCount());
+    if (BothInputsAre(node, Type::NumberOrOddball())) {
+      return VisitBinop(node, UseInfo::TruncatingWord32(),
+                        MachineRepresentation::kWord32);
+    }
+    NumberOperationHint hint = NumberOperationHintOf(node->op());
+    return VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                      MachineRepresentation::kWord32);
   }
 
   // Helper for unops of the I -> O variety.
@@ -827,23 +841,30 @@
   }
 
   // Infer representation for phi-like nodes.
-  MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use,
-                                            Type* type = nullptr) {
+  // The {node} parameter is only used to decide on the int64 representation.
+  // Once the type system supports an external pointer type, the {node}
+  // parameter can be removed.
+  MachineRepresentation GetOutputInfoForPhi(Node* node, Type* type,
+                                            Truncation use) {
     // Compute the representation.
-    if (type == nullptr) {
-      type = TypeOf(node);
-    }
     if (type->Is(Type::None())) {
       return MachineRepresentation::kNone;
     } else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
       return MachineRepresentation::kWord32;
-    } else if (use.TruncatesToWord32()) {
+    } else if (type->Is(Type::NumberOrOddball()) && use.IsUsedAsWord32()) {
       return MachineRepresentation::kWord32;
     } else if (type->Is(Type::Boolean())) {
       return MachineRepresentation::kBit;
-    } else if (type->Is(Type::Number())) {
+    } else if (type->Is(Type::NumberOrOddball()) && use.IsUsedAsFloat64()) {
       return MachineRepresentation::kFloat64;
-    } else if (use.TruncatesToFloat64()) {
+    } else if (type->Is(
+                   Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) {
+      // TODO(turbofan): For Phis that return either NaN or some Smi, it's
+      // beneficial to not go all the way to double, unless the uses are
+      // double uses. For tagging that just means some potentially expensive
+      // allocation code; we might want to do the same for -0 as well?
+      return MachineRepresentation::kTagged;
+    } else if (type->Is(Type::Number())) {
       return MachineRepresentation::kFloat64;
     } else if (type->Is(Type::Internal())) {
       // We mark (u)int64 as Type::Internal.
@@ -872,7 +893,8 @@
                    SimplifiedLowering* lowering) {
     ProcessInput(node, 0, UseInfo::Bool());
 
-    MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
+    MachineRepresentation output =
+        GetOutputInfoForPhi(node, TypeOf(node), truncation);
     SetOutput(node, output);
 
     if (lower()) {
@@ -893,7 +915,8 @@
   // Helper for handling phis.
   void VisitPhi(Node* node, Truncation truncation,
                 SimplifiedLowering* lowering) {
-    MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
+    MachineRepresentation output =
+        GetOutputInfoForPhi(node, TypeOf(node), truncation);
     // Only set the output representation if not running with type
     // feedback. (Feedback typing will set the representation.)
     SetOutput(node, output);
@@ -907,7 +930,7 @@
     }
 
     // Convert inputs to the output representation of this phi, pass the
-    // truncation truncation along.
+    // truncation along.
     UseInfo input_use(output, truncation);
     for (int i = 0; i < node->InputCount(); i++) {
       ProcessInput(node, i, i < values ? input_use : UseInfo::None());
@@ -916,31 +939,30 @@
 
   void VisitCall(Node* node, SimplifiedLowering* lowering) {
     const CallDescriptor* desc = CallDescriptorOf(node->op());
-    const MachineSignature* sig = desc->GetMachineSignature();
-    int params = static_cast<int>(sig->parameter_count());
+    int params = static_cast<int>(desc->ParameterCount());
+    int value_input_count = node->op()->ValueInputCount();
     // Propagate representation information from call descriptor.
-    for (int i = 0; i < node->InputCount(); i++) {
+    for (int i = 0; i < value_input_count; i++) {
       if (i == 0) {
         // The target of the call.
-        ProcessInput(node, i, UseInfo::None());
+        ProcessInput(node, i, UseInfo::Any());
       } else if ((i - 1) < params) {
         ProcessInput(node, i, TruncatingUseInfoFromRepresentation(
-                                  sig->GetParam(i - 1).representation()));
+                                  desc->GetInputType(i).representation()));
       } else {
-        ProcessInput(node, i, UseInfo::None());
+        ProcessInput(node, i, UseInfo::AnyTagged());
       }
     }
+    ProcessRemainingInputs(node, value_input_count);
 
-    if (sig->return_count() > 0) {
-      SetOutput(node,
-                desc->GetMachineSignature()->GetReturn().representation());
+    if (desc->ReturnCount() > 0) {
+      SetOutput(node, desc->GetReturnType(0).representation());
     } else {
       SetOutput(node, MachineRepresentation::kTagged);
     }
   }
 
   MachineSemantic DeoptValueSemanticOf(Type* type) {
-    CHECK(!type->Is(Type::None()));
     // We only need signedness to do deopt correctly.
     if (type->Is(Type::Signed32())) {
       return MachineSemantic::kInt32;
@@ -952,11 +974,11 @@
   }
 
   void VisitStateValues(Node* node) {
-    if (phase_ == PROPAGATE) {
+    if (propagate()) {
       for (int i = 0; i < node->InputCount(); i++) {
         EnqueueInput(node, i, UseInfo::Any());
       }
-    } else {
+    } else if (lower()) {
       Zone* zone = jsgraph_->zone();
       ZoneVector<MachineType>* types =
           new (zone->New(sizeof(ZoneVector<MachineType>)))
@@ -990,6 +1012,10 @@
     return changer_->Uint32OperatorFor(node->opcode());
   }
 
+  const Operator* Uint32OverflowOp(Node* node) {
+    return changer_->Uint32OverflowOperatorFor(node->opcode());
+  }
+
   const Operator* Float64Op(Node* node) {
     return changer_->Float64OperatorFor(node->opcode());
   }
@@ -1065,90 +1091,78 @@
     return jsgraph_->simplified();
   }
 
-  void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
-    for (Edge edge : node->use_edges()) {
-      if (NodeProperties::IsControlEdge(edge)) {
-        edge.UpdateTo(control);
-      } else if (NodeProperties::IsEffectEdge(edge)) {
-        edge.UpdateTo(effect);
-      } else {
-        DCHECK(NodeProperties::IsValueEdge(edge));
-      }
-    }
+  void LowerToCheckedInt32Mul(Node* node, Truncation truncation,
+                              Type* input0_type, Type* input1_type) {
+    // If one of the inputs is positive, or truncation is being applied,
+    // there is no need to return -0.
+    CheckForMinusZeroMode mz_mode =
+        truncation.IsUsedAsWord32() ||
+                (input0_type->Is(Type::OrderedNumber()) &&
+                 input0_type->Min() > 0) ||
+                (input1_type->Is(Type::OrderedNumber()) &&
+                 input1_type->Min() > 0)
+            ? CheckForMinusZeroMode::kDontCheckForMinusZero
+            : CheckForMinusZeroMode::kCheckForMinusZero;
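+    // (Example: -0 can only arise from 0 * negative or negative * 0, so an
+    // input with Min() > 0 rules it out; word32-truncating uses cannot
+    // observe the difference between 0 and -0 anyway.)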
+
+    NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
   }
 
-  void ChangeToPureOp(Node* node, const Operator* new_op) {
-    if (node->op()->EffectInputCount() > 0) {
-      DCHECK_LT(0, node->op()->ControlInputCount());
-      // Disconnect the node from effect and control chains.
-      Node* control = NodeProperties::GetControlInput(node);
-      Node* effect = NodeProperties::GetEffectInput(node);
-      ReplaceEffectControlUses(node, effect, control);
-      node->TrimInputCount(new_op->ValueInputCount());
-    } else {
-      DCHECK_EQ(0, node->op()->ControlInputCount());
-    }
-
-    NodeProperties::ChangeOp(node, new_op);
+  void ChangeToInt32OverflowOp(Node* node) {
+    NodeProperties::ChangeOp(node, Int32OverflowOp(node));
   }
 
-  void ChangeToInt32OverflowOp(Node* node, const Operator* new_op) {
-    NodeProperties::ChangeOp(node, new_op);
+  void ChangeToUint32OverflowOp(Node* node) {
+    NodeProperties::ChangeOp(node, Uint32OverflowOp(node));
   }
 
   void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
                                   SimplifiedLowering* lowering) {
-    if (BothInputsAre(node, type_cache_.kSigned32OrMinusZero) &&
-        NodeProperties::GetType(node)->Is(Type::Signed32())) {
-      // int32 + int32 = int32   ==>   signed Int32Add/Sub
-      VisitInt32Binop(node);
-      if (lower()) ChangeToPureOp(node, Int32Op(node));
-      return;
+    // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
+    // only eliminate an unused speculative number operation if we know that
+    // the inputs are PlainPrimitive, which excludes everything that might
+    // have side effects or throw during a ToNumber conversion.
+    if (BothInputsAre(node, Type::PlainPrimitive())) {
+      if (truncation.IsUnused()) return VisitUnused(node);
     }
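+    // (Example: an unused "sym + 1" with a Symbol input must still throw a
+    // TypeError, and an object input must still run its valueOf/toString,
+    // so the node can only be dropped for PlainPrimitive inputs.)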
-
-    // Use truncation if available.
     if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
-        truncation.TruncatesToWord32()) {
-      // safe-int + safe-int = x (truncated to int32)
-      // => signed Int32Add/Sub (truncated)
+        (GetUpperBound(node)->Is(Type::Signed32()) ||
+         GetUpperBound(node)->Is(Type::Unsigned32()) ||
+         truncation.IsUsedAsWord32())) {
+      // => Int32Add/Sub
       VisitWord32TruncatingBinop(node);
       if (lower()) ChangeToPureOp(node, Int32Op(node));
       return;
     }
 
     // Try to use type feedback.
-    BinaryOperationHints::Hint hint = BinaryOperationHintOf(node->op());
+    NumberOperationHint hint = NumberOperationHintOf(node->op());
 
     // Handle the case when no int32 checks on inputs are necessary
     // (but an overflow check is needed on the output).
     if (BothInputsAre(node, Type::Signed32()) ||
-        (BothInputsAre(node, type_cache_.kSigned32OrMinusZero) &&
+        (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
          NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
+      // If both the inputs and the feedback are int32, use the overflow op.
-      if (hint == BinaryOperationHints::kSignedSmall ||
-          hint == BinaryOperationHints::kSigned32) {
+      if (hint == NumberOperationHint::kSignedSmall ||
+          hint == NumberOperationHint::kSigned32) {
         VisitBinop(node, UseInfo::TruncatingWord32(),
-                   MachineRepresentation::kWord32, TypeCheckKind::kSigned32);
-        if (lower()) {
-          ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
-        }
+                   MachineRepresentation::kWord32, Type::Signed32());
+        if (lower()) ChangeToInt32OverflowOp(node);
         return;
       }
     }
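+    // (i.e. when both inputs are statically known to be Signed32, no checked
+    // conversions are needed on the inputs; only the operation itself has to
+    // deoptimize on overflow.)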
 
-    if (hint == BinaryOperationHints::kSignedSmall ||
-        hint == BinaryOperationHints::kSigned32) {
-      VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
-                 MachineRepresentation::kWord32, TypeCheckKind::kSigned32);
-      if (lower()) {
-        ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
-      }
+    if (hint == NumberOperationHint::kSignedSmall ||
+        hint == NumberOperationHint::kSigned32) {
+      VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                 MachineRepresentation::kWord32, Type::Signed32());
+      if (lower()) ChangeToInt32OverflowOp(node);
       return;
     }
 
     // default case => Float64Add/Sub
-    VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
-               MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+    VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+               MachineRepresentation::kFloat64, Type::Number());
     if (lower()) {
       ChangeToPureOp(node, Float64Op(node));
     }
@@ -1159,11 +1173,28 @@
   // Depending on the operator, propagate new usage info to the inputs.
   void VisitNode(Node* node, Truncation truncation,
                  SimplifiedLowering* lowering) {
+    // Unconditionally eliminate unused pure nodes (only relevant if there's
+    // a pure operation in between two effectful ones, where the last one
+    // is unused).
+    // Note: We must not do this for constants, as they are cached and we
+    // would thus kill the cached {node} during lowering (i.e. replace all
+    // uses with Dead), but at that point some node lowering might have
+    // already taken the constant {node} from the cache (while it was in
+    // a sane state still) and we would afterwards replace that use with
+    // Dead as well.
+    if (node->op()->ValueInputCount() > 0 &&
+        node->op()->HasProperty(Operator::kPure)) {
+      if (truncation.IsUnused()) return VisitUnused(node);
+    }
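+    // (Example: a pure NumberAdd whose value is never used can simply be
+    // killed here, whereas a cached constant has to survive so that later
+    // cache lookups do not hand out a dead node.)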
     switch (node->opcode()) {
       //------------------------------------------------------------------
       // Common operators.
       //------------------------------------------------------------------
       case IrOpcode::kStart:
+        // We use Start as a terminator for the frame state chain, so even
+        // though Start doesn't really produce a value, we have to say Tagged
+        // here, otherwise the input conversion will fail.
+        return VisitLeaf(node, MachineRepresentation::kTagged);
       case IrOpcode::kDead:
         return VisitLeaf(node, MachineRepresentation::kNone);
       case IrOpcode::kParameter: {
@@ -1214,10 +1245,10 @@
       case IrOpcode::kJSToNumber: {
         VisitInputs(node);
         // TODO(bmeurer): Optimize somewhat based on input type?
-        if (truncation.TruncatesToWord32()) {
+        if (truncation.IsUsedAsWord32()) {
           SetOutput(node, MachineRepresentation::kWord32);
           if (lower()) lowering->DoJSToNumberTruncatesToWord32(node, this);
-        } else if (truncation.TruncatesToFloat64()) {
+        } else if (truncation.IsUsedAsFloat64()) {
           SetOutput(node, MachineRepresentation::kFloat64);
           if (lower()) lowering->DoJSToNumberTruncatesToFloat64(node, this);
         } else {
@@ -1248,38 +1279,48 @@
         }
         return;
       }
-      case IrOpcode::kBooleanToNumber: {
-        if (lower()) {
-          NodeInfo* input_info = GetInfo(node->InputAt(0));
-          if (input_info->representation() == MachineRepresentation::kBit) {
-            // BooleanToNumber(x: kRepBit) => x
-            DeferReplacement(node, node->InputAt(0));
-          } else {
-            // BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
-            node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
-            NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
-          }
-        } else {
-          // No input representation requirement; adapt during lowering.
-          ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
-          SetOutput(node, MachineRepresentation::kWord32);
-        }
-        return;
-      }
-      case IrOpcode::kNumberEqual:
-      case IrOpcode::kNumberLessThan:
-      case IrOpcode::kNumberLessThanOrEqual: {
+      case IrOpcode::kNumberEqual: {
+        Type* const lhs_type = TypeOf(node->InputAt(0));
+        Type* const rhs_type = TypeOf(node->InputAt(1));
         // Number comparisons reduce to integer comparisons for integer inputs.
-        if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
-            TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
-          // => signed Int32Cmp
-          VisitInt32Cmp(node);
-          if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
-        } else if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
-                   TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
+        if ((lhs_type->Is(Type::Unsigned32()) &&
+             rhs_type->Is(Type::Unsigned32())) ||
+            (lhs_type->Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
+             rhs_type->Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
+             OneInputCannotBe(node, type_cache_.kZeroish))) {
           // => unsigned Int32Cmp
           VisitUint32Cmp(node);
           if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+          return;
+        }
+        if ((lhs_type->Is(Type::Signed32()) &&
+             rhs_type->Is(Type::Signed32())) ||
+            (lhs_type->Is(Type::Signed32OrMinusZeroOrNaN()) &&
+             rhs_type->Is(Type::Signed32OrMinusZeroOrNaN()) &&
+             OneInputCannotBe(node, type_cache_.kZeroish))) {
+          // => signed Int32Cmp
+          VisitInt32Cmp(node);
+          if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+          return;
+        }
+        // => Float64Cmp
+        VisitFloat64Cmp(node);
+        if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        return;
+      }
+      case IrOpcode::kNumberLessThan:
+      case IrOpcode::kNumberLessThanOrEqual: {
+        // Number comparisons reduce to integer comparisons for integer inputs.
+        if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
+          // => unsigned Int32Cmp
+          VisitUint32Cmp(node);
+          if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+        } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+                   TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
+          // => signed Int32Cmp
+          VisitInt32Cmp(node);
+          if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
         } else {
           // => Float64Cmp
           VisitFloat64Cmp(node);
@@ -1295,95 +1336,158 @@
       case IrOpcode::kSpeculativeNumberLessThan:
       case IrOpcode::kSpeculativeNumberLessThanOrEqual:
       case IrOpcode::kSpeculativeNumberEqual: {
+        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+        // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+        if (BothInputsAre(node, Type::PlainPrimitive())) {
+          if (truncation.IsUnused()) return VisitUnused(node);
+        }
         // Number comparisons reduce to integer comparisons for integer inputs.
-        if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
-            TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
-          // => signed Int32Cmp
-          VisitInt32Cmp(node);
-          if (lower()) ChangeToPureOp(node, Int32Op(node));
-          return;
-        } else if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
-                   TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
+        if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
           // => unsigned Int32Cmp
           VisitUint32Cmp(node);
           if (lower()) ChangeToPureOp(node, Uint32Op(node));
           return;
-        }
-        // Try to use type feedback.
-        CompareOperationHints::Hint hint = CompareOperationHintOf(node->op());
-
-        if (hint == CompareOperationHints::kSignedSmall) {
-          VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
-                     MachineRepresentation::kBit);
+        } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+                   TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
+          // => signed Int32Cmp
+          VisitInt32Cmp(node);
           if (lower()) ChangeToPureOp(node, Int32Op(node));
           return;
         }
-        DCHECK_EQ(CompareOperationHints::kNumber, hint);
-        // default case => Float64 comparison
-        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
-                   MachineRepresentation::kBit);
-        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        // Try to use type feedback.
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+        switch (hint) {
+          case NumberOperationHint::kSignedSmall:
+          case NumberOperationHint::kSigned32:
+            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                       MachineRepresentation::kBit);
+            if (lower()) ChangeToPureOp(node, Int32Op(node));
+            return;
+          case NumberOperationHint::kNumber:
+          case NumberOperationHint::kNumberOrOddball:
+            VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
+                       MachineRepresentation::kBit);
+            if (lower()) ChangeToPureOp(node, Float64Op(node));
+            return;
+        }
+        UNREACHABLE();
         return;
       }
 
       case IrOpcode::kNumberAdd:
       case IrOpcode::kNumberSubtract: {
-        if (BothInputsAre(node, Type::Signed32()) &&
-            NodeProperties::GetType(node)->Is(Type::Signed32())) {
-          // int32 + int32 = int32
-          // => signed Int32Add/Sub
-          VisitInt32Binop(node);
-          if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
-        } else if (BothInputsAre(node,
-                                 type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
-                   truncation.TruncatesToWord32()) {
-          // safe-int + safe-int = x (truncated to int32)
-          // => signed Int32Add/Sub (truncated)
+        if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+            (GetUpperBound(node)->Is(Type::Signed32()) ||
+             GetUpperBound(node)->Is(Type::Unsigned32()) ||
+             truncation.IsUsedAsWord32())) {
+          // => Int32Add/Sub
           VisitWord32TruncatingBinop(node);
-          if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+          if (lower()) ChangeToPureOp(node, Int32Op(node));
         } else {
           // => Float64Add/Sub
           VisitFloat64Binop(node);
-          if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+          if (lower()) ChangeToPureOp(node, Float64Op(node));
         }
         return;
       }
-      case IrOpcode::kSpeculativeNumberMultiply:
-      case IrOpcode::kNumberMultiply: {
-        if (BothInputsAreSigned32(node)) {
-          if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
-            // Multiply reduces to Int32Mul if the inputs and the output
-            // are integers.
-            VisitInt32Binop(node);
-            if (lower()) ChangeToPureOp(node, Int32Op(node));
-            return;
-          }
-          if (truncation.TruncatesToWord32() &&
-              NodeProperties::GetType(node)->Is(
-                  type_cache_.kSafeIntegerOrMinusZero)) {
-            // Multiply reduces to Int32Mul if the inputs are integers,
-            // the uses are truncating and the result is in the safe
-            // integer range.
-            VisitWord32TruncatingBinop(node);
-            if (lower()) ChangeToPureOp(node, Int32Op(node));
-            return;
-          }
+      case IrOpcode::kSpeculativeNumberMultiply: {
+        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+        // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+        if (BothInputsAre(node, Type::PlainPrimitive())) {
+          if (truncation.IsUnused()) return VisitUnused(node);
         }
-        // Number x Number => Float64Mul
-        if (BothInputsAre(node, Type::NumberOrUndefined())) {
-          VisitFloat64Binop(node);
-          if (lower()) ChangeToPureOp(node, Float64Op(node));
+        if (BothInputsAre(node, Type::Integral32()) &&
+            (NodeProperties::GetType(node)->Is(Type::Signed32()) ||
+             NodeProperties::GetType(node)->Is(Type::Unsigned32()) ||
+             (truncation.IsUsedAsWord32() &&
+              NodeProperties::GetType(node)->Is(
+                  type_cache_.kSafeIntegerOrMinusZero)))) {
+          // Multiply reduces to Int32Mul if the inputs are integers, and
+          // (a) the output is either known to be Signed32, or
+          // (b) the output is known to be Unsigned32, or
+          // (c) the uses are truncating and the result is in the safe
+          //     integer range.
+          VisitWord32TruncatingBinop(node);
+          if (lower()) ChangeToPureOp(node, Int32Op(node));
           return;
         }
+        // Try to use type feedback.
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+        Type* input0_type = TypeOf(node->InputAt(0));
+        Type* input1_type = TypeOf(node->InputAt(1));
+
+        // Handle the case when no int32 checks on inputs are necessary
+        // (but an overflow check is needed on the output).
+        if (BothInputsAre(node, Type::Signed32())) {
+          // If both the inputs and the feedback are int32, use the overflow op.
+          if (hint == NumberOperationHint::kSignedSmall ||
+              hint == NumberOperationHint::kSigned32) {
+            VisitBinop(node, UseInfo::TruncatingWord32(),
+                       MachineRepresentation::kWord32, Type::Signed32());
+            if (lower()) {
+              LowerToCheckedInt32Mul(node, truncation, input0_type,
+                                     input1_type);
+            }
+            return;
+          }
+        }
+
+        if (hint == NumberOperationHint::kSignedSmall ||
+            hint == NumberOperationHint::kSigned32) {
+          VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                     MachineRepresentation::kWord32, Type::Signed32());
+          if (lower()) {
+            LowerToCheckedInt32Mul(node, truncation, input0_type, input1_type);
+          }
+          return;
+        }
+
         // Checked float64 x float64 => float64
-        DCHECK_EQ(IrOpcode::kSpeculativeNumberMultiply, node->opcode());
-        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
-                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+                   MachineRepresentation::kFloat64, Type::Number());
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
       }
-      case IrOpcode::kSpeculativeNumberDivide:
-      case IrOpcode::kNumberDivide: {
+      case IrOpcode::kNumberMultiply: {
+        if (BothInputsAre(node, Type::Integral32()) &&
+            (NodeProperties::GetType(node)->Is(Type::Signed32()) ||
+             NodeProperties::GetType(node)->Is(Type::Unsigned32()) ||
+             (truncation.IsUsedAsWord32() &&
+              NodeProperties::GetType(node)->Is(
+                  type_cache_.kSafeIntegerOrMinusZero)))) {
+          // Multiply reduces to Int32Mul if the inputs are integers, and
+          // (a) the output is either known to be Signed32, or
+          // (b) the output is known to be Unsigned32, or
+          // (c) the uses are truncating and the result is in the safe
+          //     integer range.
+          VisitWord32TruncatingBinop(node);
+          if (lower()) ChangeToPureOp(node, Int32Op(node));
+          return;
+        }
+        // Number x Number => Float64Mul
+        VisitFloat64Binop(node);
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
+      }
+      case IrOpcode::kSpeculativeNumberDivide: {
+        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+        // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+        if (BothInputsAre(node, Type::PlainPrimitive())) {
+          if (truncation.IsUnused()) return VisitUnused(node);
+        }
+        if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
+          // => unsigned Uint32Div
+          VisitWord32TruncatingBinop(node);
+          if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
+          return;
+        }
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
           // => signed Int32Div
@@ -1391,19 +1495,84 @@
           if (lower()) DeferReplacement(node, lowering->Int32Div(node));
           return;
           }
-          if (truncation.TruncatesToWord32()) {
+          if (truncation.IsUsedAsWord32()) {
             // => signed Int32Div
             VisitWord32TruncatingBinop(node);
             if (lower()) DeferReplacement(node, lowering->Int32Div(node));
             return;
           }
         }
-        if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
+
+        // Try to use type feedback.
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+
+        // Handle the case when no uint32 checks on inputs are necessary
+        // (but an overflow check is needed on the output).
+        if (BothInputsAreUnsigned32(node)) {
+          if (hint == NumberOperationHint::kSignedSmall ||
+              hint == NumberOperationHint::kSigned32) {
+            VisitBinop(node, UseInfo::TruncatingWord32(),
+                       MachineRepresentation::kWord32, Type::Unsigned32());
+            if (lower()) ChangeToUint32OverflowOp(node);
+            return;
+          }
+        }
+
+        // Handle the case when no int32 checks on inputs are necessary
+        // (but an overflow check is needed on the output).
+        if (BothInputsAreSigned32(node)) {
+          // If both the inputs and the feedback are int32, use the overflow op.
+          if (hint == NumberOperationHint::kSignedSmall ||
+              hint == NumberOperationHint::kSigned32) {
+            VisitBinop(node, UseInfo::TruncatingWord32(),
+                       MachineRepresentation::kWord32, Type::Signed32());
+            if (lower()) ChangeToInt32OverflowOp(node);
+            return;
+          }
+        }
+
+        if (hint == NumberOperationHint::kSignedSmall ||
+            hint == NumberOperationHint::kSigned32) {
+          // If the result is truncated, we only need to check the inputs.
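+          // (Example: in "(a / b) | 0" the result is truncated to word32
+          // anyway, so a lossy division need not deoptimize; only the
+          // Signed32 checks on the inputs remain.)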
+          if (truncation.IsUsedAsWord32()) {
+            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                       MachineRepresentation::kWord32);
+            if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+          } else {
+            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                       MachineRepresentation::kWord32, Type::Signed32());
+            if (lower()) ChangeToInt32OverflowOp(node);
+          }
+          return;
+        }
+
+        // default case => Float64Div
+        VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+                   MachineRepresentation::kFloat64, Type::Number());
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
+      }
+      case IrOpcode::kNumberDivide: {
+        if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
           // => unsigned Uint32Div
           VisitWord32TruncatingBinop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
           return;
         }
+        if (BothInputsAreSigned32(node)) {
+          if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+            // => signed Int32Div
+            VisitInt32Binop(node);
+            if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+            return;
+          }
+          if (truncation.IsUsedAsWord32()) {
+            // => signed Int32Div
+            VisitWord32TruncatingBinop(node);
+            if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+            return;
+          }
+        }
         // Number x Number => Float64Div
         if (BothInputsAre(node, Type::NumberOrUndefined())) {
           VisitFloat64Binop(node);
@@ -1412,44 +1581,152 @@
         }
         // Checked float64 x float64 => float64
         DCHECK_EQ(IrOpcode::kSpeculativeNumberDivide, node->opcode());
-        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
-                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+                   MachineRepresentation::kFloat64, Type::Number());
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
       }
-      case IrOpcode::kSpeculativeNumberModulus:
-      case IrOpcode::kNumberModulus: {
-        if (BothInputsAreSigned32(node)) {
-          if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
-            // => signed Int32Mod
-            VisitInt32Binop(node);
-            if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
-            return;
-          }
-          if (truncation.TruncatesToWord32()) {
-            // => signed Int32Mod
-            VisitWord32TruncatingBinop(node);
-            if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
-            return;
-          }
+      case IrOpcode::kSpeculativeNumberModulus: {
+        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+        // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+        if (BothInputsAre(node, Type::PlainPrimitive())) {
+          if (truncation.IsUnused()) return VisitUnused(node);
         }
-        if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
+        if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
           // => unsigned Uint32Mod
           VisitWord32TruncatingBinop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
           return;
         }
-        // Number x Number => Float64Mod
-        if (BothInputsAre(node, Type::NumberOrUndefined())) {
-          // => Float64Mod
-          VisitFloat64Binop(node);
-          if (lower()) ChangeToPureOp(node, Float64Op(node));
+        if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+          // => signed Int32Mod
+          VisitWord32TruncatingBinop(node);
+          if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
           return;
         }
-        // Checked float64 x float64 => float64
-        DCHECK_EQ(IrOpcode::kSpeculativeNumberModulus, node->opcode());
-        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
-                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+
+        // Try to use type feedback.
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+
+        // Handle the case when no uint32 checks on inputs are necessary
+        // (but an overflow check is needed on the output).
+        if (BothInputsAreUnsigned32(node)) {
+          if (hint == NumberOperationHint::kSignedSmall ||
+              hint == NumberOperationHint::kSigned32) {
+            VisitBinop(node, UseInfo::TruncatingWord32(),
+                       MachineRepresentation::kWord32, Type::Unsigned32());
+            if (lower()) ChangeToUint32OverflowOp(node);
+            return;
+          }
+        }
+
+        // Handle the case when no int32 checks on inputs are necessary
+        // (but an overflow check is needed on the output).
+        if (BothInputsAre(node, Type::Signed32())) {
+          // If both the inputs and the feedback are int32, use the overflow op.
+          if (hint == NumberOperationHint::kSignedSmall ||
+              hint == NumberOperationHint::kSigned32) {
+            VisitBinop(node, UseInfo::TruncatingWord32(),
+                       MachineRepresentation::kWord32, Type::Signed32());
+            if (lower()) ChangeToInt32OverflowOp(node);
+            return;
+          }
+        }
+
+        if (hint == NumberOperationHint::kSignedSmall ||
+            hint == NumberOperationHint::kSigned32) {
+          // If the result is truncated, we only need to check the inputs.
+          if (truncation.IsUsedAsWord32()) {
+            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                       MachineRepresentation::kWord32);
+            if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+          } else {
+            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                       MachineRepresentation::kWord32, Type::Signed32());
+            if (lower()) ChangeToInt32OverflowOp(node);
+          }
+          return;
+        }
+
+        if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+          // We can only promise Float64 truncation here, as the decision is
+          // based on the feedback types of the inputs.
+          VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+                                   Truncation::Float64()),
+                     MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+          return;
+        }
+        if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+          // We can only promise Float64 truncation here, as the decision is
+          // based on the feedback types of the inputs.
+          VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+                                   Truncation::Float64()),
+                     MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+          return;
+        }
+        // default case => Float64Mod
+        VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+                   MachineRepresentation::kFloat64, Type::Number());
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
+      }
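
The Signed32 restriction on the overflow paths above matters because a word32 modulus cannot represent the -0 that JavaScript's % operator produces for a negative dividend with a zero remainder; the checked operator has to be able to bail out in that case. A minimal standalone C++ sketch of the mismatch (illustration only; not part of this change):

    #include <cmath>
    #include <cstdio>

    int main() {
      double d = std::fmod(-5.0, 5.0);  // JS semantics: -5 % 5 === -0
      int i = -5 % 5;                   // word32 semantics: plain 0
      std::printf("%g (signbit=%d) vs %d\n", d, (int)std::signbit(d), i);
    }
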
+      case IrOpcode::kNumberModulus: {
+        if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+          // => unsigned Uint32Mod
+          VisitWord32TruncatingBinop(node);
+          if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+          return;
+        }
+        if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+          // => signed Int32Mod
+          VisitWord32TruncatingBinop(node);
+          if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+          return;
+        }
+        if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+          // We can only promise Float64 truncation here, as the decision is
+          // based on the feedback types of the inputs.
+          VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+                                   Truncation::Float64()),
+                     MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+          return;
+        }
+        if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+            TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
+            (truncation.IsUsedAsWord32() ||
+             NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+          // We can only promise Float64 truncation here, as the decision is
+          // based on the feedback types of the inputs.
+          VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+                                   Truncation::Float64()),
+                     MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+          return;
+        }
+        // default case => Float64Mod
+        VisitFloat64Binop(node);
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
       }
@@ -1460,6 +1737,14 @@
         if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
         return;
       }
+      case IrOpcode::kSpeculativeNumberBitwiseOr:
+      case IrOpcode::kSpeculativeNumberBitwiseXor:
+      case IrOpcode::kSpeculativeNumberBitwiseAnd:
+        VisitSpeculativeInt32Binop(node);
+        if (lower()) {
+          ChangeToPureOp(node, Int32Op(node));
+        }
+        return;
       case IrOpcode::kNumberShiftLeft: {
         Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -1469,6 +1754,33 @@
         }
         return;
       }
+      case IrOpcode::kSpeculativeNumberShiftLeft: {
+        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+        // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+        if (BothInputsAre(node, Type::PlainPrimitive())) {
+          if (truncation.IsUnused()) return VisitUnused(node);
+        }
+        if (BothInputsAre(node, Type::NumberOrOddball())) {
+          Type* rhs_type = GetUpperBound(node->InputAt(1));
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kWord32);
+          if (lower()) {
+            lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+          }
+          return;
+        }
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
+        VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                   MachineRepresentation::kWord32, Type::Signed32());
+        if (lower()) {
+          lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+        }
+        return;
+      }
       case IrOpcode::kNumberShiftRight: {
         Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -1478,6 +1790,33 @@
         }
         return;
       }
+      case IrOpcode::kSpeculativeNumberShiftRight: {
+        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+        // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+        if (BothInputsAre(node, Type::PlainPrimitive())) {
+          if (truncation.IsUnused()) return VisitUnused(node);
+        }
+        if (BothInputsAre(node, Type::NumberOrOddball())) {
+          Type* rhs_type = GetUpperBound(node->InputAt(1));
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kWord32);
+          if (lower()) {
+            lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+          }
+          return;
+        }
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
+        VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                   MachineRepresentation::kWord32, Type::Signed32());
+        if (lower()) {
+          lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+        }
+        return;
+      }
       case IrOpcode::kNumberShiftRightLogical: {
         Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -1487,17 +1826,44 @@
         }
         return;
       }
+      case IrOpcode::kSpeculativeNumberShiftRightLogical: {
+        // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+        // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+        if (BothInputsAre(node, Type::PlainPrimitive())) {
+          if (truncation.IsUnused()) return VisitUnused(node);
+        }
+        if (BothInputsAre(node, Type::NumberOrOddball())) {
+          Type* rhs_type = GetUpperBound(node->InputAt(1));
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kWord32);
+          if (lower()) {
+            lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+          }
+          return;
+        }
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
+        VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                   MachineRepresentation::kWord32, Type::Unsigned32());
+        if (lower()) {
+          lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+        }
+        return;
+      }
       case IrOpcode::kNumberAbs: {
-        if (InputIs(node, Type::Unsigned32())) {
+        if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32())) {
           VisitUnop(node, UseInfo::TruncatingWord32(),
                     MachineRepresentation::kWord32);
           if (lower()) DeferReplacement(node, node->InputAt(0));
-        } else if (InputIs(node, type_cache_.kSafeSigned32)) {
+        } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32())) {
           VisitUnop(node, UseInfo::TruncatingWord32(),
                     MachineRepresentation::kWord32);
           if (lower()) DeferReplacement(node, lowering->Int32Abs(node));
-        } else if (InputIs(node,
-                           type_cache_.kPositiveIntegerOrMinusZeroOrNaN)) {
+        } else if (TypeOf(node->InputAt(0))
+                       ->Is(type_cache_.kPositiveIntegerOrMinusZeroOrNaN)) {
           VisitUnop(node, UseInfo::TruncatingFloat64(),
                     MachineRepresentation::kFloat64);
           if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -1520,42 +1886,93 @@
         if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
         return;
       }
-      case IrOpcode::kNumberCeil: {
-        VisitUnop(node, UseInfo::TruncatingFloat64(),
-                  MachineRepresentation::kFloat64);
-        if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
-        return;
-      }
-      case IrOpcode::kNumberFloor: {
-        VisitUnop(node, UseInfo::TruncatingFloat64(),
-                  MachineRepresentation::kFloat64);
-        if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
-        return;
-      }
       case IrOpcode::kNumberFround: {
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat32);
         if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         return;
       }
-      case IrOpcode::kNumberAtan2: {
+      case IrOpcode::kNumberMax: {
+        // TODO(turbofan): We should consider feedback types here as well.
+        if (BothInputsAreUnsigned32(node)) {
+          VisitUint32Binop(node);
+          if (lower()) {
+            lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
+                            MachineRepresentation::kWord32);
+          }
+        } else if (BothInputsAreSigned32(node)) {
+          VisitInt32Binop(node);
+          if (lower()) {
+            lowering->DoMax(node, lowering->machine()->Int32LessThan(),
+                            MachineRepresentation::kWord32);
+          }
+        } else if (BothInputsAre(node, Type::PlainNumber())) {
+          VisitFloat64Binop(node);
+          if (lower()) {
+            lowering->DoMax(node, lowering->machine()->Float64LessThan(),
+                            MachineRepresentation::kFloat64);
+          }
+        } else {
+          VisitFloat64Binop(node);
+          if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        }
+        return;
+      }
+      case IrOpcode::kNumberMin: {
+        // TODO(turbofan): We should consider feedback types here as well.
+        if (BothInputsAreUnsigned32(node)) {
+          VisitUint32Binop(node);
+          if (lower()) {
+            lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
+                            MachineRepresentation::kWord32);
+          }
+        } else if (BothInputsAreSigned32(node)) {
+          VisitInt32Binop(node);
+          if (lower()) {
+            lowering->DoMin(node, lowering->machine()->Int32LessThan(),
+                            MachineRepresentation::kWord32);
+          }
+        } else if (BothInputsAre(node, Type::PlainNumber())) {
+          VisitFloat64Binop(node);
+          if (lower()) {
+            lowering->DoMin(node, lowering->machine()->Float64LessThan(),
+                            MachineRepresentation::kFloat64);
+          }
+        } else {
+          VisitFloat64Binop(node);
+          if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+        }
+        return;
+      }
+      case IrOpcode::kNumberAtan2:
+      case IrOpcode::kNumberPow: {
         VisitBinop(node, UseInfo::TruncatingFloat64(),
                    MachineRepresentation::kFloat64);
         if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         return;
       }
+      case IrOpcode::kNumberAcos:
+      case IrOpcode::kNumberAcosh:
+      case IrOpcode::kNumberAsin:
+      case IrOpcode::kNumberAsinh:
       case IrOpcode::kNumberAtan:
       case IrOpcode::kNumberAtanh:
+      case IrOpcode::kNumberCeil:
       case IrOpcode::kNumberCos:
+      case IrOpcode::kNumberCosh:
       case IrOpcode::kNumberExp:
       case IrOpcode::kNumberExpm1:
+      case IrOpcode::kNumberFloor:
       case IrOpcode::kNumberLog:
       case IrOpcode::kNumberLog1p:
       case IrOpcode::kNumberLog2:
       case IrOpcode::kNumberLog10:
       case IrOpcode::kNumberCbrt:
       case IrOpcode::kNumberSin:
-      case IrOpcode::kNumberTan: {
+      case IrOpcode::kNumberSinh:
+      case IrOpcode::kNumberTan:
+      case IrOpcode::kNumberTanh:
+      case IrOpcode::kNumberTrunc: {
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat64);
         if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
@@ -1567,18 +1984,24 @@
         if (lower()) DeferReplacement(node, lowering->Float64Round(node));
         return;
       }
+      case IrOpcode::kNumberSign: {
+        if (InputIs(node, Type::Signed32())) {
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, lowering->Int32Sign(node));
+        } else {
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kFloat64);
+          if (lower()) DeferReplacement(node, lowering->Float64Sign(node));
+        }
+        return;
+      }
       case IrOpcode::kNumberSqrt: {
         VisitUnop(node, UseInfo::TruncatingFloat64(),
                   MachineRepresentation::kFloat64);
         if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
         return;
       }
-      case IrOpcode::kNumberTrunc: {
-        VisitUnop(node, UseInfo::TruncatingFloat64(),
-                  MachineRepresentation::kFloat64);
-        if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
-        return;
-      }
       case IrOpcode::kNumberToInt32: {
         // Just change representation if necessary.
         VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -1657,57 +2080,86 @@
         }
         return;
       }
+      case IrOpcode::kStringCharCodeAt: {
+        VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kWord32);
+        return;
+      }
       case IrOpcode::kStringFromCharCode: {
         VisitUnop(node, UseInfo::TruncatingWord32(),
                   MachineRepresentation::kTagged);
         return;
       }
-      case IrOpcode::kStringToNumber: {
-        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-        if (lower()) {
-          // StringToNumber(x) => Call(StringToNumber, x, no-context)
-          Operator::Properties properties = Operator::kEliminatable;
-          Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
-          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
-              flags, properties);
-          node->InsertInput(jsgraph_->zone(), 0,
-                            jsgraph_->HeapConstant(callable.code()));
-          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
-          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
-          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
-        }
-        return;
-      }
 
       case IrOpcode::kCheckBounds: {
-        VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
-                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
-        return;
-      }
-      case IrOpcode::kCheckTaggedPointer: {
-        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        Type* index_type = TypeOf(node->InputAt(0));
+        if (index_type->Is(Type::Unsigned32())) {
+          VisitBinop(node, UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kWord32);
+        } else {
+          VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                     UseInfo::TruncatingWord32(),
+                     MachineRepresentation::kWord32);
+        }
         if (lower()) {
-          if (InputIs(node, Type::TaggedPointer())) {
+          // The bounds check is redundant if we already know that
+          // the index is within the bounds of [0.0, length[.
+          if (index_type->Is(NodeProperties::GetType(node))) {
             DeferReplacement(node, node->InputAt(0));
           }
         }
         return;
       }
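
The replacement condition above is plain type subsumption: the node's own type is the refined index range, so if the input's type already lies inside it the check can never fail and the input can stand in for the node. Schematically, with a hypothetical Range standing in for a Type's numeric bounds:

    struct Range { double min, max; };

    // A bounds check is redundant when the index's known range is already
    // contained in the range the check itself would produce.
    bool BoundsCheckIsRedundant(const Range& index, const Range& result) {
      return result.min <= index.min && index.max <= result.max;
    }
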
+      case IrOpcode::kCheckIf: {
+        ProcessInput(node, 0, UseInfo::Bool());
+        ProcessRemainingInputs(node, 1);
+        SetOutput(node, MachineRepresentation::kNone);
+        return;
+      }
+      case IrOpcode::kCheckNumber: {
+        if (InputIs(node, Type::Number())) {
+          if (truncation.IsUsedAsWord32()) {
+            VisitUnop(node, UseInfo::TruncatingWord32(),
+                      MachineRepresentation::kWord32);
+          } else {
+            // TODO(jarin,bmeurer): We need to go to Tagged here, because
+            // otherwise we cannot distinguish the hole NaN (which might need to
+            // be treated as undefined). We should have a dedicated Type for
+            // that at some point, and maybe even a dedicated truncation.
+            VisitUnop(node, UseInfo::AnyTagged(),
+                      MachineRepresentation::kTagged);
+          }
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        }
+        return;
+      }
+      case IrOpcode::kCheckString: {
+        if (InputIs(node, Type::String())) {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        }
+        return;
+      }
+      case IrOpcode::kCheckTaggedPointer: {
+        if (InputCannotBe(node, Type::SignedSmall())) {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        }
+        return;
+      }
       case IrOpcode::kCheckTaggedSigned: {
-        if (SmiValuesAre32Bits() && truncation.TruncatesToWord32()) {
-          // TODO(jarin,bmeurer): Add CheckedSignedSmallAsWord32?
-          VisitUnop(node, UseInfo::CheckedSigned32AsWord32(),
+        if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
+          VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
                     MachineRepresentation::kWord32);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
           VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-          if (lower()) {
-            if (InputIs(node, Type::TaggedSigned())) {
-              DeferReplacement(node, node->InputAt(0));
-            }
-          }
         }
         return;
       }
@@ -1719,23 +2171,25 @@
         return;
       }
       case IrOpcode::kLoadField: {
+        if (truncation.IsUnused()) return VisitUnused(node);
         FieldAccess access = FieldAccessOf(node->op());
-        ProcessInput(node, 0, UseInfoForBasePointer(access));
-        ProcessRemainingInputs(node, 1);
-        SetOutput(node, access.machine_type.representation());
+        MachineRepresentation const representation =
+            access.machine_type.representation();
+        // TODO(bmeurer): Introduce an appropriate tagged-signed machine rep.
+        VisitUnop(node, UseInfoForBasePointer(access), representation);
         return;
       }
       case IrOpcode::kStoreField: {
         FieldAccess access = FieldAccessOf(node->op());
+        WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+            access.base_is_tagged, access.machine_type.representation(),
+            access.offset, access.type, node->InputAt(1));
         ProcessInput(node, 0, UseInfoForBasePointer(access));
         ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
                                   access.machine_type.representation()));
         ProcessRemainingInputs(node, 2);
         SetOutput(node, MachineRepresentation::kNone);
         if (lower()) {
-          WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
-              access.base_is_tagged, access.machine_type.representation(),
-              access.offset, access.type, node->InputAt(1));
           if (write_barrier_kind < access.write_barrier_kind) {
             access.write_barrier_kind = write_barrier_kind;
             NodeProperties::ChangeOp(
@@ -1745,6 +2199,7 @@
         return;
       }
       case IrOpcode::kLoadBuffer: {
+        if (truncation.IsUnused()) return VisitUnused(node);
         BufferAccess access = BufferAccessOf(node->op());
         ProcessInput(node, 0, UseInfo::PointerInt());        // buffer
         ProcessInput(node, 1, UseInfo::TruncatingWord32());  // offset
@@ -1752,8 +2207,8 @@
         ProcessRemainingInputs(node, 3);
 
         MachineRepresentation output;
-        if (truncation.TruncatesUndefinedToZeroOrNaN()) {
-          if (truncation.TruncatesNaNToZero()) {
+        if (truncation.IdentifiesUndefinedAndNaNAndZero()) {
+          if (truncation.IdentifiesNaNAndZero()) {
             // If undefined is truncated to a non-NaN number, we can use
             // the load's representation.
             output = access.machine_type().representation();
@@ -1791,15 +2246,18 @@
         return;
       }
       case IrOpcode::kLoadElement: {
+        if (truncation.IsUnused()) return VisitUnused(node);
         ElementAccess access = ElementAccessOf(node->op());
-        ProcessInput(node, 0, UseInfoForBasePointer(access));  // base
-        ProcessInput(node, 1, UseInfo::TruncatingWord32());    // index
-        ProcessRemainingInputs(node, 2);
-        SetOutput(node, access.machine_type.representation());
+        VisitBinop(node, UseInfoForBasePointer(access),
+                   UseInfo::TruncatingWord32(),
+                   access.machine_type.representation());
         return;
       }
       case IrOpcode::kStoreElement: {
         ElementAccess access = ElementAccessOf(node->op());
+        WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+            access.base_is_tagged, access.machine_type.representation(),
+            access.type, node->InputAt(2));
         ProcessInput(node, 0, UseInfoForBasePointer(access));  // base
         ProcessInput(node, 1, UseInfo::TruncatingWord32());    // index
         ProcessInput(node, 2,
@@ -1808,9 +2266,6 @@
         ProcessRemainingInputs(node, 3);
         SetOutput(node, MachineRepresentation::kNone);
         if (lower()) {
-          WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
-              access.base_is_tagged, access.machine_type.representation(),
-              access.type, node->InputAt(2));
           if (write_barrier_kind < access.write_barrier_kind) {
             access.write_barrier_kind = write_barrier_kind;
             NodeProperties::ChangeOp(
@@ -1819,10 +2274,39 @@
         }
         return;
       }
-      case IrOpcode::kPlainPrimitiveToNumber:
-        if (truncation.TruncatesToWord32()) {
-          // TODO(jarin): Extend this to Number \/ Oddball
-          if (InputIs(node, Type::NumberOrUndefined())) {
+      case IrOpcode::kLoadTypedElement: {
+        MachineRepresentation const rep =
+            MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
+        ProcessInput(node, 0, UseInfo::AnyTagged());         // buffer
+        ProcessInput(node, 1, UseInfo::AnyTagged());         // base pointer
+        ProcessInput(node, 2, UseInfo::PointerInt());        // external pointer
+        ProcessInput(node, 3, UseInfo::TruncatingWord32());  // index
+        ProcessRemainingInputs(node, 4);
+        SetOutput(node, rep);
+        return;
+      }
+      case IrOpcode::kStoreTypedElement: {
+        MachineRepresentation const rep =
+            MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
+        ProcessInput(node, 0, UseInfo::AnyTagged());         // buffer
+        ProcessInput(node, 1, UseInfo::AnyTagged());         // base pointer
+        ProcessInput(node, 2, UseInfo::PointerInt());        // external pointer
+        ProcessInput(node, 3, UseInfo::TruncatingWord32());  // index
+        ProcessInput(node, 4,
+                     TruncatingUseInfoFromRepresentation(rep));  // value
+        ProcessRemainingInputs(node, 5);
+        SetOutput(node, MachineRepresentation::kNone);
+        return;
+      }
+      case IrOpcode::kPlainPrimitiveToNumber: {
+        if (InputIs(node, Type::Boolean())) {
+          VisitUnop(node, UseInfo::Bool(), MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if (InputIs(node, Type::String())) {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+          if (lower()) lowering->DoStringToNumber(node);
+        } else if (truncation.IsUsedAsWord32()) {
+          if (InputIs(node, Type::NumberOrOddball())) {
             VisitUnop(node, UseInfo::TruncatingWord32(),
                       MachineRepresentation::kWord32);
             if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -1834,9 +2318,8 @@
                                        simplified()->PlainPrimitiveToWord32());
             }
           }
-        } else if (truncation.TruncatesToFloat64()) {
-          // TODO(jarin): Extend this to Number \/ Oddball
-          if (InputIs(node, Type::NumberOrUndefined())) {
+        } else if (truncation.IsUsedAsFloat64()) {
+          if (InputIs(node, Type::NumberOrOddball())) {
             VisitUnop(node, UseInfo::TruncatingFloat64(),
                       MachineRepresentation::kFloat64);
             if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -1852,6 +2335,7 @@
           VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         }
         return;
+      }
       case IrOpcode::kObjectIsCallable:
       case IrOpcode::kObjectIsNumber:
       case IrOpcode::kObjectIsReceiver:
@@ -1863,31 +2347,61 @@
         return;
       }
       case IrOpcode::kCheckFloat64Hole: {
+        if (truncation.IsUnused()) return VisitUnused(node);
         CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
         ProcessInput(node, 0, UseInfo::TruncatingFloat64());
         ProcessRemainingInputs(node, 1);
         SetOutput(node, MachineRepresentation::kFloat64);
-        if (truncation.TruncatesToFloat64() &&
+        if (truncation.IsUsedAsFloat64() &&
             mode == CheckFloat64HoleMode::kAllowReturnHole) {
           if (lower()) DeferReplacement(node, node->InputAt(0));
         }
         return;
       }
       case IrOpcode::kCheckTaggedHole: {
-        CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
-        if (truncation.TruncatesToWord32() &&
-            mode == CheckTaggedHoleMode::kConvertHoleToUndefined) {
-          ProcessInput(node, 0, UseInfo::CheckedSigned32AsWord32());
-          ProcessRemainingInputs(node, 1);
-          SetOutput(node, MachineRepresentation::kWord32);
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        return;
+      }
+      case IrOpcode::kConvertTaggedHoleToUndefined: {
+        if (InputIs(node, Type::NumberOrOddball()) &&
+            truncation.IsUsedAsWord32()) {
+          // Propagate the Word32 truncation.
+          VisitUnop(node, UseInfo::TruncatingWord32(),
+                    MachineRepresentation::kWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if (InputIs(node, Type::NumberOrOddball()) &&
+                   truncation.IsUsedAsFloat64()) {
+          // Propagate the Float64 truncation.
+          VisitUnop(node, UseInfo::TruncatingFloat64(),
+                    MachineRepresentation::kFloat64);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if (InputIs(node, Type::NonInternal())) {
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
-          ProcessInput(node, 0, UseInfo::AnyTagged());
-          ProcessRemainingInputs(node, 1);
-          SetOutput(node, MachineRepresentation::kTagged);
+          // TODO(turbofan): Add a (Tagged) truncation that identifies hole
+          // and undefined, i.e. for a[i] === obj cases.
+          VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         }
         return;
       }
+      case IrOpcode::kCheckMaps:
+      case IrOpcode::kTransitionElementsKind: {
+        VisitInputs(node);
+        return SetOutput(node, MachineRepresentation::kNone);
+      }
+      case IrOpcode::kEnsureWritableFastElements:
+        return VisitBinop(node, UseInfo::AnyTagged(),
+                          MachineRepresentation::kTagged);
+      case IrOpcode::kMaybeGrowFastElements: {
+        ProcessInput(node, 0, UseInfo::AnyTagged());         // object
+        ProcessInput(node, 1, UseInfo::AnyTagged());         // elements
+        ProcessInput(node, 2, UseInfo::TruncatingWord32());  // index
+        ProcessInput(node, 3, UseInfo::TruncatingWord32());  // length
+        ProcessRemainingInputs(node, 4);
+        SetOutput(node, MachineRepresentation::kTagged);
+        return;
+      }
 
       //------------------------------------------------------------------
       // Machine-level operators.
@@ -2043,6 +2557,17 @@
         return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kStateValues:
         return VisitStateValues(node);
+      case IrOpcode::kTypeGuard: {
+        // We just get rid of the sigma here. In principle, it should be
+        // possible to refine the truncation and representation based on
+        // the sigma's type.
+        MachineRepresentation output =
+            GetOutputInfoForPhi(node, TypeOf(node->InputAt(0)), truncation);
+
+        VisitUnop(node, UseInfo(output, truncation), output);
+        if (lower()) DeferReplacement(node, node->InputAt(0));
+        return;
+      }
 
       // The following opcodes are not produced before representation
       // inference runs, so we do not have any real test coverage.
@@ -2059,6 +2584,9 @@
       case IrOpcode::kCheckedTaggedToFloat64:
       case IrOpcode::kPlainPrimitiveToWord32:
       case IrOpcode::kPlainPrimitiveToFloat64:
+      case IrOpcode::kLoopExit:
+      case IrOpcode::kLoopExitValue:
+      case IrOpcode::kLoopExitEffect:
         FATAL("Representation inference: unsupported opcodes.");
         break;
 
@@ -2082,27 +2610,34 @@
       Node* control = NodeProperties::GetControlInput(node);
       Node* effect = NodeProperties::GetEffectInput(node);
       ReplaceEffectControlUses(node, effect, control);
-    } else {
-      DCHECK_EQ(0, node->op()->ControlInputCount());
     }
 
-    if (replacement->id() < count_ &&
-        GetUpperBound(node)->Is(GetUpperBound(replacement)) &&
-        TypeOf(node)->Is(TypeOf(replacement))) {
-      // Replace with a previously existing node eagerly only if the type is the
-      // same.
-      node->ReplaceUses(replacement);
-    } else {
-      // Otherwise, we are replacing a node with a representation change.
-      // Such a substitution must be done after all lowering is done, because
-      // changing the type could confuse the representation change
-      // insertion for uses of the node.
-      replacements_.push_back(node);
-      replacements_.push_back(replacement);
-    }
+    replacements_.push_back(node);
+    replacements_.push_back(replacement);
+
     node->NullAllInputs();  // Node is now dead.
   }
 
+  void Kill(Node* node) {
+    TRACE("killing #%d:%s\n", node->id(), node->op()->mnemonic());
+
+    if (node->op()->EffectInputCount() == 1) {
+      DCHECK_LT(0, node->op()->ControlInputCount());
+      // Disconnect the node from effect and control chains.
+      Node* control = NodeProperties::GetControlInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      ReplaceEffectControlUses(node, effect, control);
+    } else {
+      DCHECK_EQ(0, node->op()->EffectInputCount());
+      DCHECK_EQ(0, node->op()->ControlOutputCount());
+      DCHECK_EQ(0, node->op()->EffectOutputCount());
+    }
+
+    node->ReplaceUses(jsgraph_->Dead());
+
+    node->NullAllInputs();  // The {node} is now dead.
+  }
+
   void PrintOutputInfo(NodeInfo* info) {
     if (FLAG_trace_representation) {
       OFStream os(stdout);
@@ -2161,7 +2696,6 @@
   OperationTyper op_typer_;  // helper for the feedback typer
 
   NodeInfo* GetInfo(Node* node) {
-    DCHECK(node->id() >= 0);
     DCHECK(node->id() < count_);
     return &info_[node->id()];
   }
@@ -2170,12 +2704,10 @@
 };
 
 SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
-                                       SourcePositionTable* source_positions,
-                                       Flags flags)
+                                       SourcePositionTable* source_positions)
     : jsgraph_(jsgraph),
       zone_(zone),
       type_cache_(TypeCache::Get()),
-      flags_(flags),
       source_positions_(source_positions) {}
 
 void SimplifiedLowering::LowerAllNodes() {
@@ -2416,262 +2948,14 @@
   NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
 }
 
-Node* SimplifiedLowering::Float64Ceil(Node* const node) {
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
-  Node* const input = node->InputAt(0);
-
-  // Use fast hardware instruction if available.
-  if (machine()->Float64RoundUp().IsSupported()) {
-    return graph()->NewNode(machine()->Float64RoundUp().op(), input);
-  }
-
-  // General case for ceil.
-  //
-  //   if 0.0 < input then
-  //     if 2^52 <= input then
-  //       input
-  //     else
-  //       let temp1 = (2^52 + input) - 2^52 in
-  //       if temp1 < input then
-  //         temp1 + 1
-  //       else
-  //         temp1
-  //   else
-  //     if input == 0 then
-  //       input
-  //     else
-  //       if input <= -2^52 then
-  //         input
-  //       else
-  //         let temp1 = -0 - input in
-  //         let temp2 = (2^52 + temp1) - 2^52 in
-  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
-  //         -0 - temp3
-  //
-  // Note: We do not use the Diamond helper class here, because it really hurts
-  // readability with nested diamonds.
-
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
-                                   graph()->start());
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
-  {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
-          common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), temp1, input),
-          graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
-  {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        Node* temp3 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
-            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
-        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
-      }
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
-  }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                          vtrue0, vfalse0, merge0);
-}
-
-Node* SimplifiedLowering::Float64Floor(Node* const node) {
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_one = jsgraph()->Float64Constant(-1.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
-  Node* const input = node->InputAt(0);
-
-  // Use fast hardware instruction if available.
-  if (machine()->Float64RoundDown().IsSupported()) {
-    return graph()->NewNode(machine()->Float64RoundDown().op(), input);
-  }
-
-  // General case for floor.
-  //
-  //   if 0.0 < input then
-  //     if 2^52 <= input then
-  //       input
-  //     else
-  //       let temp1 = (2^52 + input) - 2^52 in
-  //       if input < temp1 then
-  //         temp1 - 1
-  //       else
-  //         temp1
-  //   else
-  //     if input == 0 then
-  //       input
-  //     else
-  //       if input <= -2^52 then
-  //         input
-  //       else
-  //         let temp1 = -0 - input in
-  //         let temp2 = (2^52 + temp1) - 2^52 in
-  //         if temp2 < temp1 then
-  //           -1 - temp2
-  //         else
-  //           -0 - temp2
-  //
-  // Note: We do not use the Diamond helper class here, because it really hurts
-  // readability with nested diamonds.
-
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
-                                   graph()->start());
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
-  {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
-          common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
-          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
-  {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        vfalse2 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
-            graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
-      }
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
-  }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                          vtrue0, vfalse0, merge0);
-}
-
 Node* SimplifiedLowering::Float64Round(Node* const node) {
   Node* const one = jsgraph()->Float64Constant(1.0);
   Node* const one_half = jsgraph()->Float64Constant(0.5);
   Node* const input = node->InputAt(0);
 
   // Round up towards Infinity, and adjust if the difference exceeds 0.5.
-  Node* result = Float64Ceil(node);
+  Node* result = graph()->NewNode(machine()->Float64RoundUp().placeholder(),
+                                  node->InputAt(0));
   return graph()->NewNode(
       common()->Select(MachineRepresentation::kFloat64),
       graph()->NewNode(
@@ -2680,138 +2964,35 @@
       result, graph()->NewNode(machine()->Float64Sub(), result, one));
 }
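
With the dedicated Float64Ceil lowering gone, Float64Round leans on the Float64RoundUp machine operator directly: take ceil(x), then subtract one if the ceiling overshot the input by more than 0.5. A reference restatement of the arithmetic (standalone C++; names are illustrative):

    #include <cassert>
    #include <cmath>

    double RoundViaCeil(double x) {
      double r = std::ceil(x);
      return (r - 0.5 <= x) ? r : r - 1.0;
    }

    int main() {
      assert(RoundViaCeil(2.5) == 3.0);   // ties round up, like Math.round
      assert(RoundViaCeil(-2.5) == -2.0);
      assert(RoundViaCeil(2.4) == 2.0);
    }
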
 
-Node* SimplifiedLowering::Float64Trunc(Node* const node) {
-  Node* const one = jsgraph()->Float64Constant(1.0);
+Node* SimplifiedLowering::Float64Sign(Node* const node) {
+  Node* const minus_one = jsgraph()->Float64Constant(-1.0);
   Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const one = jsgraph()->Float64Constant(1.0);
+
   Node* const input = node->InputAt(0);
 
-  // Use fast hardware instruction if available.
-  if (machine()->Float64RoundTruncate().IsSupported()) {
-    return graph()->NewNode(machine()->Float64RoundTruncate().op(), input);
-  }
-
-  // General case for trunc.
-  //
-  //   if 0.0 < input then
-  //     if 2^52 <= input then
-  //       input
-  //     else
-  //       let temp1 = (2^52 + input) - 2^52 in
-  //       if input < temp1 then
-  //         temp1 - 1
-  //       else
-  //         temp1
-  //   else
-  //     if input == 0 then
-  //       input
-  //     else
-  //       if input <= -2^52 then
-  //         input
-  //       else
-  //         let temp1 = -0 - input in
-  //         let temp2 = (2^52 + temp1) - 2^52 in
-  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
-  //         -0 - temp3
-  //
-  // Note: We do not use the Diamond helper class here, because it really hurts
-  // readability with nested diamonds.
-
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
-                                   graph()->start());
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
-  {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
+  return graph()->NewNode(
+      common()->Select(MachineRepresentation::kFloat64),
+      graph()->NewNode(machine()->Float64LessThan(), input, zero), minus_one,
+      graph()->NewNode(
           common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
-          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
-  {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        Node* temp3 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
-            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
-        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
-      }
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
-  }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                          vtrue0, vfalse0, merge0);
+          graph()->NewNode(machine()->Float64LessThan(), zero, input), one,
+          input));
 }
 
 Node* SimplifiedLowering::Int32Abs(Node* const node) {
-  Node* const zero = jsgraph()->Int32Constant(0);
   Node* const input = node->InputAt(0);
 
-  // if 0 < input then input else 0 - input
-  return graph()->NewNode(
-      common()->Select(MachineRepresentation::kWord32, BranchHint::kTrue),
-      graph()->NewNode(machine()->Int32LessThan(), zero, input), input,
-      graph()->NewNode(machine()->Int32Sub(), zero, input));
+  // Generate code for the absolute integer value.
+  //
+  //    let sign = input >> 31 in
+  //    (input ^ sign) - sign
+
+  Node* sign = graph()->NewNode(machine()->Word32Sar(), input,
+                                jsgraph()->Int32Constant(31));
+  return graph()->NewNode(machine()->Int32Sub(),
+                          graph()->NewNode(machine()->Word32Xor(), input, sign),
+                          sign);
 }
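
The new Int32Abs emits the classic branch-free absolute value. The same trick on ordinary values, assuming two's-complement integers and an arithmetic right shift (true on all V8 targets, though implementation-defined in portable C++ of this era):

    #include <cstdint>

    int32_t Int32AbsRef(int32_t input) {   // reference only
      int32_t sign = input >> 31;    // 0 if input >= 0, -1 (all ones) if not
      return (input ^ sign) - sign;  // identity for >= 0, ~input + 1 for < 0
    }
    // Int32AbsRef(-5) == 5, Int32AbsRef(7) == 7; INT32_MIN wraps around,
    // exactly as the unchecked word32 lowering does.
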
 
 Node* SimplifiedLowering::Int32Div(Node* const node) {
@@ -2986,6 +3167,21 @@
   return graph()->NewNode(phi_op, true0, false0, merge0);
 }
 
+Node* SimplifiedLowering::Int32Sign(Node* const node) {
+  Node* const minus_one = jsgraph()->Int32Constant(-1);
+  Node* const zero = jsgraph()->Int32Constant(0);
+  Node* const one = jsgraph()->Int32Constant(1);
+
+  Node* const input = node->InputAt(0);
+
+  return graph()->NewNode(
+      common()->Select(MachineRepresentation::kWord32),
+      graph()->NewNode(machine()->Int32LessThan(), input, zero), minus_one,
+      graph()->NewNode(
+          common()->Select(MachineRepresentation::kWord32),
+          graph()->NewNode(machine()->Int32LessThan(), zero, input), one,
+          zero));
+}
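
Int32Sign and Float64Sign are each a pair of nested selects; in scalar form (reference-only helpers):

    #include <cstdint>

    int32_t Int32SignRef(int32_t x) { return x < 0 ? -1 : (0 < x ? 1 : 0); }

    // The float64 variant keeps the input itself in the final slot so that
    // -0.0 and NaN fall through unchanged, matching Math.sign.
    double Float64SignRef(double x) { return x < 0 ? -1.0 : (0 < x ? 1.0 : x); }
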
 
 Node* SimplifiedLowering::Uint32Div(Node* const node) {
   Uint32BinopMatcher m(node);
@@ -3065,6 +3261,27 @@
   return graph()->NewNode(phi_op, true0, false0, merge0);
 }
 
+void SimplifiedLowering::DoMax(Node* node, Operator const* op,
+                               MachineRepresentation rep) {
+  Node* const lhs = node->InputAt(0);
+  Node* const rhs = node->InputAt(1);
+
+  node->ReplaceInput(0, graph()->NewNode(op, lhs, rhs));
+  DCHECK_EQ(rhs, node->InputAt(1));
+  node->AppendInput(graph()->zone(), lhs);
+  NodeProperties::ChangeOp(node, common()->Select(rep));
+}
+
+void SimplifiedLowering::DoMin(Node* node, Operator const* op,
+                               MachineRepresentation rep) {
+  Node* const lhs = node->InputAt(0);
+  Node* const rhs = node->InputAt(1);
+
+  node->InsertInput(graph()->zone(), 0, graph()->NewNode(op, lhs, rhs));
+  DCHECK_EQ(lhs, node->InputAt(1));
+  DCHECK_EQ(rhs, node->InputAt(2));
+  NodeProperties::ChangeOp(node, common()->Select(rep));
+}
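
DoMax and DoMin reuse the comparison they are handed as the condition of a Select: Max(lhs, rhs) becomes Select(lhs < rhs, rhs, lhs), and Min(lhs, rhs) becomes Select(lhs < rhs, lhs, rhs). In scalar form (illustrative):

    template <typename T>
    T SelectMax(T lhs, T rhs) { return lhs < rhs ? rhs : lhs; }

    template <typename T>
    T SelectMin(T lhs, T rhs) { return lhs < rhs ? lhs : rhs; }

For float64 this form loses the NaN and -0 handling of Math.max/Math.min, which is why the kNumberMax/kNumberMin cases above take it only for PlainNumber inputs and otherwise fall back to the Float64 operator.
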
 
 void SimplifiedLowering::DoShift(Node* node, Operator const* op,
                                  Type* rhs_type) {
@@ -3073,7 +3290,21 @@
     node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
                                            jsgraph()->Int32Constant(0x1f)));
   }
-  NodeProperties::ChangeOp(node, op);
+  DCHECK(op->HasProperty(Operator::kPure));
+  ChangeToPureOp(node, op);
+}
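Masking the shift count with 0x1f matches JavaScript's shift semantics, which
use only the low five bits of the right-hand side. For example:

    1 << 33   // evaluates as 1 << (33 & 0x1f) == 1 << 1 == 2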
+
+void SimplifiedLowering::DoStringToNumber(Node* node) {
+  Operator::Properties properties = Operator::kEliminatable;
+  Callable callable = CodeFactory::StringToNumber(isolate());
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  node->InsertInput(graph()->zone(), 0,
+                    jsgraph()->HeapConstant(callable.code()));
+  node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
+  node->AppendInput(graph()->zone(), graph()->start());
+  NodeProperties::ChangeOp(node, common()->Call(desc));
 }
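After this rewrite the node is an ordinary stub call; roughly, its inputs end
up laid out as:

    [ HeapConstant(StringToNumber code), <string input>,
      NoContextConstant, start ]

where the appended start node serves as the effect input of the
kEliminatable call.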
 
 Node* SimplifiedLowering::ToNumberCode() {
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 75fd9c2..18c7331 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -5,7 +5,6 @@
 #ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
 #define V8_COMPILER_SIMPLIFIED_LOWERING_H_
 
-#include "src/base/flags.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
@@ -27,15 +26,14 @@
 
 class SimplifiedLowering final {
  public:
-  enum Flag { kNoFlag = 0u, kTypeFeedbackEnabled = 1u << 0 };
-  typedef base::Flags<Flag> Flags;
   SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
-                     SourcePositionTable* source_positions,
-                     Flags flags = kNoFlag);
+                     SourcePositionTable* source_positions);
   ~SimplifiedLowering() {}
 
   void LowerAllNodes();
 
+  void DoMax(Node* node, Operator const* op, MachineRepresentation rep);
+  void DoMin(Node* node, Operator const* op, MachineRepresentation rep);
   void DoJSToNumberTruncatesToFloat64(Node* node,
                                       RepresentationSelector* selector);
   void DoJSToNumberTruncatesToWord32(Node* node,
@@ -46,8 +44,7 @@
                     RepresentationChanger* changer);
   void DoStoreBuffer(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
-
-  Flags flags() const { return flags_; }
+  void DoStringToNumber(Node* node);
 
  private:
   JSGraph* const jsgraph_;
@@ -55,7 +52,6 @@
   TypeCache const& type_cache_;
   SetOncePointer<Node> to_number_code_;
   SetOncePointer<Operator const> to_number_operator_;
-  Flags flags_;
 
   // TODO(danno): SimplifiedLowering shouldn't know anything about the source
   // positions table, but must for now since there currently is no other way to
@@ -64,13 +60,12 @@
   // position information via the SourcePositionWrapper like all other reducers.
   SourcePositionTable* source_positions_;
 
-  Node* Float64Ceil(Node* const node);
-  Node* Float64Floor(Node* const node);
   Node* Float64Round(Node* const node);
-  Node* Float64Trunc(Node* const node);
+  Node* Float64Sign(Node* const node);
   Node* Int32Abs(Node* const node);
   Node* Int32Div(Node* const node);
   Node* Int32Mod(Node* const node);
+  Node* Int32Sign(Node* const node);
   Node* Uint32Div(Node* const node);
   Node* Uint32Mod(Node* const node);
 
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 5db9dfb..d8bd1e0 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -34,9 +34,7 @@
 
 SimplifiedOperatorReducer::SimplifiedOperatorReducer(Editor* editor,
                                                      JSGraph* jsgraph)
-    : AdvancedReducer(editor),
-      jsgraph_(jsgraph),
-      type_cache_(TypeCache::Get()) {}
+    : AdvancedReducer(editor), jsgraph_(jsgraph) {}
 
 SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
 
@@ -45,9 +43,8 @@
   switch (node->opcode()) {
     case IrOpcode::kBooleanNot: {
       HeapObjectMatcher m(node->InputAt(0));
-      if (m.HasValue()) {
-        return Replace(jsgraph()->BooleanConstant(!m.Value()->BooleanValue()));
-      }
+      if (m.Is(factory()->true_value())) return ReplaceBoolean(false);
+      if (m.Is(factory()->false_value())) return ReplaceBoolean(true);
       if (m.IsBooleanNot()) return Replace(m.InputAt(0));
       break;
     }
@@ -129,6 +126,22 @@
       }
       break;
     }
+    case IrOpcode::kCheckIf: {
+      HeapObjectMatcher m(node->InputAt(0));
+      if (m.Is(factory()->true_value())) {
+        Node* const effect = NodeProperties::GetEffectInput(node);
+        return Replace(effect);
+      }
+      break;
+    }
+    case IrOpcode::kCheckNumber: {
+      NodeMatcher m(node->InputAt(0));
+      if (m.IsConvertTaggedHoleToUndefined()) {
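+        // Safe: neither the-hole nor undefined is a Number, so dropping the
+        // conversion does not change which inputs deoptimize here.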
+        node->ReplaceInput(0, m.InputAt(0));
+        return Changed(node);
+      }
+      break;
+    }
     case IrOpcode::kCheckTaggedPointer: {
       Node* const input = node->InputAt(0);
       if (DecideObjectIsSmi(input) == Decision::kFalse) {
@@ -143,6 +156,11 @@
         ReplaceWithValue(node, input);
         return Replace(input);
       }
+      NodeMatcher m(input);
+      if (m.IsConvertTaggedHoleToUndefined()) {
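+        // Safe: the-hole and undefined both fail the Smi check anyway, so
+        // the conversion cannot be observed by this check.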
+        node->ReplaceInput(0, m.InputAt(0));
+        return Changed(node);
+      }
       break;
     }
     case IrOpcode::kObjectIsSmi: {
@@ -162,52 +180,17 @@
       if (m.HasValue()) return ReplaceNumber(std::fabs(m.Value()));
       break;
     }
-    case IrOpcode::kNumberCeil:
-    case IrOpcode::kNumberFloor:
-    case IrOpcode::kNumberRound:
-    case IrOpcode::kNumberTrunc: {
-      Node* const input = NodeProperties::GetValueInput(node, 0);
-      Type* const input_type = NodeProperties::GetType(input);
-      if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
-        return Replace(input);
-      }
+    case IrOpcode::kReferenceEqual: {
+      HeapObjectBinopMatcher m(node);
+      if (m.left().node() == m.right().node()) return ReplaceBoolean(true);
       break;
     }
-    case IrOpcode::kReferenceEqual:
-      return ReduceReferenceEqual(node);
-    case IrOpcode::kTypeGuard:
-      return ReduceTypeGuard(node);
     default:
       break;
   }
   return NoChange();
 }
 
-Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
-  DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
-  Node* const left = NodeProperties::GetValueInput(node, 0);
-  Node* const right = NodeProperties::GetValueInput(node, 1);
-  HeapObjectMatcher match_left(left);
-  HeapObjectMatcher match_right(right);
-  if (match_left.HasValue() && match_right.HasValue()) {
-    if (match_left.Value().is_identical_to(match_right.Value())) {
-      return Replace(jsgraph()->TrueConstant());
-    } else {
-      return Replace(jsgraph()->FalseConstant());
-    }
-  }
-  return NoChange();
-}
-
-Reduction SimplifiedOperatorReducer::ReduceTypeGuard(Node* node) {
-  DCHECK_EQ(IrOpcode::kTypeGuard, node->opcode());
-  Node* const input = NodeProperties::GetValueInput(node, 0);
-  Type* const input_type = NodeProperties::GetTypeOrAny(input);
-  Type* const guard_type = TypeOf(node->op());
-  if (input_type->Is(guard_type)) return Replace(input);
-  return NoChange();
-}
-
 Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
                                             Node* a) {
   DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
@@ -240,9 +223,15 @@
   return Replace(jsgraph()->Constant(value));
 }
 
+Factory* SimplifiedOperatorReducer::factory() const {
+  return isolate()->factory();
+}
 
 Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
 
+Isolate* SimplifiedOperatorReducer::isolate() const {
+  return jsgraph()->isolate();
+}
 
 MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
   return jsgraph()->machine();
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 6ee903b..44bfdff 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -11,7 +11,8 @@
 namespace internal {
 
 // Forward declarations.
-class TypeCache;
+class Factory;
+class Isolate;
 
 namespace compiler {
 
@@ -29,7 +30,6 @@
 
  private:
   Reduction ReduceReferenceEqual(Node* node);
-  Reduction ReduceTypeGuard(Node* node);
 
   Reduction Change(Node* node, const Operator* op, Node* a);
   Reduction ReplaceBoolean(bool value);
@@ -41,13 +41,14 @@
   Reduction ReplaceNumber(double value);
   Reduction ReplaceNumber(int32_t value);
 
+  Factory* factory() const;
   Graph* graph() const;
+  Isolate* isolate() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   MachineOperatorBuilder* machine() const;
   SimplifiedOperatorBuilder* simplified() const;
 
   JSGraph* const jsgraph_;
-  TypeCache const& type_cache_;
 
   DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
 };
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 0f32b0c..cf0c3de 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -124,6 +124,15 @@
   return os;
 }
 
+template <>
+void Operator1<FieldAccess>::PrintParameter(std::ostream& os,
+                                            PrintVerbosity verbose) const {
+  if (verbose == PrintVerbosity::kVerbose) {
+    os << parameter();
+  } else {
+    os << "[+" << parameter().offset << "]";
+  }
+}
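With non-verbose printing only the field offset is shown, so a load or store
of a field at offset 12, for example, prints compactly as:

    [+12]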
 
 bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
   // On purpose we don't include the write barrier kind here, as this method is
@@ -172,6 +181,12 @@
   return OpParameter<ElementAccess>(op);
 }
 
+ExternalArrayType ExternalArrayTypeOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kLoadTypedElement ||
+         op->opcode() == IrOpcode::kStoreTypedElement);
+  return OpParameter<ExternalArrayType>(op);
+}
+
 size_t hash_value(CheckFloat64HoleMode mode) {
   return static_cast<size_t>(mode);
 }
@@ -192,155 +207,326 @@
   return OpParameter<CheckFloat64HoleMode>(op);
 }
 
-size_t hash_value(CheckTaggedHoleMode mode) {
+CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
+         op->opcode() == IrOpcode::kCheckedInt32Mul ||
+         op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
+         op->opcode() == IrOpcode::kCheckedTaggedToInt32);
+  return OpParameter<CheckForMinusZeroMode>(op);
+}
+
+size_t hash_value(CheckForMinusZeroMode mode) {
   return static_cast<size_t>(mode);
 }
 
-std::ostream& operator<<(std::ostream& os, CheckTaggedHoleMode mode) {
+std::ostream& operator<<(std::ostream& os, CheckForMinusZeroMode mode) {
   switch (mode) {
-    case CheckTaggedHoleMode::kConvertHoleToUndefined:
-      return os << "convert-hole-to-undefined";
-    case CheckTaggedHoleMode::kNeverReturnHole:
-      return os << "never-return-hole";
+    case CheckForMinusZeroMode::kCheckForMinusZero:
+      return os << "check-for-minus-zero";
+    case CheckForMinusZeroMode::kDontCheckForMinusZero:
+      return os << "dont-check-for-minus-zero";
   }
   UNREACHABLE();
   return os;
 }
 
-CheckTaggedHoleMode CheckTaggedHoleModeOf(const Operator* op) {
-  DCHECK_EQ(IrOpcode::kCheckTaggedHole, op->opcode());
-  return OpParameter<CheckTaggedHoleMode>(op);
+size_t hash_value(CheckTaggedInputMode mode) {
+  return static_cast<size_t>(mode);
 }
 
-Type* TypeOf(const Operator* op) {
-  DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
-  return OpParameter<Type*>(op);
+std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
+  switch (mode) {
+    case CheckTaggedInputMode::kNumber:
+      return os << "Number";
+    case CheckTaggedInputMode::kNumberOrOddball:
+      return os << "NumberOrOddball";
+  }
+  UNREACHABLE();
+  return os;
 }
 
-BinaryOperationHints::Hint BinaryOperationHintOf(const Operator* op) {
+CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kCheckedTaggedToFloat64, op->opcode());
+  return OpParameter<CheckTaggedInputMode>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, GrowFastElementsFlags flags) {
+  bool empty = true;
+  if (flags & GrowFastElementsFlag::kArrayObject) {
+    os << "ArrayObject";
+    empty = false;
+  }
+  if (flags & GrowFastElementsFlag::kDoubleElements) {
+    if (!empty) os << "|";
+    os << "DoubleElements";
+    empty = false;
+  }
+  if (flags & GrowFastElementsFlag::kHoleyElements) {
+    if (!empty) os << "|";
+    os << "HoleyElements";
+    empty = false;
+  }
+  if (empty) os << "None";
+  return os;
+}
+
+GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kMaybeGrowFastElements, op->opcode());
+  return OpParameter<GrowFastElementsFlags>(op);
+}
+
+size_t hash_value(ElementsTransition transition) {
+  return static_cast<uint8_t>(transition);
+}
+
+std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
+  switch (transition) {
+    case ElementsTransition::kFastTransition:
+      return os << "fast-transition";
+    case ElementsTransition::kSlowTransition:
+      return os << "slow-transition";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+ElementsTransition ElementsTransitionOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kTransitionElementsKind, op->opcode());
+  return OpParameter<ElementsTransition>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
+  switch (hint) {
+    case NumberOperationHint::kSignedSmall:
+      return os << "SignedSmall";
+    case NumberOperationHint::kSigned32:
+      return os << "Signed32";
+    case NumberOperationHint::kNumber:
+      return os << "Number";
+    case NumberOperationHint::kNumberOrOddball:
+      return os << "NumberOrOddball";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+size_t hash_value(NumberOperationHint hint) {
+  return static_cast<uint8_t>(hint);
+}
+
+NumberOperationHint NumberOperationHintOf(const Operator* op) {
   DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
          op->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
          op->opcode() == IrOpcode::kSpeculativeNumberMultiply ||
          op->opcode() == IrOpcode::kSpeculativeNumberDivide ||
-         op->opcode() == IrOpcode::kSpeculativeNumberModulus);
-  return OpParameter<BinaryOperationHints::Hint>(op);
-}
-
-CompareOperationHints::Hint CompareOperationHintOf(const Operator* op) {
-  DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberEqual ||
+         op->opcode() == IrOpcode::kSpeculativeNumberModulus ||
+         op->opcode() == IrOpcode::kSpeculativeNumberShiftLeft ||
+         op->opcode() == IrOpcode::kSpeculativeNumberShiftRight ||
+         op->opcode() == IrOpcode::kSpeculativeNumberShiftRightLogical ||
+         op->opcode() == IrOpcode::kSpeculativeNumberBitwiseAnd ||
+         op->opcode() == IrOpcode::kSpeculativeNumberBitwiseOr ||
+         op->opcode() == IrOpcode::kSpeculativeNumberBitwiseXor ||
+         op->opcode() == IrOpcode::kSpeculativeNumberEqual ||
          op->opcode() == IrOpcode::kSpeculativeNumberLessThan ||
          op->opcode() == IrOpcode::kSpeculativeNumberLessThanOrEqual);
-  return OpParameter<CompareOperationHints::Hint>(op);
+  return OpParameter<NumberOperationHint>(op);
 }
 
-#define PURE_OP_LIST(V)                                    \
-  V(BooleanNot, Operator::kNoProperties, 1)                \
-  V(BooleanToNumber, Operator::kNoProperties, 1)           \
-  V(NumberEqual, Operator::kCommutative, 2)                \
-  V(NumberLessThan, Operator::kNoProperties, 2)            \
-  V(NumberLessThanOrEqual, Operator::kNoProperties, 2)     \
-  V(NumberAdd, Operator::kCommutative, 2)                  \
-  V(NumberSubtract, Operator::kNoProperties, 2)            \
-  V(NumberMultiply, Operator::kCommutative, 2)             \
-  V(NumberDivide, Operator::kNoProperties, 2)              \
-  V(NumberModulus, Operator::kNoProperties, 2)             \
-  V(NumberBitwiseOr, Operator::kCommutative, 2)            \
-  V(NumberBitwiseXor, Operator::kCommutative, 2)           \
-  V(NumberBitwiseAnd, Operator::kCommutative, 2)           \
-  V(NumberShiftLeft, Operator::kNoProperties, 2)           \
-  V(NumberShiftRight, Operator::kNoProperties, 2)          \
-  V(NumberShiftRightLogical, Operator::kNoProperties, 2)   \
-  V(NumberImul, Operator::kCommutative, 2)                 \
-  V(NumberAbs, Operator::kNoProperties, 1)                 \
-  V(NumberClz32, Operator::kNoProperties, 1)               \
-  V(NumberCeil, Operator::kNoProperties, 1)                \
-  V(NumberFloor, Operator::kNoProperties, 1)               \
-  V(NumberFround, Operator::kNoProperties, 1)              \
-  V(NumberAtan, Operator::kNoProperties, 1)                \
-  V(NumberAtan2, Operator::kNoProperties, 2)               \
-  V(NumberAtanh, Operator::kNoProperties, 1)               \
-  V(NumberCbrt, Operator::kNoProperties, 1)                \
-  V(NumberCos, Operator::kNoProperties, 1)                 \
-  V(NumberExp, Operator::kNoProperties, 1)                 \
-  V(NumberExpm1, Operator::kNoProperties, 1)               \
-  V(NumberLog, Operator::kNoProperties, 1)                 \
-  V(NumberLog1p, Operator::kNoProperties, 1)               \
-  V(NumberLog10, Operator::kNoProperties, 1)               \
-  V(NumberLog2, Operator::kNoProperties, 1)                \
-  V(NumberRound, Operator::kNoProperties, 1)               \
-  V(NumberSin, Operator::kNoProperties, 1)                 \
-  V(NumberSqrt, Operator::kNoProperties, 1)                \
-  V(NumberTan, Operator::kNoProperties, 1)                 \
-  V(NumberTrunc, Operator::kNoProperties, 1)               \
-  V(NumberToInt32, Operator::kNoProperties, 1)             \
-  V(NumberToUint32, Operator::kNoProperties, 1)            \
-  V(NumberSilenceNaN, Operator::kNoProperties, 1)          \
-  V(StringFromCharCode, Operator::kNoProperties, 1)        \
-  V(StringToNumber, Operator::kNoProperties, 1)            \
-  V(PlainPrimitiveToNumber, Operator::kNoProperties, 1)    \
-  V(PlainPrimitiveToWord32, Operator::kNoProperties, 1)    \
-  V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1)   \
-  V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1) \
-  V(ChangeTaggedToInt32, Operator::kNoProperties, 1)       \
-  V(ChangeTaggedToUint32, Operator::kNoProperties, 1)      \
-  V(ChangeTaggedToFloat64, Operator::kNoProperties, 1)     \
-  V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1) \
-  V(ChangeInt32ToTagged, Operator::kNoProperties, 1)       \
-  V(ChangeUint32ToTagged, Operator::kNoProperties, 1)      \
-  V(ChangeFloat64ToTagged, Operator::kNoProperties, 1)     \
-  V(ChangeTaggedToBit, Operator::kNoProperties, 1)         \
-  V(ChangeBitToTagged, Operator::kNoProperties, 1)         \
-  V(TruncateTaggedToWord32, Operator::kNoProperties, 1)    \
-  V(TruncateTaggedToFloat64, Operator::kNoProperties, 1)   \
-  V(ObjectIsCallable, Operator::kNoProperties, 1)          \
-  V(ObjectIsNumber, Operator::kNoProperties, 1)            \
-  V(ObjectIsReceiver, Operator::kNoProperties, 1)          \
-  V(ObjectIsSmi, Operator::kNoProperties, 1)               \
-  V(ObjectIsString, Operator::kNoProperties, 1)            \
-  V(ObjectIsUndetectable, Operator::kNoProperties, 1)      \
-  V(StringEqual, Operator::kCommutative, 2)                \
-  V(StringLessThan, Operator::kNoProperties, 2)            \
-  V(StringLessThanOrEqual, Operator::kNoProperties, 2)
+#define PURE_OP_LIST(V)                                          \
+  V(BooleanNot, Operator::kNoProperties, 1, 0)                   \
+  V(NumberEqual, Operator::kCommutative, 2, 0)                   \
+  V(NumberLessThan, Operator::kNoProperties, 2, 0)               \
+  V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0)        \
+  V(NumberAdd, Operator::kCommutative, 2, 0)                     \
+  V(NumberSubtract, Operator::kNoProperties, 2, 0)               \
+  V(NumberMultiply, Operator::kCommutative, 2, 0)                \
+  V(NumberDivide, Operator::kNoProperties, 2, 0)                 \
+  V(NumberModulus, Operator::kNoProperties, 2, 0)                \
+  V(NumberBitwiseOr, Operator::kCommutative, 2, 0)               \
+  V(NumberBitwiseXor, Operator::kCommutative, 2, 0)              \
+  V(NumberBitwiseAnd, Operator::kCommutative, 2, 0)              \
+  V(NumberShiftLeft, Operator::kNoProperties, 2, 0)              \
+  V(NumberShiftRight, Operator::kNoProperties, 2, 0)             \
+  V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0)      \
+  V(NumberImul, Operator::kCommutative, 2, 0)                    \
+  V(NumberAbs, Operator::kNoProperties, 1, 0)                    \
+  V(NumberClz32, Operator::kNoProperties, 1, 0)                  \
+  V(NumberCeil, Operator::kNoProperties, 1, 0)                   \
+  V(NumberFloor, Operator::kNoProperties, 1, 0)                  \
+  V(NumberFround, Operator::kNoProperties, 1, 0)                 \
+  V(NumberAcos, Operator::kNoProperties, 1, 0)                   \
+  V(NumberAcosh, Operator::kNoProperties, 1, 0)                  \
+  V(NumberAsin, Operator::kNoProperties, 1, 0)                   \
+  V(NumberAsinh, Operator::kNoProperties, 1, 0)                  \
+  V(NumberAtan, Operator::kNoProperties, 1, 0)                   \
+  V(NumberAtan2, Operator::kNoProperties, 2, 0)                  \
+  V(NumberAtanh, Operator::kNoProperties, 1, 0)                  \
+  V(NumberCbrt, Operator::kNoProperties, 1, 0)                   \
+  V(NumberCos, Operator::kNoProperties, 1, 0)                    \
+  V(NumberCosh, Operator::kNoProperties, 1, 0)                   \
+  V(NumberExp, Operator::kNoProperties, 1, 0)                    \
+  V(NumberExpm1, Operator::kNoProperties, 1, 0)                  \
+  V(NumberLog, Operator::kNoProperties, 1, 0)                    \
+  V(NumberLog1p, Operator::kNoProperties, 1, 0)                  \
+  V(NumberLog10, Operator::kNoProperties, 1, 0)                  \
+  V(NumberLog2, Operator::kNoProperties, 1, 0)                   \
+  V(NumberMax, Operator::kNoProperties, 2, 0)                    \
+  V(NumberMin, Operator::kNoProperties, 2, 0)                    \
+  V(NumberPow, Operator::kNoProperties, 2, 0)                    \
+  V(NumberRound, Operator::kNoProperties, 1, 0)                  \
+  V(NumberSign, Operator::kNoProperties, 1, 0)                   \
+  V(NumberSin, Operator::kNoProperties, 1, 0)                    \
+  V(NumberSinh, Operator::kNoProperties, 1, 0)                   \
+  V(NumberSqrt, Operator::kNoProperties, 1, 0)                   \
+  V(NumberTan, Operator::kNoProperties, 1, 0)                    \
+  V(NumberTanh, Operator::kNoProperties, 1, 0)                   \
+  V(NumberTrunc, Operator::kNoProperties, 1, 0)                  \
+  V(NumberToInt32, Operator::kNoProperties, 1, 0)                \
+  V(NumberToUint32, Operator::kNoProperties, 1, 0)               \
+  V(NumberSilenceNaN, Operator::kNoProperties, 1, 0)             \
+  V(StringCharCodeAt, Operator::kNoProperties, 2, 1)             \
+  V(StringFromCharCode, Operator::kNoProperties, 1, 0)           \
+  V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0)       \
+  V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0)       \
+  V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0)      \
+  V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0)    \
+  V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0)          \
+  V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0)         \
+  V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0)        \
+  V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0)    \
+  V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0)          \
+  V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0)         \
+  V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0)            \
+  V(ChangeBitToTagged, Operator::kNoProperties, 1, 0)            \
+  V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0)       \
+  V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0)      \
+  V(ObjectIsCallable, Operator::kNoProperties, 1, 0)             \
+  V(ObjectIsNumber, Operator::kNoProperties, 1, 0)               \
+  V(ObjectIsReceiver, Operator::kNoProperties, 1, 0)             \
+  V(ObjectIsSmi, Operator::kNoProperties, 1, 0)                  \
+  V(ObjectIsString, Operator::kNoProperties, 1, 0)               \
+  V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0)         \
+  V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+  V(ReferenceEqual, Operator::kCommutative, 2, 0)                \
+  V(StringEqual, Operator::kCommutative, 2, 0)                   \
+  V(StringLessThan, Operator::kNoProperties, 2, 0)               \
+  V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0)
 
-#define SPECULATIVE_BINOP_LIST(V) \
-  V(SpeculativeNumberAdd)         \
-  V(SpeculativeNumberSubtract)    \
-  V(SpeculativeNumberDivide)      \
-  V(SpeculativeNumberMultiply)    \
-  V(SpeculativeNumberModulus)
+#define SPECULATIVE_NUMBER_BINOP_LIST(V)      \
+  SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
+  V(SpeculativeNumberEqual)                   \
+  V(SpeculativeNumberLessThan)                \
+  V(SpeculativeNumberLessThanOrEqual)
 
-#define CHECKED_OP_LIST(V)    \
-  V(CheckTaggedPointer, 1)    \
-  V(CheckTaggedSigned, 1)     \
-  V(CheckedInt32Add, 2)       \
-  V(CheckedInt32Sub, 2)       \
-  V(CheckedUint32ToInt32, 1)  \
-  V(CheckedFloat64ToInt32, 1) \
-  V(CheckedTaggedToInt32, 1)  \
-  V(CheckedTaggedToFloat64, 1)
+#define CHECKED_OP_LIST(V)            \
+  V(CheckBounds, 2, 1)                \
+  V(CheckIf, 1, 0)                    \
+  V(CheckNumber, 1, 1)                \
+  V(CheckString, 1, 1)                \
+  V(CheckTaggedHole, 1, 1)            \
+  V(CheckTaggedPointer, 1, 1)         \
+  V(CheckTaggedSigned, 1, 1)          \
+  V(CheckedInt32Add, 2, 1)            \
+  V(CheckedInt32Sub, 2, 1)            \
+  V(CheckedInt32Div, 2, 1)            \
+  V(CheckedInt32Mod, 2, 1)            \
+  V(CheckedUint32Div, 2, 1)           \
+  V(CheckedUint32Mod, 2, 1)           \
+  V(CheckedUint32ToInt32, 1, 1)       \
+  V(CheckedTaggedSignedToInt32, 1, 1) \
+  V(CheckedTruncateTaggedToWord32, 1, 1)
 
 struct SimplifiedOperatorGlobalCache final {
-#define PURE(Name, properties, input_count)                                \
+#define PURE(Name, properties, value_input_count, control_input_count)     \
   struct Name##Operator final : public Operator {                          \
     Name##Operator()                                                       \
         : Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
-                   input_count, 0, 0, 1, 0, 0) {}                          \
+                   value_input_count, 0, control_input_count, 1, 0, 0) {}  \
   };                                                                       \
   Name##Operator k##Name;
   PURE_OP_LIST(PURE)
 #undef PURE
 
-#define CHECKED(Name, value_input_count)                            \
-  struct Name##Operator final : public Operator {                   \
-    Name##Operator()                                                \
-        : Operator(IrOpcode::k##Name,                               \
-                   Operator::kFoldable | Operator::kNoThrow, #Name, \
-                   value_input_count, 1, 1, 1, 1, 0) {}             \
-  };                                                                \
+#define CHECKED(Name, value_input_count, value_output_count)             \
+  struct Name##Operator final : public Operator {                        \
+    Name##Operator()                                                     \
+        : Operator(IrOpcode::k##Name,                                    \
+                   Operator::kFoldable | Operator::kNoThrow, #Name,      \
+                   value_input_count, 1, 1, value_output_count, 1, 0) {} \
+  };                                                                     \
   Name##Operator k##Name;
   CHECKED_OP_LIST(CHECKED)
 #undef CHECKED
 
+  template <CheckForMinusZeroMode kMode>
+  struct ChangeFloat64ToTaggedOperator final
+      : public Operator1<CheckForMinusZeroMode> {
+    ChangeFloat64ToTaggedOperator()
+        : Operator1<CheckForMinusZeroMode>(
+              IrOpcode::kChangeFloat64ToTagged, Operator::kPure,
+              "ChangeFloat64ToTagged", 1, 0, 0, 1, 0, 0, kMode) {}
+  };
+  ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kCheckForMinusZero>
+      kChangeFloat64ToTaggedCheckForMinusZeroOperator;
+  ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+      kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+
+  template <CheckForMinusZeroMode kMode>
+  struct CheckedInt32MulOperator final
+      : public Operator1<CheckForMinusZeroMode> {
+    CheckedInt32MulOperator()
+        : Operator1<CheckForMinusZeroMode>(
+              IrOpcode::kCheckedInt32Mul,
+              Operator::kFoldable | Operator::kNoThrow, "CheckedInt32Mul", 2, 1,
+              1, 1, 1, 0, kMode) {}
+  };
+  CheckedInt32MulOperator<CheckForMinusZeroMode::kCheckForMinusZero>
+      kCheckedInt32MulCheckForMinusZeroOperator;
+  CheckedInt32MulOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+      kCheckedInt32MulDontCheckForMinusZeroOperator;
+
+  template <CheckForMinusZeroMode kMode>
+  struct CheckedFloat64ToInt32Operator final
+      : public Operator1<CheckForMinusZeroMode> {
+    CheckedFloat64ToInt32Operator()
+        : Operator1<CheckForMinusZeroMode>(
+              IrOpcode::kCheckedFloat64ToInt32,
+              Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32",
+              1, 1, 1, 1, 1, 0, kMode) {}
+  };
+  CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
+      kCheckedFloat64ToInt32CheckForMinusZeroOperator;
+  CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+      kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+
+  template <CheckForMinusZeroMode kMode>
+  struct CheckedTaggedToInt32Operator final
+      : public Operator1<CheckForMinusZeroMode> {
+    CheckedTaggedToInt32Operator()
+        : Operator1<CheckForMinusZeroMode>(
+              IrOpcode::kCheckedTaggedToInt32,
+              Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt32",
+              1, 1, 1, 1, 1, 0, kMode) {}
+  };
+  CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
+      kCheckedTaggedToInt32CheckForMinusZeroOperator;
+  CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+      kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+
+  template <CheckTaggedInputMode kMode>
+  struct CheckedTaggedToFloat64Operator final
+      : public Operator1<CheckTaggedInputMode> {
+    CheckedTaggedToFloat64Operator()
+        : Operator1<CheckTaggedInputMode>(
+              IrOpcode::kCheckedTaggedToFloat64,
+              Operator::kFoldable | Operator::kNoThrow,
+              "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0, kMode) {}
+  };
+  CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber>
+      kCheckedTaggedToFloat64NumberOperator;
+  CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrOddball>
+      kCheckedTaggedToFloat64NumberOrOddballOperator;
+
   template <CheckFloat64HoleMode kMode>
   struct CheckFloat64HoleNaNOperator final
       : public Operator1<CheckFloat64HoleMode> {
@@ -355,19 +541,6 @@
   CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kNeverReturnHole>
       kCheckFloat64HoleNeverReturnHoleOperator;
 
-  template <CheckTaggedHoleMode kMode>
-  struct CheckTaggedHoleOperator final : public Operator1<CheckTaggedHoleMode> {
-    CheckTaggedHoleOperator()
-        : Operator1<CheckTaggedHoleMode>(
-              IrOpcode::kCheckTaggedHole,
-              Operator::kFoldable | Operator::kNoThrow, "CheckTaggedHole", 1, 1,
-              1, 1, 1, 0, kMode) {}
-  };
-  CheckTaggedHoleOperator<CheckTaggedHoleMode::kConvertHoleToUndefined>
-      kCheckTaggedHoleConvertHoleToUndefinedOperator;
-  CheckTaggedHoleOperator<CheckTaggedHoleMode::kNeverReturnHole>
-      kCheckTaggedHoleNeverReturnHoleOperator;
-
   template <PretenureFlag kPretenure>
   struct AllocateOperator final : public Operator1<PretenureFlag> {
     AllocateOperator()
@@ -379,6 +552,33 @@
   AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
   AllocateOperator<TENURED> kAllocateTenuredOperator;
 
+  struct EnsureWritableFastElementsOperator final : public Operator {
+    EnsureWritableFastElementsOperator()
+        : Operator(                                     // --
+              IrOpcode::kEnsureWritableFastElements,    // opcode
+              Operator::kNoDeopt | Operator::kNoThrow,  // flags
+              "EnsureWritableFastElements",             // name
+              2, 1, 1, 1, 1, 0) {}                      // counts
+  };
+  EnsureWritableFastElementsOperator kEnsureWritableFastElements;
+
+#define SPECULATIVE_NUMBER_BINOP(Name)                                      \
+  template <NumberOperationHint kHint>                                      \
+  struct Name##Operator final : public Operator1<NumberOperationHint> {     \
+    Name##Operator()                                                        \
+        : Operator1<NumberOperationHint>(                                   \
+              IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow,  \
+              #Name, 2, 1, 1, 1, 1, 0, kHint) {}                            \
+  };                                                                        \
+  Name##Operator<NumberOperationHint::kSignedSmall>                         \
+      k##Name##SignedSmallOperator;                                         \
+  Name##Operator<NumberOperationHint::kSigned32> k##Name##Signed32Operator; \
+  Name##Operator<NumberOperationHint::kNumber> k##Name##NumberOperator;     \
+  Name##Operator<NumberOperationHint::kNumberOrOddball>                     \
+      k##Name##NumberOrOddballOperator;
+  SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+#undef SPECULATIVE_NUMBER_BINOP
+
 #define BUFFER_ACCESS(Type, type, TYPE, ctype, size)                          \
   struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> {  \
     LoadBuffer##Type##Operator()                                              \
@@ -410,16 +610,84 @@
 SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
     : cache_(kCache.Get()), zone_(zone) {}
 
-#define GET_FROM_CACHE(Name, properties, input_count) \
+#define GET_FROM_CACHE(Name, ...) \
   const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
 PURE_OP_LIST(GET_FROM_CACHE)
-#undef GET_FROM_CACHE
-
-#define GET_FROM_CACHE(Name, value_input_count) \
-  const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
 CHECKED_OP_LIST(GET_FROM_CACHE)
 #undef GET_FROM_CACHE
 
+const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
+    CheckForMinusZeroMode mode) {
+  switch (mode) {
+    case CheckForMinusZeroMode::kCheckForMinusZero:
+      return &cache_.kChangeFloat64ToTaggedCheckForMinusZeroOperator;
+    case CheckForMinusZeroMode::kDontCheckForMinusZero:
+      return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
+    CheckForMinusZeroMode mode) {
+  switch (mode) {
+    case CheckForMinusZeroMode::kCheckForMinusZero:
+      return &cache_.kCheckedInt32MulCheckForMinusZeroOperator;
+    case CheckForMinusZeroMode::kDontCheckForMinusZero:
+      return &cache_.kCheckedInt32MulDontCheckForMinusZeroOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
+    CheckForMinusZeroMode mode) {
+  switch (mode) {
+    case CheckForMinusZeroMode::kCheckForMinusZero:
+      return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
+    case CheckForMinusZeroMode::kDontCheckForMinusZero:
+      return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
+    CheckForMinusZeroMode mode) {
+  switch (mode) {
+    case CheckForMinusZeroMode::kCheckForMinusZero:
+      return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
+    case CheckForMinusZeroMode::kDontCheckForMinusZero:
+      return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
+    CheckTaggedInputMode mode) {
+  switch (mode) {
+    case CheckTaggedInputMode::kNumber:
+      return &cache_.kCheckedTaggedToFloat64NumberOperator;
+    case CheckTaggedInputMode::kNumberOrOddball:
+      return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckMaps(int map_input_count) {
+  // TODO(bmeurer): Cache the most important versions of this operator.
+  DCHECK_LT(0, map_input_count);
+  int const value_input_count = 1 + map_input_count;
+  return new (zone()) Operator1<int>(           // --
+      IrOpcode::kCheckMaps,                     // opcode
+      Operator::kNoThrow | Operator::kNoWrite,  // flags
+      "CheckMaps",                              // name
+      value_input_count, 1, 1, 0, 1, 0,         // counts
+      map_input_count);                         // parameter
+}
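The operator takes the checked object plus one value input per candidate map.
For example (illustrative only):

    simplified()->CheckMaps(2)   // value inputs: object, map0, map1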
+
 const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
     CheckFloat64HoleMode mode) {
   switch (mode) {
@@ -432,46 +700,28 @@
   return nullptr;
 }
 
-const Operator* SimplifiedOperatorBuilder::CheckTaggedHole(
-    CheckTaggedHoleMode mode) {
-  switch (mode) {
-    case CheckTaggedHoleMode::kConvertHoleToUndefined:
-      return &cache_.kCheckTaggedHoleConvertHoleToUndefinedOperator;
-    case CheckTaggedHoleMode::kNeverReturnHole:
-      return &cache_.kCheckTaggedHoleNeverReturnHoleOperator;
-  }
-  UNREACHABLE();
-  return nullptr;
+const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
+  return &cache_.kEnsureWritableFastElements;
 }
 
-const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
-  return new (zone()) Operator(IrOpcode::kReferenceEqual,
-                               Operator::kCommutative | Operator::kPure,
-                               "ReferenceEqual", 2, 0, 0, 1, 0, 0);
+const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements(
+    GrowFastElementsFlags flags) {
+  return new (zone()) Operator1<GrowFastElementsFlags>(  // --
+      IrOpcode::kMaybeGrowFastElements,                  // opcode
+      Operator::kNoThrow,                                // flags
+      "MaybeGrowFastElements",                           // name
+      4, 1, 1, 1, 1, 0,                                  // counts
+      flags);                                            // parameter
 }
 
-const Operator* SimplifiedOperatorBuilder::CheckBounds() {
-  // TODO(bmeurer): Cache this operator. Make it pure!
-  return new (zone())
-      Operator(IrOpcode::kCheckBounds, Operator::kFoldable | Operator::kNoThrow,
-               "CheckBounds", 2, 1, 1, 1, 1, 0);
-}
-
-const Operator* SimplifiedOperatorBuilder::TypeGuard(Type* type) {
-  class TypeGuardOperator final : public Operator1<Type*> {
-   public:
-    explicit TypeGuardOperator(Type* type)
-        : Operator1<Type*>(                           // --
-              IrOpcode::kTypeGuard, Operator::kPure,  // opcode
-              "TypeGuard",                            // name
-              1, 0, 1, 1, 0, 0,                       // counts
-              type) {}                                // parameter
-
-    void PrintParameter(std::ostream& os) const final {
-      parameter()->PrintTo(os);
-    }
-  };
-  return new (zone()) TypeGuardOperator(type);
+const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
+    ElementsTransition transition) {
+  return new (zone()) Operator1<ElementsTransition>(  // --
+      IrOpcode::kTransitionElementsKind,              // opcode
+      Operator::kNoDeopt | Operator::kNoThrow,        // flags
+      "TransitionElementsKind",                       // name
+      3, 1, 1, 0, 1, 0,                               // counts
+      transition);                                    // parameter
 }
 
 const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
@@ -511,45 +761,31 @@
   return nullptr;
 }
 
-#define SPECULATIVE_BINOP_DEF(Name)                                            \
-  const Operator* SimplifiedOperatorBuilder::Name(                             \
-      BinaryOperationHints::Hint hint) {                                       \
-    return new (zone()) Operator1<BinaryOperationHints::Hint>(                 \
-        IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, #Name, 2, \
-        1, 1, 1, 1, 0, hint);                                                  \
+#define SPECULATIVE_NUMBER_BINOP(Name)                                        \
+  const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \
+    switch (hint) {                                                           \
+      case NumberOperationHint::kSignedSmall:                                 \
+        return &cache_.k##Name##SignedSmallOperator;                          \
+      case NumberOperationHint::kSigned32:                                    \
+        return &cache_.k##Name##Signed32Operator;                             \
+      case NumberOperationHint::kNumber:                                      \
+        return &cache_.k##Name##NumberOperator;                               \
+      case NumberOperationHint::kNumberOrOddball:                             \
+        return &cache_.k##Name##NumberOrOddballOperator;                      \
+    }                                                                         \
+    UNREACHABLE();                                                            \
+    return nullptr;                                                           \
   }
-SPECULATIVE_BINOP_LIST(SPECULATIVE_BINOP_DEF)
-#undef SPECULATIVE_BINOP_DEF
+SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+#undef SPECULATIVE_NUMBER_BINOP
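Returning pointers into the global cache avoids a fresh zone allocation per
request and makes repeated lookups with the same hint yield the very same
operator instance, e.g.:

    simplified()->SpeculativeNumberAdd(NumberOperationHint::kSigned32) ==
        simplified()->SpeculativeNumberAdd(NumberOperationHint::kSigned32)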
 
-const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
-    CompareOperationHints::Hint hint) {
-  return new (zone()) Operator1<CompareOperationHints::Hint>(
-      IrOpcode::kSpeculativeNumberEqual,
-      Operator::kFoldable | Operator::kNoThrow, "SpeculativeNumberEqual", 2, 1,
-      1, 1, 1, 0, hint);
-}
-
-const Operator* SimplifiedOperatorBuilder::SpeculativeNumberLessThan(
-    CompareOperationHints::Hint hint) {
-  return new (zone()) Operator1<CompareOperationHints::Hint>(
-      IrOpcode::kSpeculativeNumberLessThan,
-      Operator::kFoldable | Operator::kNoThrow, "SpeculativeNumberLessThan", 2,
-      1, 1, 1, 1, 0, hint);
-}
-
-const Operator* SimplifiedOperatorBuilder::SpeculativeNumberLessThanOrEqual(
-    CompareOperationHints::Hint hint) {
-  return new (zone()) Operator1<CompareOperationHints::Hint>(
-      IrOpcode::kSpeculativeNumberLessThanOrEqual,
-      Operator::kFoldable | Operator::kNoThrow,
-      "SpeculativeNumberLessThanOrEqual", 2, 1, 1, 1, 1, 0, hint);
-}
-
-#define ACCESS_OP_LIST(V)                                    \
-  V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1)     \
-  V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0)     \
-  V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
-  V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
+#define ACCESS_OP_LIST(V)                                             \
+  V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1)              \
+  V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0)              \
+  V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1)          \
+  V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)          \
+  V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+  V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
 
 #define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
                output_count)                                                   \
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index ffdf33f..5e7fa75 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -7,7 +7,7 @@
 
 #include <iosfwd>
 
-#include "src/compiler/type-hints.h"
+#include "src/compiler/operator.h"
 #include "src/handles.h"
 #include "src/machine-type.h"
 #include "src/objects.h"
@@ -79,6 +79,9 @@
 
 FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
+template <>
+void Operator1<FieldAccess>::PrintParameter(std::ostream& os,
+                                            PrintVerbosity verbose) const;
 
 // An access descriptor for loads/stores of indexed structures like characters
 // in strings or off-heap backing stores. Accesses from either tagged or
@@ -103,6 +106,8 @@
 
 ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
+ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
+
 enum class CheckFloat64HoleMode : uint8_t {
   kNeverReturnHole,  // Never return the hole (deoptimize instead).
   kAllowReturnHole   // Allow to return the hole (signaling NaN).
@@ -114,22 +119,70 @@
 
 CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*) WARN_UNUSED_RESULT;
 
-enum class CheckTaggedHoleMode : uint8_t {
-  kNeverReturnHole,        // Never return the hole (deoptimize instead).
-  kConvertHoleToUndefined  // Convert the hole to undefined.
+enum class CheckTaggedInputMode : uint8_t {
+  kNumber,
+  kNumberOrOddball,
 };
 
-size_t hash_value(CheckTaggedHoleMode);
+size_t hash_value(CheckTaggedInputMode);
 
-std::ostream& operator<<(std::ostream&, CheckTaggedHoleMode);
+std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
 
-CheckTaggedHoleMode CheckTaggedHoleModeOf(const Operator*) WARN_UNUSED_RESULT;
+CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*) WARN_UNUSED_RESULT;
 
-Type* TypeOf(const Operator* op) WARN_UNUSED_RESULT;
+enum class CheckForMinusZeroMode : uint8_t {
+  kCheckForMinusZero,
+  kDontCheckForMinusZero,
+};
 
-BinaryOperationHints::Hint BinaryOperationHintOf(const Operator* op);
+size_t hash_value(CheckForMinusZeroMode);
 
-CompareOperationHints::Hint CompareOperationHintOf(const Operator* op);
+std::ostream& operator<<(std::ostream&, CheckForMinusZeroMode);
+
+CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+
+// A descriptor for growing elements backing stores.
+enum class GrowFastElementsFlag : uint8_t {
+  kNone = 0u,
+  kArrayObject = 1u << 0,     // Update JSArray::length field.
+  kHoleyElements = 1u << 1,   // Backing store is holey.
+  kDoubleElements = 1u << 2,  // Backing store contains doubles.
+};
+typedef base::Flags<GrowFastElementsFlag> GrowFastElementsFlags;
+
+DEFINE_OPERATORS_FOR_FLAGS(GrowFastElementsFlags)
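The flags combine bitwise through the operators generated above; e.g. a
holey, double-elements JSArray backing store would be described (illustrative
only) as:

    GrowFastElementsFlags flags = GrowFastElementsFlag::kArrayObject |
                                  GrowFastElementsFlag::kDoubleElements |
                                  GrowFastElementsFlag::kHoleyElements;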
+
+std::ostream& operator<<(std::ostream&, GrowFastElementsFlags);
+
+GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator*)
+    WARN_UNUSED_RESULT;
+
+// A descriptor for elements kind transitions.
+enum class ElementsTransition : uint8_t {
+  kFastTransition,  // Simple transition, just updating the map.
+  kSlowTransition   // Full transition, round-trip to the runtime.
+};
+
+size_t hash_value(ElementsTransition);
+
+std::ostream& operator<<(std::ostream&, ElementsTransition);
+
+ElementsTransition ElementsTransitionOf(const Operator* op) WARN_UNUSED_RESULT;
+
+// A hint for speculative number operations.
+enum class NumberOperationHint : uint8_t {
+  kSignedSmall,      // Inputs were always Smi so far, output was in Smi range.
+  kSigned32,         // Inputs and output were Signed32 so far.
+  kNumber,           // Inputs were Number, output was Number.
+  kNumberOrOddball,  // Inputs were Number or Oddball, output was Number.
+};
+
+size_t hash_value(NumberOperationHint);
+
+std::ostream& operator<<(std::ostream&, NumberOperationHint);
+
+NumberOperationHint NumberOperationHintOf(const Operator* op)
+    WARN_UNUSED_RESULT;
 
 // Interface for building simplified operators, which represent the
 // medium-level operations of V8, including adding numbers, allocating objects,
@@ -158,7 +211,6 @@
   explicit SimplifiedOperatorBuilder(Zone* zone);
 
   const Operator* BooleanNot();
-  const Operator* BooleanToNumber();
 
   const Operator* NumberEqual();
   const Operator* NumberLessThan();
@@ -180,45 +232,61 @@
   const Operator* NumberCeil();
   const Operator* NumberFloor();
   const Operator* NumberFround();
+  const Operator* NumberAcos();
+  const Operator* NumberAcosh();
+  const Operator* NumberAsin();
+  const Operator* NumberAsinh();
   const Operator* NumberAtan();
   const Operator* NumberAtan2();
   const Operator* NumberAtanh();
   const Operator* NumberCbrt();
   const Operator* NumberCos();
+  const Operator* NumberCosh();
   const Operator* NumberExp();
   const Operator* NumberExpm1();
   const Operator* NumberLog();
   const Operator* NumberLog1p();
   const Operator* NumberLog10();
   const Operator* NumberLog2();
+  const Operator* NumberMax();
+  const Operator* NumberMin();
+  const Operator* NumberPow();
   const Operator* NumberRound();
+  const Operator* NumberSign();
   const Operator* NumberSin();
+  const Operator* NumberSinh();
   const Operator* NumberSqrt();
   const Operator* NumberTan();
+  const Operator* NumberTanh();
   const Operator* NumberTrunc();
   const Operator* NumberToInt32();
   const Operator* NumberToUint32();
 
   const Operator* NumberSilenceNaN();
 
-  const Operator* SpeculativeNumberAdd(BinaryOperationHints::Hint hint);
-  const Operator* SpeculativeNumberSubtract(BinaryOperationHints::Hint hint);
-  const Operator* SpeculativeNumberMultiply(BinaryOperationHints::Hint hint);
-  const Operator* SpeculativeNumberDivide(BinaryOperationHints::Hint hint);
-  const Operator* SpeculativeNumberModulus(BinaryOperationHints::Hint hint);
+  const Operator* SpeculativeNumberAdd(NumberOperationHint hint);
+  const Operator* SpeculativeNumberSubtract(NumberOperationHint hint);
+  const Operator* SpeculativeNumberMultiply(NumberOperationHint hint);
+  const Operator* SpeculativeNumberDivide(NumberOperationHint hint);
+  const Operator* SpeculativeNumberModulus(NumberOperationHint hint);
+  const Operator* SpeculativeNumberShiftLeft(NumberOperationHint hint);
+  const Operator* SpeculativeNumberShiftRight(NumberOperationHint hint);
+  const Operator* SpeculativeNumberShiftRightLogical(NumberOperationHint hint);
+  const Operator* SpeculativeNumberBitwiseAnd(NumberOperationHint hint);
+  const Operator* SpeculativeNumberBitwiseOr(NumberOperationHint hint);
+  const Operator* SpeculativeNumberBitwiseXor(NumberOperationHint hint);
 
-  const Operator* SpeculativeNumberLessThan(CompareOperationHints::Hint hint);
-  const Operator* SpeculativeNumberLessThanOrEqual(
-      CompareOperationHints::Hint hint);
-  const Operator* SpeculativeNumberEqual(CompareOperationHints::Hint hint);
+  const Operator* SpeculativeNumberLessThan(NumberOperationHint hint);
+  const Operator* SpeculativeNumberLessThanOrEqual(NumberOperationHint hint);
+  const Operator* SpeculativeNumberEqual(NumberOperationHint hint);
 
-  const Operator* ReferenceEqual(Type* type);
+  const Operator* ReferenceEqual();
 
   const Operator* StringEqual();
   const Operator* StringLessThan();
   const Operator* StringLessThanOrEqual();
+  const Operator* StringCharCodeAt();
   const Operator* StringFromCharCode();
-  const Operator* StringToNumber();
 
   const Operator* PlainPrimitiveToNumber();
   const Operator* PlainPrimitiveToWord32();
@@ -231,25 +299,37 @@
   const Operator* ChangeInt31ToTaggedSigned();
   const Operator* ChangeInt32ToTagged();
   const Operator* ChangeUint32ToTagged();
-  const Operator* ChangeFloat64ToTagged();
+  const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode);
   const Operator* ChangeTaggedToBit();
   const Operator* ChangeBitToTagged();
   const Operator* TruncateTaggedToWord32();
   const Operator* TruncateTaggedToFloat64();
 
+  const Operator* CheckIf();
   const Operator* CheckBounds();
+  const Operator* CheckMaps(int map_input_count);
+  const Operator* CheckNumber();
+  const Operator* CheckString();
   const Operator* CheckTaggedPointer();
   const Operator* CheckTaggedSigned();
 
   const Operator* CheckedInt32Add();
   const Operator* CheckedInt32Sub();
+  const Operator* CheckedInt32Div();
+  const Operator* CheckedInt32Mod();
+  const Operator* CheckedUint32Div();
+  const Operator* CheckedUint32Mod();
+  const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
   const Operator* CheckedUint32ToInt32();
-  const Operator* CheckedFloat64ToInt32();
-  const Operator* CheckedTaggedToInt32();
-  const Operator* CheckedTaggedToFloat64();
+  const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode);
+  const Operator* CheckedTaggedSignedToInt32();
+  const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
+  const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
+  const Operator* CheckedTruncateTaggedToWord32();
 
   const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
-  const Operator* CheckTaggedHole(CheckTaggedHoleMode);
+  const Operator* CheckTaggedHole();
+  const Operator* ConvertTaggedHoleToUndefined();
 
   const Operator* ObjectIsCallable();
   const Operator* ObjectIsNumber();
@@ -258,7 +338,14 @@
   const Operator* ObjectIsString();
   const Operator* ObjectIsUndetectable();
 
-  const Operator* TypeGuard(Type* type);
+  // ensure-writable-fast-elements object, elements
+  const Operator* EnsureWritableFastElements();
+
+  // maybe-grow-fast-elements object, elements, index, length
+  const Operator* MaybeGrowFastElements(GrowFastElementsFlags flags);
+
+  // transition-elements-kind object, from-map, to-map
+  const Operator* TransitionElementsKind(ElementsTransition transition);
 
   const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
 
@@ -271,12 +358,18 @@
   // store-buffer buffer, offset, length, value
   const Operator* StoreBuffer(BufferAccess);
 
-  // load-element [base + index], length
+  // load-element [base + index]
   const Operator* LoadElement(ElementAccess const&);
 
-  // store-element [base + index], length, value
+  // store-element [base + index], value
   const Operator* StoreElement(ElementAccess const&);
 
+  // load-typed-element buffer, [base + external + index]
+  const Operator* LoadTypedElement(ExternalArrayType const&);
+
+  // store-typed-element buffer, [base + external + index], value
+  const Operator* StoreTypedElement(ExternalArrayType const&);
+
  private:
   Zone* zone() const { return zone_; }
 
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
index 912f188..d4df783 100644
--- a/src/compiler/source-position.h
+++ b/src/compiler/source-position.h
@@ -5,8 +5,8 @@
 #ifndef V8_COMPILER_SOURCE_POSITION_H_
 #define V8_COMPILER_SOURCE_POSITION_H_
 
-#include "src/assembler.h"
 #include "src/compiler/node-aux-data.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -25,7 +25,7 @@
   int raw() const { return raw_; }
 
  private:
-  static const int kUnknownPosition = RelocInfo::kNoPosition;
+  static const int kUnknownPosition = kNoSourcePosition;
   int raw_;
 };
 
diff --git a/src/compiler/store-store-elimination.cc b/src/compiler/store-store-elimination.cc
index a469b20..98904b0 100644
--- a/src/compiler/store-store-elimination.cc
+++ b/src/compiler/store-store-elimination.cc
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <iterator>
+
 #include "src/compiler/store-store-elimination.h"
 
 #include "src/compiler/all-nodes.h"
@@ -13,250 +15,554 @@
 namespace internal {
 namespace compiler {
 
-#define TRACE(fmt, ...)                                              \
-  do {                                                               \
-    if (FLAG_trace_store_elimination) {                              \
-      PrintF("StoreStoreElimination::ReduceEligibleNode: " fmt "\n", \
-             ##__VA_ARGS__);                                         \
-    }                                                                \
+#define TRACE(fmt, ...)                                         \
+  do {                                                          \
+    if (FLAG_trace_store_elimination) {                         \
+      PrintF("RedundantStoreFinder: " fmt "\n", ##__VA_ARGS__); \
+    }                                                           \
   } while (false)
 
-// A simple store-store elimination. When the effect chain contains the
-// following sequence,
-//
-// - StoreField[[+off_1]](x1, y1)
-// - StoreField[[+off_2]](x2, y2)
-// - StoreField[[+off_3]](x3, y3)
-//   ...
-// - StoreField[[+off_n]](xn, yn)
-//
-// where the xes are the objects and the ys are the values to be stored, then
-// we are going to say that a store is superfluous if the same offset of the
-// same object will be stored to in the future. If off_i == off_j and xi == xj
-// and i < j, then we optimize the i'th StoreField away.
-//
-// This optimization should be initiated on the last StoreField in such a
-// sequence.
-//
-// The algorithm works by walking the effect chain from the last StoreField
-// upwards. While walking, we maintain a map {futureStore} from offsets to
-// nodes; initially it is empty. As we walk the effect chain upwards, if
-// futureStore[off] = n, then any store to node {n} with offset {off} is
-// guaranteed to be useless because we do a full-width[1] store to that offset
-// of that object in the near future anyway. For example, for this effect
-// chain
-//
-// 71: StoreField(60, 0)
-// 72: StoreField(65, 8)
-// 73: StoreField(63, 8)
-// 74: StoreField(65, 16)
-// 75: StoreField(62, 8)
-//
-// just before we get to 72, we will have futureStore = {8: 63, 16: 65}.
-//
-// Here is the complete process.
-//
-// - We are at the end of a sequence of consecutive StoreFields.
-// - We start out with futureStore = empty.
-// - We then walk the effect chain upwards to find the next StoreField [2].
-//
-//   1. If the offset is not a key of {futureStore} yet, we put it in.
-//   2. If the offset is a key of {futureStore}, but futureStore[offset] is a
-//      different node, we overwrite futureStore[offset] with the current node.
-//   3. If the offset is a key of {futureStore} and futureStore[offset] equals
-//      this node, we eliminate this StoreField.
-//
-//   As long as the current effect input points to a node with a single effect
-//   output, and as long as its opcode is StoreField, we keep traversing
-//   upwards.
-//
-// [1] This optimization is unsound if we optimize away a store to an offset
-//   because we store to the same offset in the future, even though the future
-//   store is narrower than the store we optimize away. Therefore, in case (1)
-//   and (2) we only add/overwrite to the dictionary when the field access has
-//   maximal size. For simplicity of implementation, we do not try to detect
-//   case (3).
-//
-// [2] We make sure that we only traverse the linear part, that is, the part
-//   where every node has exactly one incoming and one outgoing effect edge.
-//   Also, we only keep walking upwards as long as we keep finding consecutive
-//   StoreFields on the same node.
+// CHECK_EXTRA is like CHECK, but has two or more arguments: a boolean
+// expression, a format string, and any number of extra arguments. The boolean
+// expression will be evaluated at runtime. If it evaluates to false, then an
+// error message is printed containing the condition, together with the extra
+// info formatted printf-style.
+#define CHECK_EXTRA(condition, fmt, ...)                                 \
+  do {                                                                   \
+    if (V8_UNLIKELY(!(condition))) {                                     \
+      V8_Fatal(__FILE__, __LINE__, "Check failed: %s. Extra info: " fmt, \
+               #condition, ##__VA_ARGS__);                               \
+    }                                                                    \
+  } while (0)
 
-StoreStoreElimination::StoreStoreElimination(JSGraph* js_graph, Zone* temp_zone)
-    : jsgraph_(js_graph), temp_zone_(temp_zone) {}
+#ifdef DEBUG
+#define DCHECK_EXTRA(condition, fmt, ...) \
+  CHECK_EXTRA(condition, fmt, ##__VA_ARGS__)
+#else
+#define DCHECK_EXTRA(condition, fmt, ...) ((void)0)
+#endif
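+
+// Example use, as in RedundantStoreFinder::Find below:
+//   DCHECK_EXTRA(HasBeenVisited(node), "#%d:%s", node->id(),
+//                node->op()->mnemonic());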
 
-StoreStoreElimination::~StoreStoreElimination() {}
+// Store-store elimination.
+//
+// The aim of this optimization is to detect the following pattern in the
+// effect graph:
+//
+// - StoreField[+24, kRepTagged](263, ...)
+//
+//   ... lots of nodes from which the field at offset 24 of the object
+//       returned by node #263 cannot be observed ...
+//
+// - StoreField[+24, kRepTagged](263, ...)
+//
+// In such situations, the earlier StoreField cannot be observed and can be
+// eliminated. The pattern is matched for any offset and any stored-to node,
+// not just the ones shown above.
+//
+// The optimization also works across control-flow splits. It currently does
+// not apply inside loops, because we tend to put a stack check in loops, and,
+// like deopts, stack checks can observe anything.
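+//
+// To illustrate the split case (hypothetical node numbers): if both arms of a
+// Branch end with StoreField[+24](263, ...) before anything can observe that
+// field, then a StoreField[+24](263, ...) above the Branch is redundant too.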
 
-void StoreStoreElimination::Run() {
-  // The store-store elimination performs work on chains of certain types of
-  // nodes. The elimination must be invoked on the lowest node in such a
-  // chain; we have a helper function IsEligibleNode that returns true
-  // precisely on the lowest node in such a chain.
-  //
-  // Because the elimination removes nodes from the graph, even remove nodes
-  // that the elimination was not invoked on, we cannot use a normal
-  // AdvancedReducer but we manually find which nodes to invoke the
-  // elimination on. Then in a next step, we invoke the elimination for each
-  // node that was eligible.
-
-  NodeVector eligible(temp_zone());  // loops over all nodes
-  AllNodes all(temp_zone(), jsgraph()->graph());
-
-  for (Node* node : all.live) {
-    if (IsEligibleNode(node)) {
-      eligible.push_back(node);
-    }
-  }
-
-  for (Node* node : eligible) {
-    ReduceEligibleNode(node);
-  }
-}
+// Assumption: every byte of a JS object is only ever accessed through one
+// offset. For instance, byte 15 of a given object may be accessed using a
+// two-byte read at offset 14, or a four-byte read at offset 12, but never
+// both in the same program.
+//
+// This implementation needs all dead nodes removed from the graph, and the
+// graph should be trimmed.
 
 namespace {
 
 // 16 bits was chosen fairly arbitrarily; it seems enough now. 8 bits is too
 // few.
-typedef uint16_t Offset;
+typedef uint16_t StoreOffset;
+
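+// An UnobservableStore identifies a store target: the field at {offset_} of
+// the object produced by the node with id {id_}. Instances live in ordered
+// ZoneSets, hence operator< below.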
+struct UnobservableStore {
+  NodeId id_;
+  StoreOffset offset_;
+
+  bool operator==(const UnobservableStore) const;
+  bool operator!=(const UnobservableStore) const;
+  bool operator<(const UnobservableStore) const;
+};
+
+}  // namespace
+
+namespace {
+
+// Instances of UnobservablesSet are immutable. They represent either a set of
+// UnobservableStores, or the "unvisited empty set".
+//
+// We apply some sharing to save memory. An UnobservablesSet is only a pointer
+// wide, and copying one allocates no heap (or temp_zone) memory; most
+// operations that change a set, however, allocate a new backing set in the
+// temp_zone. A non-unvisited instance therefore costs one pointer plus the
+// zone space of its (possibly shared) backing set.
+class UnobservablesSet final {
+ public:
+  static UnobservablesSet Unvisited();
+  static UnobservablesSet VisitedEmpty(Zone* zone);
+  UnobservablesSet();  // unvisited
+  UnobservablesSet(const UnobservablesSet& other) : set_(other.set_) {}
+
+  UnobservablesSet Intersect(UnobservablesSet other, Zone* zone) const;
+  UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
+  UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
+
+  const ZoneSet<UnobservableStore>* set() const { return set_; }
+
+  bool IsUnvisited() const { return set_ == nullptr; }
+  bool IsEmpty() const { return set_ == nullptr || set_->empty(); }
+  bool Contains(UnobservableStore obs) const {
+    return set_ != nullptr && (set_->find(obs) != set_->end());
+  }
+
+  bool operator==(const UnobservablesSet&) const;
+  bool operator!=(const UnobservablesSet&) const;
+
+ private:
+  explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set)
+      : set_(set) {}
+  const ZoneSet<UnobservableStore>* set_;
+};
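+
+// Illustrative use of the copy-on-write interface (hypothetical values):
+//   UnobservablesSet s = UnobservablesSet::VisitedEmpty(zone);
+//   s = s.Add({store->id(), offset}, zone);    // returns a new set
+//   s = s.RemoveSameOffset(offset, zone);      // the receiver is unchanged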
+
+}  // namespace
+
+namespace {
+
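+// Walks the graph backwards from End, computing for every node the set of
+// stores that cannot be observed from it. A StoreField whose (object, offset)
+// pair is already unobservable in the intersection of its effect uses is
+// redundant and gets collected in {to_remove_}.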
+class RedundantStoreFinder final {
+ public:
+  RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone);
+
+  void Find();
+
+  const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
+
+  void Visit(Node* node);
+
+ private:
+  static bool IsEffectful(Node* node);
+  void VisitEffectfulNode(Node* node);
+  UnobservablesSet RecomputeUseIntersection(Node* node);
+  UnobservablesSet RecomputeSet(Node* node, UnobservablesSet uses);
+  static bool CannotObserveStoreField(Node* node);
+
+  void MarkForRevisit(Node* node);
+  bool HasBeenVisited(Node* node);
+
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Zone* temp_zone() const { return temp_zone_; }
+  ZoneVector<UnobservablesSet>& unobservable() { return unobservable_; }
+  UnobservablesSet& unobservable_for_id(NodeId id) {
+    DCHECK_LT(id, unobservable().size());
+    return unobservable()[id];
+  }
+  ZoneSet<Node*>& to_remove() { return to_remove_; }
+
+  JSGraph* const jsgraph_;
+  Zone* const temp_zone_;
+
+  ZoneStack<Node*> revisit_;
+  ZoneVector<bool> in_revisit_;
+  // Maps node IDs to UnobservablesSets.
+  ZoneVector<UnobservablesSet> unobservable_;
+  ZoneSet<Node*> to_remove_;
+  const UnobservablesSet unobservables_visited_empty_;
+};
 
 // To safely cast an offset from a FieldAccess, which has a wider range
 // (namely int).
-Offset ToOffset(int offset) {
-  CHECK(0 <= offset && offset < (1 << 8 * sizeof(Offset)));
-  return (Offset)offset;
+StoreOffset ToOffset(int offset) {
+  CHECK(0 <= offset && offset < (1 << 8 * sizeof(StoreOffset)));
+  return (StoreOffset)offset;
 }
 
-Offset ToOffset(const FieldAccess& access) { return ToOffset(access.offset); }
-
-// If node has a single effect use, return that node. If node has no or
-// multiple effect uses, return nullptr.
-Node* SingleEffectUse(Node* node) {
-  Node* last_use = nullptr;
-  for (Edge edge : node->use_edges()) {
-    if (!NodeProperties::IsEffectEdge(edge)) {
-      continue;
-    }
-    if (last_use != nullptr) {
-      // more than one
-      return nullptr;
-    }
-    last_use = edge.from();
-    DCHECK_NOT_NULL(last_use);
-  }
-  return last_use;
+StoreOffset ToOffset(const FieldAccess& access) {
+  return ToOffset(access.offset);
 }
 
-// Return true if node is the last consecutive StoreField node in a linear
-// part of the effect chain.
-bool IsEndOfStoreFieldChain(Node* node) {
-  Node* next_on_chain = SingleEffectUse(node);
-  return (next_on_chain == nullptr ||
-          next_on_chain->op()->opcode() != IrOpcode::kStoreField);
+unsigned int RepSizeOf(MachineRepresentation rep) {
+  return 1u << ElementSizeLog2Of(rep);
+}
+unsigned int RepSizeOf(FieldAccess access) {
+  return RepSizeOf(access.machine_type.representation());
 }
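+// For example, RepSizeOf(MachineRepresentation::kWord8) == 1, and on a 64-bit
+// target RepSizeOf(MachineRepresentation::kTagged) == 8.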
 
-// The argument must be a StoreField node. If there is a node before it in the
-// effect chain, and if this part of the effect chain is linear (no other
-// effect uses of that previous node), then return that previous node.
-// Otherwise, return nullptr.
-//
-// The returned node need not be a StoreField.
-Node* PreviousEffectBeforeStoreField(Node* node) {
-  DCHECK_EQ(node->op()->opcode(), IrOpcode::kStoreField);
-  DCHECK_EQ(node->op()->EffectInputCount(), 1);
-
-  Node* previous = NodeProperties::GetEffectInput(node);
-  if (previous != nullptr && node == SingleEffectUse(previous)) {
-    return previous;
-  } else {
-    return nullptr;
-  }
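+// Eliminating a store is only sound if a later store fully covers it, so
+// RecomputeSet below only removes stores that are AtMostTagged and only
+// records stores that are AtLeastTagged.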
+bool AtMostTagged(FieldAccess access) {
+  return RepSizeOf(access) <= RepSizeOf(MachineRepresentation::kTagged);
 }
 
-size_t rep_size_of(MachineRepresentation rep) {
-  return ((size_t)1) << ElementSizeLog2Of(rep);
-}
-size_t rep_size_of(FieldAccess access) {
-  return rep_size_of(access.machine_type.representation());
+bool AtLeastTagged(FieldAccess access) {
+  return RepSizeOf(access) >= RepSizeOf(MachineRepresentation::kTagged);
 }
 
 }  // namespace
 
-bool StoreStoreElimination::IsEligibleNode(Node* node) {
-  return (node->op()->opcode() == IrOpcode::kStoreField) &&
-         IsEndOfStoreFieldChain(node);
+void RedundantStoreFinder::Find() {
+  Visit(jsgraph()->graph()->end());
+
+  while (!revisit_.empty()) {
+    Node* next = revisit_.top();
+    revisit_.pop();
+    DCHECK_LT(next->id(), in_revisit_.size());
+    in_revisit_[next->id()] = false;
+    Visit(next);
+  }
+
+#ifdef DEBUG
+  // Check that we visited all the StoreFields
+  AllNodes all(temp_zone(), jsgraph()->graph());
+  for (Node* node : all.reachable) {
+    if (node->op()->opcode() == IrOpcode::kStoreField) {
+      DCHECK_EXTRA(HasBeenVisited(node), "#%d:%s", node->id(),
+                   node->op()->mnemonic());
+    }
+  }
+#endif
 }
 
-void StoreStoreElimination::ReduceEligibleNode(Node* node) {
-  DCHECK(IsEligibleNode(node));
+void RedundantStoreFinder::MarkForRevisit(Node* node) {
+  DCHECK_LT(node->id(), in_revisit_.size());
+  if (!in_revisit_[node->id()]) {
+    revisit_.push(node);
+    in_revisit_[node->id()] = true;
+  }
+}
 
-  // if (FLAG_trace_store_elimination) {
-  //   PrintF("** StoreStoreElimination::ReduceEligibleNode: activated:
-  //   #%d\n",
-  //          node->id());
-  // }
+bool RedundantStoreFinder::HasBeenVisited(Node* node) {
+  return !unobservable_for_id(node->id()).IsUnvisited();
+}
 
-  TRACE("activated: #%d", node->id());
+void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) {
+  // Find superfluous nodes
+  RedundantStoreFinder finder(js_graph, temp_zone);
+  finder.Find();
 
-  // Initialize empty futureStore.
-  ZoneMap<Offset, Node*> futureStore(temp_zone());
+  // Remove superfluous nodes
 
-  Node* current_node = node;
+  for (Node* node : finder.to_remove_const()) {
+    if (FLAG_trace_store_elimination) {
+      PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n",
+             node->id(), node->op()->mnemonic());
+    }
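+    // Splice the store out of the effect chain: reroute its effect uses to
+    // its own effect input, then kill the node.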
+    Node* previous_effect = NodeProperties::GetEffectInput(node);
+    NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr,
+                                nullptr);
+    node->Kill();
+  }
+}
 
-  do {
-    FieldAccess access = OpParameter<FieldAccess>(current_node->op());
-    Offset offset = ToOffset(access);
-    Node* object_input = current_node->InputAt(0);
+bool RedundantStoreFinder::IsEffectful(Node* node) {
+  return (node->op()->EffectInputCount() >= 1);
+}
 
-    Node* previous = PreviousEffectBeforeStoreField(current_node);
+// Recomputes the unobservables-set for a node, and marks superfluous
+// StoreFields for removal as a side effect.
 
-    CHECK(rep_size_of(access) <= rep_size_of(MachineRepresentation::kTagged));
-    if (rep_size_of(access) == rep_size_of(MachineRepresentation::kTagged)) {
-      // Try to insert. If it was present, this will preserve the original
-      // value.
-      auto insert_result =
-          futureStore.insert(std::make_pair(offset, object_input));
-      if (insert_result.second) {
-        // Key was not present. This means that there is no matching
-        // StoreField to this offset in the future, so we cannot optimize
-        // current_node away. However, we will record the current StoreField
-        // in futureStore, and continue ascending up the chain.
-        TRACE("#%d[[+%d]] -- wide, key not present", current_node->id(),
-              offset);
-      } else if (insert_result.first->second != object_input) {
-        // Key was present, and the value did not equal object_input. This
-        // means
-        // that there is a StoreField to this offset in the future, but the
-        // object instance comes from a different Node. We pessimistically
-        // assume that we cannot optimize current_node away. However, we will
-        // record the current StoreField in futureStore, and continue
-        // ascending up the chain.
-        insert_result.first->second = object_input;
-        TRACE("#%d[[+%d]] -- wide, diff object", current_node->id(), offset);
+UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
+                                                    UnobservablesSet uses) {
+  switch (node->op()->opcode()) {
+    case IrOpcode::kStoreField: {
+      Node* stored_to = node->InputAt(0);
+      FieldAccess access = OpParameter<FieldAccess>(node->op());
+      StoreOffset offset = ToOffset(access);
+
+      UnobservableStore observation = {stored_to->id(), offset};
+      bool isNotObservable = uses.Contains(observation);
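+      // {isNotObservable} means that all paths below already contain a store
+      // to the same slot, so this store cannot be observed.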
+
+      if (isNotObservable && AtMostTagged(access)) {
+        TRACE("  #%d is StoreField[+%d,%s](#%d), unobservable", node->id(),
+              offset, MachineReprToString(access.machine_type.representation()),
+              stored_to->id());
+        to_remove().insert(node);
+        return uses;
+      } else if (isNotObservable && !AtMostTagged(access)) {
+        TRACE(
+            "  #%d is StoreField[+%d,%s](#%d), repeated in future but too "
+            "big to optimize away",
+            node->id(), offset,
+            MachineReprToString(access.machine_type.representation()),
+            stored_to->id());
+        return uses;
+      } else if (!isNotObservable && AtLeastTagged(access)) {
+        TRACE("  #%d is StoreField[+%d,%s](#%d), observable, recording in set",
+              node->id(), offset,
+              MachineReprToString(access.machine_type.representation()),
+              stored_to->id());
+        return uses.Add(observation, temp_zone());
+      } else if (!isNotObservable && !AtLeastTagged(access)) {
+        TRACE(
+            "  #%d is StoreField[+%d,%s](#%d), observable but too small to "
+            "record",
+            node->id(), offset,
+            MachineReprToString(access.machine_type.representation()),
+            stored_to->id());
+        return uses;
       } else {
-        // Key was present, and the value equalled object_input. This means
-        // that soon after in the effect chain, we will do a StoreField to the
-        // same object with the same offset, therefore current_node can be
-        // optimized away. We don't need to update futureStore.
-
-        Node* previous_effect = NodeProperties::GetEffectInput(current_node);
-
-        NodeProperties::ReplaceUses(current_node, nullptr, previous_effect,
-                                    nullptr, nullptr);
-        current_node->Kill();
-        TRACE("#%d[[+%d]] -- wide, eliminated", current_node->id(), offset);
+        UNREACHABLE();
       }
-    } else {
-      TRACE("#%d[[+%d]] -- narrow, not eliminated", current_node->id(), offset);
+      break;
+    }
+    case IrOpcode::kLoadField: {
+      Node* loaded_from = node->InputAt(0);
+      FieldAccess access = OpParameter<FieldAccess>(node->op());
+      StoreOffset offset = ToOffset(access);
+
+      TRACE(
+          "  #%d is LoadField[+%d,%s](#%d), removing all offsets [+%d] from "
+          "set",
+          node->id(), offset,
+          MachineReprToString(access.machine_type.representation()),
+          loaded_from->id(), offset);
+
+      return uses.RemoveSameOffset(offset, temp_zone());
+      break;
+    }
+    default:
+      if (CannotObserveStoreField(node)) {
+        TRACE("  #%d:%s can observe nothing, set stays unchanged", node->id(),
+              node->op()->mnemonic());
+        return uses;
+      } else {
+        TRACE("  #%d:%s might observe anything, recording empty set",
+              node->id(), node->op()->mnemonic());
+        return unobservables_visited_empty_;
+      }
+  }
+  UNREACHABLE();
+  return UnobservablesSet::Unvisited();
+}
+
+bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
+  return node->opcode() == IrOpcode::kCheckedLoad ||
+         node->opcode() == IrOpcode::kLoadElement ||
+         node->opcode() == IrOpcode::kLoad ||
+         node->opcode() == IrOpcode::kStore ||
+         node->opcode() == IrOpcode::kEffectPhi ||
+         node->opcode() == IrOpcode::kStoreElement ||
+         node->opcode() == IrOpcode::kCheckedStore ||
+         node->opcode() == IrOpcode::kUnsafePointerAdd ||
+         node->opcode() == IrOpcode::kRetain;
+}
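+
+// Note: the opcodes above either access element backing stores or raw memory,
+// or merely thread the effect chain; under this pass's aliasing assumption
+// they can never read back a field written by a StoreField.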
+
+// Initialize unobservable_ with js_graph->graph->NodeCount() empty sets.
+RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone)
+    : jsgraph_(js_graph),
+      temp_zone_(temp_zone),
+      revisit_(temp_zone),
+      in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
+      unobservable_(js_graph->graph()->NodeCount(),
+                    UnobservablesSet::Unvisited(), temp_zone),
+      to_remove_(temp_zone),
+      unobservables_visited_empty_(UnobservablesSet::VisitedEmpty(temp_zone)) {}
+
+void RedundantStoreFinder::Visit(Node* node) {
+  // Every effectful node should be reachable from End by first following
+  // control edges and then effect edges. VisitEffectfulNode marks effect
+  // inputs for revisiting when their state may be stale; here we make sure
+  // that every control input is visited at least once.
+
+  if (!HasBeenVisited(node)) {
+    for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+      Node* control_input = NodeProperties::GetControlInput(node, i);
+      if (!HasBeenVisited(control_input)) {
+        MarkForRevisit(control_input);
+      }
+    }
+  }
+
+  bool isEffectful = (node->op()->EffectInputCount() >= 1);
+  if (isEffectful) {
+    VisitEffectfulNode(node);
+    DCHECK(HasBeenVisited(node));
+  }
+
+  if (!HasBeenVisited(node)) {
+    // Mark as visited.
+    unobservable_for_id(node->id()) = unobservables_visited_empty_;
+  }
+}
+
+void RedundantStoreFinder::VisitEffectfulNode(Node* node) {
+  if (HasBeenVisited(node)) {
+    TRACE("- Revisiting: #%d:%s", node->id(), node->op()->mnemonic());
+  }
+  UnobservablesSet after_set = RecomputeUseIntersection(node);
+  UnobservablesSet before_set = RecomputeSet(node, after_set);
+  DCHECK(!before_set.IsUnvisited());
+
+  UnobservablesSet stored_for_node = unobservable_for_id(node->id());
+  bool cur_set_changed =
+      (stored_for_node.IsUnvisited() || stored_for_node != before_set);
+  if (!cur_set_changed) {
+    // The set is unchanged, so revisiting the nodes above this one cannot
+    // change anything either; stop here.
+    TRACE("+ No change: stabilized. Not visiting effect inputs.");
+  } else {
+    unobservable_for_id(node->id()) = before_set;
+
+    // Mark effect inputs for visiting.
+    for (int i = 0; i < node->op()->EffectInputCount(); i++) {
+      Node* input = NodeProperties::GetEffectInput(node, i);
+      if (!HasBeenVisited(input)) {
+        TRACE("    marking #%d:%s for revisit", input->id(),
+              input->op()->mnemonic());
+        MarkForRevisit(input);
+      }
+    }
+  }
+}
+
+// Compute the intersection of the UnobservablesSets of all effect uses and
+// return it. This function only works if {node} has an effect use.
+//
+// The result UnobservablesSet will always be visited.
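+//
+// For example, if one effect use has {A, B} unobservable and another has
+// {B, C}, the result is the intersection {B}.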
+UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) {
+  // {first} == true indicates that we haven't processed any effect use yet;
+  // {first} == false indicates that {cur_set} is the intersection of the
+  // sets of all effect uses processed so far.
+
+  bool first = true;
+  UnobservablesSet cur_set = UnobservablesSet::Unvisited();  // irrelevant
+
+  for (Edge edge : node->use_edges()) {
+    // Skip non-effect edges
+    if (!NodeProperties::IsEffectEdge(edge)) {
+      continue;
     }
 
-    // Regardless of whether we eliminated node {current}, we want to
-    // continue walking up the effect chain.
+    Node* use = edge.from();
+    UnobservablesSet new_set = unobservable_for_id(use->id());
+    // Include new_set in the intersection.
+    if (first) {
+      // The intersection over a single set is that set itself.
+      first = false;
+      cur_set = new_set;
+    } else {
+      // Take the intersection of cur_set and new_set.
+      cur_set = cur_set.Intersect(new_set, temp_zone());
+    }
+  }
 
-    current_node = previous;
-  } while (current_node != nullptr &&
-           current_node->op()->opcode() == IrOpcode::kStoreField);
+  if (first) {
+    // There were no effect uses.
+    auto opcode = node->op()->opcode();
+    // List of opcodes that may end this effect chain. The exact contents of
+    // the list are not important to the soundness of this optimization; the
+    // DCHECK is only a sanity check. Extend the list as needed.
+    //
+    // Everything is observable after these opcodes; return the empty set.
+    DCHECK_EXTRA(
+        opcode == IrOpcode::kReturn || opcode == IrOpcode::kTerminate ||
+            opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow,
+        "for #%d:%s", node->id(), node->op()->mnemonic());
+    USE(opcode);  // silence warning about unused variable in release mode
 
-  TRACE("finished");
+    return unobservables_visited_empty_;
+  } else {
+    if (cur_set.IsUnvisited()) {
+      cur_set = unobservables_visited_empty_;
+    }
+
+    return cur_set;
+  }
+}
+
+UnobservablesSet UnobservablesSet::Unvisited() { return UnobservablesSet(); }
+
+UnobservablesSet::UnobservablesSet() : set_(nullptr) {}
+
+UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) {
+  // Create a new empty UnobservablesSet. This allocates in the zone, and
+  // can probably be optimized to use a global singleton.
+  ZoneSet<UnobservableStore>* empty_set =
+      new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+          ZoneSet<UnobservableStore>(zone);
+  return UnobservablesSet(empty_set);
+}
+
+// Computes the intersection of two UnobservablesSets. May return
+// UnobservablesSet::Unvisited() instead of an empty UnobservablesSet for
+// speed.
+UnobservablesSet UnobservablesSet::Intersect(UnobservablesSet other,
+                                             Zone* zone) const {
+  if (IsEmpty() || other.IsEmpty()) {
+    return Unvisited();
+  } else {
+    ZoneSet<UnobservableStore>* intersection =
+        new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+            ZoneSet<UnobservableStore>(zone);
+    // Put the intersection of set() and other.set() in intersection.
+    set_intersection(set()->begin(), set()->end(), other.set()->begin(),
+                     other.set()->end(),
+                     std::inserter(*intersection, intersection->end()));
+
+    return UnobservablesSet(intersection);
+  }
+}
+
+UnobservablesSet UnobservablesSet::Add(UnobservableStore obs,
+                                       Zone* zone) const {
+  bool present = (set()->find(obs) != set()->end());
+  if (present) {
+    return *this;
+  } else {
+    // Make a new empty set.
+    ZoneSet<UnobservableStore>* new_set =
+        new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+            ZoneSet<UnobservableStore>(zone);
+    // Copy the old elements over.
+    *new_set = *set();
+    // Add the new element.
+    bool inserted = new_set->insert(obs).second;
+    DCHECK(inserted);
+    USE(inserted);  // silence warning about unused variable
+
+    return UnobservablesSet(new_set);
+  }
+}
+
+UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset,
+                                                    Zone* zone) const {
+  // Make a new empty set.
+  ZoneSet<UnobservableStore>* new_set =
+      new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+          ZoneSet<UnobservableStore>(zone);
+  // Copy all elements over that have a different offset.
+  for (auto obs : *set()) {
+    if (obs.offset_ != offset) {
+      new_set->insert(obs);
+    }
+  }
+
+  return UnobservablesSet(new_set);
+}
+
+// Used for debugging.
+bool UnobservablesSet::operator==(const UnobservablesSet& other) const {
+  if (IsUnvisited() || other.IsUnvisited()) {
+    return IsEmpty() && other.IsEmpty();
+  } else {
+    // Both pointers guaranteed not to be nullptrs.
+    return *set() == *other.set();
+  }
+}
+
+bool UnobservablesSet::operator!=(const UnobservablesSet& other) const {
+  return !(*this == other);
+}
+
+bool UnobservableStore::operator==(const UnobservableStore other) const {
+  return (id_ == other.id_) && (offset_ == other.offset_);
+}
+
+bool UnobservableStore::operator!=(const UnobservableStore other) const {
+  return !(*this == other);
+}
+
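+// Lexicographic order on (id_, offset_); ZoneSet<UnobservableStore> requires
+// a strict weak ordering.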
+bool UnobservableStore::operator<(const UnobservableStore other) const {
+  return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
 }
 
 }  // namespace compiler
diff --git a/src/compiler/store-store-elimination.h b/src/compiler/store-store-elimination.h
index 1c9ae3d..07ae2c2 100644
--- a/src/compiler/store-store-elimination.h
+++ b/src/compiler/store-store-elimination.h
@@ -6,31 +6,16 @@
 #define V8_COMPILER_STORE_STORE_ELIMINATION_H_
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-graph.h"
+#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-// Forward declarations.
-class CommonOperatorBuilder;
-class JSGraph;
-
 class StoreStoreElimination final {
  public:
-  StoreStoreElimination(JSGraph* js_graph, Zone* temp_zone);
-  ~StoreStoreElimination();
-  void Run();
-
- private:
-  static bool IsEligibleNode(Node* node);
-  void ReduceEligibleNode(Node* node);
-  JSGraph* jsgraph() const { return jsgraph_; }
-  Zone* temp_zone() const { return temp_zone_; }
-
-  JSGraph* const jsgraph_;
-  Zone* const temp_zone_;
+  static void Run(JSGraph* js_graph, Zone* temp_zone);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
index 791aa9d..8e7a0f3 100644
--- a/src/compiler/type-hint-analyzer.cc
+++ b/src/compiler/type-hint-analyzer.cc
@@ -15,73 +15,69 @@
 
 namespace {
 
-// TODO(bmeurer): This detour via types is ugly.
-BinaryOperationHints::Hint ToBinaryOperationHint(Type* type) {
-  if (type->Is(Type::None())) return BinaryOperationHints::kNone;
-  if (type->Is(Type::SignedSmall())) return BinaryOperationHints::kSignedSmall;
-  if (type->Is(Type::Signed32())) return BinaryOperationHints::kSigned32;
-  if (type->Is(Type::Number())) return BinaryOperationHints::kNumberOrUndefined;
-  if (type->Is(Type::String())) return BinaryOperationHints::kString;
-  return BinaryOperationHints::kAny;
-}
-
-CompareOperationHints::Hint ToCompareOperationHint(
-    CompareICState::State state) {
-  switch (state) {
-    case CompareICState::UNINITIALIZED:
-      return CompareOperationHints::kNone;
-    case CompareICState::BOOLEAN:
-      return CompareOperationHints::kBoolean;
-    case CompareICState::SMI:
-      return CompareOperationHints::kSignedSmall;
-    case CompareICState::NUMBER:
-      return CompareOperationHints::kNumber;
-    case CompareICState::STRING:
-      return CompareOperationHints::kString;
-    case CompareICState::INTERNALIZED_STRING:
-      return CompareOperationHints::kInternalizedString;
-    case CompareICState::UNIQUE_NAME:
-      return CompareOperationHints::kUniqueName;
-    case CompareICState::RECEIVER:
-    case CompareICState::KNOWN_RECEIVER:
-      return CompareOperationHints::kReceiver;
-    case CompareICState::GENERIC:
-      return CompareOperationHints::kAny;
+BinaryOperationHint ToBinaryOperationHint(BinaryOpICState::Kind kind) {
+  switch (kind) {
+    case BinaryOpICState::NONE:
+      return BinaryOperationHint::kNone;
+    case BinaryOpICState::SMI:
+      return BinaryOperationHint::kSignedSmall;
+    case BinaryOpICState::INT32:
+      return BinaryOperationHint::kSigned32;
+    case BinaryOpICState::NUMBER:
+      return BinaryOperationHint::kNumberOrOddball;
+    case BinaryOpICState::STRING:
+    case BinaryOpICState::GENERIC:
+      return BinaryOperationHint::kAny;
   }
   UNREACHABLE();
-  return CompareOperationHints::kAny;
+  return BinaryOperationHint::kNone;
+}
+
+CompareOperationHint ToCompareOperationHint(Token::Value op,
+                                            CompareICState::State state) {
+  switch (state) {
+    case CompareICState::UNINITIALIZED:
+      return CompareOperationHint::kNone;
+    case CompareICState::SMI:
+      return CompareOperationHint::kSignedSmall;
+    case CompareICState::NUMBER:
+      return Token::IsOrderedRelationalCompareOp(op)
+                 ? CompareOperationHint::kNumberOrOddball
+                 : CompareOperationHint::kNumber;
+    case CompareICState::STRING:
+    case CompareICState::INTERNALIZED_STRING:
+    case CompareICState::UNIQUE_NAME:
+    case CompareICState::RECEIVER:
+    case CompareICState::KNOWN_RECEIVER:
+    case CompareICState::BOOLEAN:
+    case CompareICState::GENERIC:
+      return CompareOperationHint::kAny;
+  }
+  UNREACHABLE();
+  return CompareOperationHint::kNone;
 }
 
 }  // namespace
 
-bool TypeHintAnalysis::GetBinaryOperationHints(
-    TypeFeedbackId id, BinaryOperationHints* hints) const {
+bool TypeHintAnalysis::GetBinaryOperationHint(TypeFeedbackId id,
+                                              BinaryOperationHint* hint) const {
   auto i = infos_.find(id);
   if (i == infos_.end()) return false;
   Handle<Code> code = i->second;
   DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
   BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
-  *hints = BinaryOperationHints(ToBinaryOperationHint(state.GetLeftType()),
-                                ToBinaryOperationHint(state.GetRightType()),
-                                ToBinaryOperationHint(state.GetResultType()));
+  *hint = ToBinaryOperationHint(state.kind());
   return true;
 }
 
-bool TypeHintAnalysis::GetCompareOperationHints(
-    TypeFeedbackId id, CompareOperationHints* hints) const {
+bool TypeHintAnalysis::GetCompareOperationHint(
+    TypeFeedbackId id, CompareOperationHint* hint) const {
   auto i = infos_.find(id);
   if (i == infos_.end()) return false;
   Handle<Code> code = i->second;
   DCHECK_EQ(Code::COMPARE_IC, code->kind());
-
-  Handle<Map> map;
-  Map* raw_map = code->FindFirstMap();
-  if (raw_map != nullptr) Map::TryUpdate(handle(raw_map)).ToHandle(&map);
-
   CompareICStub stub(code->stub_key(), code->GetIsolate());
-  *hints = CompareOperationHints(ToCompareOperationHint(stub.left()),
-                                 ToCompareOperationHint(stub.right()),
-                                 ToCompareOperationHint(stub.state()));
+  *hint = ToCompareOperationHint(stub.op(), stub.state());
   return true;
 }
 
@@ -136,6 +132,21 @@
   return new (zone()) TypeHintAnalysis(infos, zone());
 }
 
+// Helper function that translates collected type feedback into a
+// BinaryOperationHint.
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
+  switch (type_feedback) {
+    case BinaryOperationFeedback::kSignedSmall:
+      return BinaryOperationHint::kSignedSmall;
+    case BinaryOperationFeedback::kNumber:
+      return BinaryOperationHint::kNumberOrOddball;
+    case BinaryOperationFeedback::kAny:
+    default:
+      return BinaryOperationHint::kAny;
+  }
+  UNREACHABLE();
+  return BinaryOperationHint::kNone;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/type-hint-analyzer.h b/src/compiler/type-hint-analyzer.h
index bfb6232..e48938a 100644
--- a/src/compiler/type-hint-analyzer.h
+++ b/src/compiler/type-hint-analyzer.h
@@ -21,10 +21,10 @@
   explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
       : infos_(infos), zone_(zone) {}
 
-  bool GetBinaryOperationHints(TypeFeedbackId id,
-                               BinaryOperationHints* hints) const;
-  bool GetCompareOperationHints(TypeFeedbackId id,
-                                CompareOperationHints* hints) const;
+  bool GetBinaryOperationHint(TypeFeedbackId id,
+                              BinaryOperationHint* hint) const;
+  bool GetCompareOperationHint(TypeFeedbackId id,
+                               CompareOperationHint* hint) const;
   bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
 
  private:
@@ -50,6 +50,8 @@
   DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
 };
 
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/type-hints.cc b/src/compiler/type-hints.cc
index e608832..a07a870 100644
--- a/src/compiler/type-hints.cc
+++ b/src/compiler/type-hints.cc
@@ -8,59 +8,40 @@
 namespace internal {
 namespace compiler {
 
-std::ostream& operator<<(std::ostream& os, BinaryOperationHints::Hint hint) {
+std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
   switch (hint) {
-    case BinaryOperationHints::kNone:
+    case BinaryOperationHint::kNone:
       return os << "None";
-    case BinaryOperationHints::kSignedSmall:
+    case BinaryOperationHint::kSignedSmall:
       return os << "SignedSmall";
-    case BinaryOperationHints::kSigned32:
+    case BinaryOperationHint::kSigned32:
       return os << "Signed32";
-    case BinaryOperationHints::kNumberOrUndefined:
-      return os << "NumberOrUndefined";
-    case BinaryOperationHints::kString:
-      return os << "String";
-    case BinaryOperationHints::kAny:
+    case BinaryOperationHint::kNumberOrOddball:
+      return os << "NumberOrOddball";
+    case BinaryOperationHint::kAny:
       return os << "Any";
   }
   UNREACHABLE();
   return os;
 }
 
-std::ostream& operator<<(std::ostream& os, BinaryOperationHints hints) {
-  return os << hints.left() << "*" << hints.right() << "->" << hints.result();
-}
-
-std::ostream& operator<<(std::ostream& os, CompareOperationHints::Hint hint) {
+std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
   switch (hint) {
-    case CompareOperationHints::kNone:
+    case CompareOperationHint::kNone:
       return os << "None";
-    case CompareOperationHints::kBoolean:
-      return os << "Boolean";
-    case CompareOperationHints::kSignedSmall:
+    case CompareOperationHint::kSignedSmall:
       return os << "SignedSmall";
-    case CompareOperationHints::kNumber:
+    case CompareOperationHint::kNumber:
       return os << "Number";
-    case CompareOperationHints::kString:
-      return os << "String";
-    case CompareOperationHints::kInternalizedString:
-      return os << "InternalizedString";
-    case CompareOperationHints::kUniqueName:
-      return os << "UniqueName";
-    case CompareOperationHints::kReceiver:
-      return os << "Receiver";
-    case CompareOperationHints::kAny:
+    case CompareOperationHint::kNumberOrOddball:
+      return os << "NumberOrOddball";
+    case CompareOperationHint::kAny:
       return os << "Any";
   }
   UNREACHABLE();
   return os;
 }
 
-std::ostream& operator<<(std::ostream& os, CompareOperationHints hints) {
-  return os << hints.left() << "*" << hints.right() << " (" << hints.combined()
-            << ")";
-}
-
 std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
   switch (hint) {
     case ToBooleanHint::kNone:
@@ -94,7 +75,7 @@
   if (hints == ToBooleanHint::kAny) return os << "Any";
   if (hints == ToBooleanHint::kNone) return os << "None";
   bool first = true;
-  for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * CHAR_BIT; ++i) {
+  for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * 8; ++i) {
     ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
     if (hints & hint) {
       if (!first) os << "|";
@@ -105,34 +86,6 @@
   return os;
 }
 
-// static
-bool BinaryOperationHints::Is(Hint h1, Hint h2) {
-  if (h1 == h2) return true;
-  switch (h1) {
-    case kNone:
-      return true;
-    case kSignedSmall:
-      return h2 == kSigned32 || h2 == kNumberOrUndefined || h2 == kAny;
-    case kSigned32:
-      return h2 == kNumberOrUndefined || h2 == kAny;
-    case kNumberOrUndefined:
-      return h2 == kAny;
-    case kString:
-      return h2 == kAny;
-    case kAny:
-      return false;
-  }
-  UNREACHABLE();
-  return false;
-}
-
-// static
-BinaryOperationHints::Hint BinaryOperationHints::Combine(Hint h1, Hint h2) {
-  if (Is(h1, h2)) return h2;
-  if (Is(h2, h1)) return h1;
-  return kAny;
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/type-hints.h b/src/compiler/type-hints.h
index 7c9badd..ad94491 100644
--- a/src/compiler/type-hints.h
+++ b/src/compiler/type-hints.h
@@ -13,106 +13,34 @@
 namespace compiler {
 
 // Type hints for a binary operation.
-class BinaryOperationHints final {
- public:
-  enum Hint {
-    kNone,
-    kSignedSmall,
-    kSigned32,
-    kNumberOrUndefined,
-    kString,
-    kAny
-  };
-
-  BinaryOperationHints() : BinaryOperationHints(kNone, kNone, kNone) {}
-  BinaryOperationHints(Hint left, Hint right, Hint result)
-      : bit_field_(LeftField::encode(left) | RightField::encode(right) |
-                   ResultField::encode(result)) {}
-
-  static BinaryOperationHints Any() {
-    return BinaryOperationHints(kAny, kAny, kAny);
-  }
-
-  Hint left() const { return LeftField::decode(bit_field_); }
-  Hint right() const { return RightField::decode(bit_field_); }
-  Hint result() const { return ResultField::decode(bit_field_); }
-  Hint combined() const { return Combine(Combine(left(), right()), result()); }
-
-  // Hint 'subtyping' and generalization.
-  static bool Is(Hint h1, Hint h2);
-  static Hint Combine(Hint h1, Hint h2);
-
-  bool operator==(BinaryOperationHints const& that) const {
-    return this->bit_field_ == that.bit_field_;
-  }
-  bool operator!=(BinaryOperationHints const& that) const {
-    return !(*this == that);
-  }
-
-  friend size_t hash_value(BinaryOperationHints const& hints) {
-    return hints.bit_field_;
-  }
-
- private:
-  typedef BitField<Hint, 0, 3> LeftField;
-  typedef BitField<Hint, 3, 3> RightField;
-  typedef BitField<Hint, 6, 3> ResultField;
-
-  uint32_t bit_field_;
+enum class BinaryOperationHint : uint8_t {
+  kNone,
+  kSignedSmall,
+  kSigned32,
+  kNumberOrOddball,
+  kAny
 };
 
-std::ostream& operator<<(std::ostream&, BinaryOperationHints::Hint);
-std::ostream& operator<<(std::ostream&, BinaryOperationHints);
+inline size_t hash_value(BinaryOperationHint hint) {
+  return static_cast<unsigned>(hint);
+}
 
-// Type hints for an binary operation.
-class CompareOperationHints final {
- public:
-  enum Hint {
-    kNone,
-    kBoolean,
-    kSignedSmall,
-    kNumber,
-    kString,
-    kInternalizedString,
-    kUniqueName,
-    kReceiver,
-    kAny
-  };
+std::ostream& operator<<(std::ostream&, BinaryOperationHint);
 
-  CompareOperationHints() : CompareOperationHints(kNone, kNone, kNone) {}
-  CompareOperationHints(Hint left, Hint right, Hint combined)
-      : bit_field_(LeftField::encode(left) | RightField::encode(right) |
-                   CombinedField::encode(combined)) {}
-
-  static CompareOperationHints Any() {
-    return CompareOperationHints(kAny, kAny, kAny);
-  }
-
-  Hint left() const { return LeftField::decode(bit_field_); }
-  Hint right() const { return RightField::decode(bit_field_); }
-  Hint combined() const { return CombinedField::decode(bit_field_); }
-
-  bool operator==(CompareOperationHints const& that) const {
-    return this->bit_field_ == that.bit_field_;
-  }
-  bool operator!=(CompareOperationHints const& that) const {
-    return !(*this == that);
-  }
-
-  friend size_t hash_value(CompareOperationHints const& hints) {
-    return hints.bit_field_;
-  }
-
- private:
-  typedef BitField<Hint, 0, 4> LeftField;
-  typedef BitField<Hint, 4, 4> RightField;
-  typedef BitField<Hint, 8, 4> CombinedField;
-
-  uint32_t bit_field_;
+// Type hints for a compare operation.
+enum class CompareOperationHint : uint8_t {
+  kNone,
+  kSignedSmall,
+  kNumber,
+  kNumberOrOddball,
+  kAny
 };
 
-std::ostream& operator<<(std::ostream&, CompareOperationHints::Hint);
-std::ostream& operator<<(std::ostream&, CompareOperationHints);
+inline size_t hash_value(CompareOperationHint hint) {
+  return static_cast<unsigned>(hint);
+}
+
+std::ostream& operator<<(std::ostream&, CompareOperationHint);
 
 // Type hints for the ToBoolean type conversion.
 enum class ToBooleanHint : uint16_t {
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 2bc0bb3..0d07053 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -4,12 +4,14 @@
 
 #include "src/compiler/typer.h"
 
+#include <iomanip>
+
 #include "src/base/flags.h"
 #include "src/bootstrapper.h"
-#include "src/compilation-dependencies.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
+#include "src/compiler/loop-variable-optimizer.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operation-typer.h"
@@ -30,33 +32,18 @@
   Typer* const typer_;
 };
 
-Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
-             CompilationDependencies* dependencies, FunctionType* function_type)
+Typer::Typer(Isolate* isolate, Graph* graph)
     : isolate_(isolate),
       graph_(graph),
-      flags_(flags),
-      dependencies_(dependencies),
-      function_type_(function_type),
       decorator_(nullptr),
       cache_(TypeCache::Get()),
       operation_typer_(isolate, zone()) {
   Zone* zone = this->zone();
   Factory* const factory = isolate->factory();
 
-  Type* infinity = Type::Constant(factory->infinity_value(), zone);
-  Type* minus_infinity = Type::Constant(factory->minus_infinity_value(), zone);
-  // Unfortunately, the infinities created in other places might be different
-  // ones (eg the result of NewNumber in TypeNumberConstant).
-  Type* truncating_to_zero =
-      Type::Union(Type::Union(infinity, minus_infinity, zone),
-                  Type::MinusZeroOrNaN(), zone);
-  DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
-
   singleton_false_ = Type::Constant(factory->false_value(), zone);
   singleton_true_ = Type::Constant(factory->true_value(), zone);
   singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
-  signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
-  unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
   falsish_ = Type::Union(
       Type::Undetectable(),
       Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
@@ -78,8 +65,10 @@
 
 class Typer::Visitor : public Reducer {
  public:
-  explicit Visitor(Typer* typer)
-      : typer_(typer), weakened_nodes_(typer->zone()) {}
+  explicit Visitor(Typer* typer, LoopVariableOptimizer* induction_vars)
+      : typer_(typer),
+        induction_vars_(induction_vars),
+        weakened_nodes_(typer->zone()) {}
 
   Reduction Reduce(Node* node) override {
     if (node->op()->ValueOutputCount() == 0) return NoChange();
@@ -97,7 +86,8 @@
       DECLARE_CASE(IfException)
       // VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
       COMMON_OP_LIST(DECLARE_CASE)
-      SIMPLIFIED_OP_LIST(DECLARE_CASE)
+      SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
+      SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
       MACHINE_OP_LIST(DECLARE_CASE)
       MACHINE_SIMD_OP_LIST(DECLARE_CASE)
       JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
@@ -106,6 +96,19 @@
       JS_OTHER_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
 
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return UpdateType(node, TypeBinaryOp(node, x));
+      SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+      SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return UpdateType(node, TypeUnaryOp(node, x));
+      SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
 #define DECLARE_CASE(x) case IrOpcode::k##x:
       DECLARE_CASE(Loop)
       DECLARE_CASE(Branch)
@@ -126,6 +129,8 @@
       DECLARE_CASE(OsrLoopEntry)
       DECLARE_CASE(Throw)
       DECLARE_CASE(End)
+      SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
+      SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
       break;
     }
@@ -144,7 +149,8 @@
       DECLARE_CASE(IfException)
       // VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
       COMMON_OP_LIST(DECLARE_CASE)
-      SIMPLIFIED_OP_LIST(DECLARE_CASE)
+      SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
+      SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
       MACHINE_OP_LIST(DECLARE_CASE)
       MACHINE_SIMD_OP_LIST(DECLARE_CASE)
       JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
@@ -153,6 +159,19 @@
       JS_OTHER_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
 
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return TypeBinaryOp(node, x);
+      SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+      SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return TypeUnaryOp(node, x);
+      SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
 #define DECLARE_CASE(x) case IrOpcode::k##x:
       DECLARE_CASE(Loop)
       DECLARE_CASE(Branch)
@@ -173,6 +192,8 @@
       DECLARE_CASE(OsrLoopEntry)
       DECLARE_CASE(Throw)
       DECLARE_CASE(End)
+      SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
+      SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
       break;
     }
@@ -184,12 +205,18 @@
 
  private:
   Typer* typer_;
+  LoopVariableOptimizer* induction_vars_;
   ZoneSet<NodeId> weakened_nodes_;
 
 #define DECLARE_METHOD(x) inline Type* Type##x(Node* node);
   DECLARE_METHOD(Start)
   DECLARE_METHOD(IfException)
-  VALUE_OP_LIST(DECLARE_METHOD)
+  COMMON_OP_LIST(DECLARE_METHOD)
+  SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_METHOD)
+  SIMPLIFIED_OTHER_OP_LIST(DECLARE_METHOD)
+  MACHINE_OP_LIST(DECLARE_METHOD)
+  MACHINE_SIMD_OP_LIST(DECLARE_METHOD)
+  JS_OP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
   Type* TypeOrNone(Node* node) {
@@ -208,10 +235,6 @@
   Zone* zone() { return typer_->zone(); }
   Isolate* isolate() { return typer_->isolate(); }
   Graph* graph() { return typer_->graph(); }
-  Typer::Flags flags() const { return typer_->flags(); }
-  CompilationDependencies* dependencies() const {
-    return typer_->dependencies();
-  }
 
   void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
   bool IsWeakened(NodeId node_id) {
@@ -243,13 +266,19 @@
   static Type* ToNumber(Type*, Typer*);
   static Type* ToObject(Type*, Typer*);
   static Type* ToString(Type*, Typer*);
-  static Type* NumberAbs(Type*, Typer*);
-  static Type* NumberCeil(Type*, Typer*);
-  static Type* NumberFloor(Type*, Typer*);
-  static Type* NumberRound(Type*, Typer*);
-  static Type* NumberTrunc(Type*, Typer*);
-  static Type* NumberToInt32(Type*, Typer*);
-  static Type* NumberToUint32(Type*, Typer*);
+#define DECLARE_METHOD(Name)                \
+  static Type* Name(Type* type, Typer* t) { \
+    return t->operation_typer_.Name(type);  \
+  }
+  SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+#define DECLARE_METHOD(Name)                          \
+  static Type* Name(Type* lhs, Type* rhs, Typer* t) { \
+    return t->operation_typer_.Name(lhs, rhs);        \
+  }
+  SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+  SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
 
   static Type* ObjectIsCallable(Type*, Typer*);
   static Type* ObjectIsNumber(Type*, Typer*);
@@ -265,7 +294,6 @@
 #undef DECLARE_METHOD
 
   static Type* JSTypeOfTyper(Type*, Typer*);
-  static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
   static Type* JSCallFunctionTyper(Type*, Typer*);
 
   static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
@@ -275,7 +303,8 @@
     if (NodeProperties::IsTyped(node)) {
       // Widen the type of a previously typed node.
       Type* previous = NodeProperties::GetType(node);
-      if (node->opcode() == IrOpcode::kPhi) {
+      if (node->opcode() == IrOpcode::kPhi ||
+          node->opcode() == IrOpcode::kInductionVariablePhi) {
         // Speed up termination in the presence of range types:
         current = Weaken(node, current, previous);
       }
@@ -296,18 +325,23 @@
   }
 };
 
+void Typer::Run() { Run(NodeVector(zone()), nullptr); }
 
-void Typer::Run() { Run(NodeVector(zone())); }
-
-
-void Typer::Run(const NodeVector& roots) {
-  Visitor visitor(this);
+void Typer::Run(const NodeVector& roots,
+                LoopVariableOptimizer* induction_vars) {
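+  // When induction variable data is supplied, loop phis are temporarily
+  // changed into InductionVariablePhis so that the visitor can type them with
+  // range information; they are changed back (and guards inserted) below.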
+  if (induction_vars != nullptr) {
+    induction_vars->ChangeToInductionVariablePhis();
+  }
+  Visitor visitor(this, induction_vars);
   GraphReducer graph_reducer(zone(), graph());
   graph_reducer.AddReducer(&visitor);
   for (Node* const root : roots) graph_reducer.ReduceNode(root);
   graph_reducer.ReduceGraph();
-}
 
+  if (induction_vars != nullptr) {
+    induction_vars->ChangeToPhisAndInsertGuards();
+  }
+}
 
 void Typer::Decorator::Decorate(Node* node) {
   if (node->op()->ValueOutputCount() > 0) {
@@ -315,7 +349,7 @@
     // Other cases will generally require a proper fixpoint iteration with Run.
     bool is_typed = NodeProperties::IsTyped(node);
     if (is_typed || NodeProperties::AllValueInputsAreTyped(node)) {
-      Visitor typing(typer_);
+      Visitor typing(typer_, nullptr);
       Type* type = typing.TypeNode(node);
       if (is_typed) {
         type = Type::Intersect(type, NodeProperties::GetType(node),
@@ -438,24 +472,7 @@
 
 // static
 Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
-  if (type->Is(Type::Number())) return type;
-  if (type->Is(Type::NullOrUndefined())) {
-    if (type->Is(Type::Null())) return t->cache_.kSingletonZero;
-    if (type->Is(Type::Undefined())) return Type::NaN();
-    return Type::Union(Type::NaN(), t->cache_.kSingletonZero, t->zone());
-  }
-  if (type->Is(Type::NumberOrUndefined())) {
-    return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
-                       Type::NaN(), t->zone());
-  }
-  if (type->Is(t->singleton_false_)) return t->cache_.kSingletonZero;
-  if (type->Is(t->singleton_true_)) return t->cache_.kSingletonOne;
-  if (type->Is(Type::Boolean())) return t->cache_.kZeroOrOne;
-  if (type->Is(Type::BooleanOrNumber())) {
-    return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
-                       t->cache_.kZeroOrOne, t->zone());
-  }
-  return Type::Number();
+  return t->operation_typer_.ToNumber(type);
 }
 
 
@@ -479,89 +496,6 @@
   return Type::String();
 }
 
-// static
-Type* Typer::Visitor::NumberAbs(Type* type, Typer* t) {
-  DCHECK(type->Is(Type::Number()));
-  Factory* const f = t->isolate()->factory();
-  bool const maybe_nan = type->Maybe(Type::NaN());
-  bool const maybe_minuszero = type->Maybe(Type::MinusZero());
-  type = Type::Intersect(type, Type::PlainNumber(), t->zone());
-  double const max = type->Max();
-  double const min = type->Min();
-  if (min < 0) {
-    if (type->Is(t->cache_.kInteger)) {
-      type =
-          Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), t->zone());
-    } else if (min == max) {
-      type = Type::Constant(f->NewNumber(std::fabs(min)), t->zone());
-    } else {
-      type = Type::PlainNumber();
-    }
-  }
-  if (maybe_minuszero) {
-    type = Type::Union(type, t->cache_.kSingletonZero, t->zone());
-  }
-  if (maybe_nan) {
-    type = Type::Union(type, Type::NaN(), t->zone());
-  }
-  return type;
-}
-
-// static
-Type* Typer::Visitor::NumberCeil(Type* type, Typer* t) {
-  DCHECK(type->Is(Type::Number()));
-  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
-  // TODO(bmeurer): We could infer a more precise type here.
-  return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-// static
-Type* Typer::Visitor::NumberFloor(Type* type, Typer* t) {
-  DCHECK(type->Is(Type::Number()));
-  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
-  // TODO(bmeurer): We could infer a more precise type here.
-  return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-// static
-Type* Typer::Visitor::NumberRound(Type* type, Typer* t) {
-  DCHECK(type->Is(Type::Number()));
-  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
-  // TODO(bmeurer): We could infer a more precise type here.
-  return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-// static
-Type* Typer::Visitor::NumberTrunc(Type* type, Typer* t) {
-  DCHECK(type->Is(Type::Number()));
-  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
-  // TODO(bmeurer): We could infer a more precise type here.
-  return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
-  if (type->Is(Type::Signed32())) return type;
-  if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
-  if (type->Is(t->signed32ish_)) {
-    return Type::Intersect(
-        Type::Union(type, t->cache_.kSingletonZero, t->zone()),
-        Type::Signed32(), t->zone());
-  }
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
-  if (type->Is(Type::Unsigned32())) return type;
-  if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
-  if (type->Is(t->unsigned32ish_)) {
-    return Type::Intersect(
-        Type::Union(type, t->cache_.kSingletonZero, t->zone()),
-        Type::Unsigned32(), t->zone());
-  }
-  return Type::Unsigned32();
-}
-
 // Type checks.
 
 Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
@@ -610,22 +544,13 @@
 
 Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(); }
 
-Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
-
+Type* Typer::Visitor::TypeIfException(Node* node) {
+  return Type::NonInternal();
+}
 
 // Common operators.
 
-
-Type* Typer::Visitor::TypeParameter(Node* node) {
-  if (FunctionType* function_type = typer_->function_type()) {
-    int const index = ParameterIndexOf(node->op());
-    if (index >= 0 && index < function_type->Arity()) {
-      return function_type->Parameter(index);
-    }
-  }
-  return Type::Any();
-}
-
+Type* Typer::Visitor::TypeParameter(Node* node) { return Type::Any(); }
 
 Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
 
@@ -687,7 +612,6 @@
   return Type::Union(Operand(node, 1), Operand(node, 2), zone());
 }
 
-
 Type* Typer::Visitor::TypePhi(Node* node) {
   int arity = node->op()->ValueInputCount();
   Type* type = Operand(node, 0);
@@ -697,16 +621,138 @@
   return type;
 }
 
+Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
+  int arity = NodeProperties::GetControlInput(node)->op()->ControlInputCount();
+  DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(node)->opcode());
+  DCHECK_EQ(2, NodeProperties::GetControlInput(node)->InputCount());
+
+  Type* initial_type = Operand(node, 0);
+  Type* increment_type = Operand(node, 2);
+
+  // We only handle integer induction variables (otherwise ranges
+  // do not apply and we cannot do anything).
+  if (!initial_type->Is(typer_->cache_.kInteger) ||
+      !increment_type->Is(typer_->cache_.kInteger)) {
+    // Fallback to normal phi typing.
+    Type* type = Operand(node, 0);
+    for (int i = 1; i < arity; ++i) {
+      type = Type::Union(type, Operand(node, i), zone());
+    }
+    return type;
+  }
+  // If we do not have enough type information for the initial value or
+  // the increment, just return the initial value's type.
+  if (!initial_type->IsInhabited() || !increment_type->IsInhabited()) {
+    return initial_type;
+  }
+
+  // Now process the bounds.
+  auto res = induction_vars_->induction_variables().find(node->id());
+  DCHECK(res != induction_vars_->induction_variables().end());
+  InductionVariable* induction_var = res->second;
+
+  InductionVariable::ArithmeticType arithmetic_type = induction_var->Type();
+
+  double min = -V8_INFINITY;
+  double max = V8_INFINITY;
+
+  double increment_min;
+  double increment_max;
+  if (arithmetic_type == InductionVariable::ArithmeticType::kAddition) {
+    increment_min = increment_type->Min();
+    increment_max = increment_type->Max();
+  } else {
+    DCHECK(arithmetic_type == InductionVariable::ArithmeticType::kSubtraction);
+    increment_min = -increment_type->Max();
+    increment_max = -increment_type->Min();
+  }
+
+  if (increment_min >= 0) {
+    // increasing sequence
+    min = initial_type->Min();
+    for (auto bound : induction_var->upper_bounds()) {
+      Type* bound_type = TypeOrNone(bound.bound);
+      // If the type is not an integer, just skip the bound.
+      if (!bound_type->Is(typer_->cache_.kInteger)) continue;
+      // If the type is not inhabited, then we can take the initial value.
+      if (!bound_type->IsInhabited()) {
+        max = initial_type->Max();
+        break;
+      }
+      double bound_max = bound_type->Max();
+      if (bound.kind == InductionVariable::kStrict) {
+        bound_max -= 1;
+      }
+      max = std::min(max, bound_max + increment_max);
+    }
+    // The upper bound must be at least the initial value's upper bound.
+    max = std::max(max, initial_type->Max());
+  } else if (increment_max <= 0) {
+    // decreasing sequence
+    max = initial_type->Max();
+    for (auto bound : induction_var->lower_bounds()) {
+      Type* bound_type = TypeOrNone(bound.bound);
+      // If the type is not an integer, just skip the bound.
+      if (!bound_type->Is(typer_->cache_.kInteger)) continue;
+      // If the type is not inhabited, then we can take the initial value.
+      if (!bound_type->IsInhabited()) {
+        min = initial_type->Min();
+        break;
+      }
+      double bound_min = bound_type->Min();
+      if (bound.kind == InductionVariable::kStrict) {
+        bound_min += 1;
+      }
+      min = std::max(min, bound_min + increment_min);
+    }
+    // The lower bound must be at most the initial value's lower bound.
+    min = std::min(min, initial_type->Min());
+  } else {
+    // Shortcut: If the increment can be both positive and negative,
+    // the variable can go arbitrarily far, so just return integer.
+    return typer_->cache_.kInteger;
+  }
+  if (FLAG_trace_turbo_loop) {
+    OFStream os(stdout);
+    os << std::setprecision(10);
+    os << "Loop (" << NodeProperties::GetControlInput(node)->id()
+       << ") variable bounds in "
+       << (arithmetic_type == InductionVariable::ArithmeticType::kAddition
+               ? "addition"
+               : "subtraction")
+       << " for phi " << node->id() << ": (" << min << ", " << max << ")\n";
+  }
+  return Type::Range(min, max, typer_->zone());
+}
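
A concrete run of the addition case above (illustrative only; all values
assumed, not part of the patch): consider a counter that starts at 0, is
incremented by 1, and has a strict upper bound whose type is the constant 10:

  double min = 0.0;                        // initial_type->Min()
  double bound_max = 10.0 - 1;             // kStrict: largest in-bounds value
  double max = std::min(V8_INFINITY, bound_max + 1.0);  // add increment_max
  max = std::max(max, 0.0);                // at least initial_type->Max()
  // Result: Type::Range(0, 10) -- every value the phi can take, including
  // the final value 10 that makes the loop condition fail.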
 
 Type* Typer::Visitor::TypeEffectPhi(Node* node) {
   UNREACHABLE();
   return nullptr;
 }
 
-Type* Typer::Visitor::TypeTypeGuard(Node* node) {
-  Type* input_type = Operand(node, 0);
-  Type* guard_type = TypeOf(node->op());
-  return Type::Intersect(input_type, guard_type, zone());
+Type* Typer::Visitor::TypeLoopExit(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
+Type* Typer::Visitor::TypeLoopExitValue(Node* node) { return Operand(node, 0); }
+
+Type* Typer::Visitor::TypeLoopExitEffect(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
+Type* Typer::Visitor::TypeEnsureWritableFastElements(Node* node) {
+  return Operand(node, 1);
+}
+
+Type* Typer::Visitor::TypeMaybeGrowFastElements(Node* node) {
+  return Operand(node, 1);
+}
+
+Type* Typer::Visitor::TypeTransitionElementsKind(Node* node) {
+  UNREACHABLE();
+  return nullptr;
 }
 
 Type* Typer::Visitor::TypeCheckpoint(Node* node) {
@@ -750,9 +796,12 @@
   return Type::Any();
 }
 
+Type* Typer::Visitor::TypeTypeGuard(Node* node) {
+  Type* const type = Operand(node, 0);
+  return typer_->operation_typer()->TypeTypeGuard(node->op(), type);
+}
 
-Type* Typer::Visitor::TypeDead(Node* node) { return Type::Any(); }
-
+Type* Typer::Visitor::TypeDead(Node* node) { return Type::None(); }
 
 // JS comparison operators.
 
@@ -883,129 +932,32 @@
 
 
 Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = NumberToInt32(ToNumber(lhs, t), t);
-  rhs = NumberToInt32(ToNumber(rhs, t), t);
-  double lmin = lhs->Min();
-  double rmin = rhs->Min();
-  double lmax = lhs->Max();
-  double rmax = rhs->Max();
-  // Or-ing any two values results in a value no smaller than their minimum.
-  // Even no smaller than their maximum if both values are non-negative.
-  double min =
-      lmin >= 0 && rmin >= 0 ? std::max(lmin, rmin) : std::min(lmin, rmin);
-  double max = Type::Signed32()->Max();
-
-  // Or-ing with 0 is essentially a conversion to int32.
-  if (rmin == 0 && rmax == 0) {
-    min = lmin;
-    max = lmax;
-  }
-  if (lmin == 0 && lmax == 0) {
-    min = rmin;
-    max = rmax;
-  }
-
-  if (lmax < 0 || rmax < 0) {
-    // Or-ing two values of which at least one is negative results in a negative
-    // value.
-    max = std::min(max, -1.0);
-  }
-  return Type::Range(min, max, t->zone());
+  return NumberBitwiseOr(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 
 Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = NumberToInt32(ToNumber(lhs, t), t);
-  rhs = NumberToInt32(ToNumber(rhs, t), t);
-  double lmin = lhs->Min();
-  double rmin = rhs->Min();
-  double lmax = lhs->Max();
-  double rmax = rhs->Max();
-  double min = Type::Signed32()->Min();
-  // And-ing any two values results in a value no larger than their maximum.
-  // Even no larger than their minimum if both values are non-negative.
-  double max =
-      lmin >= 0 && rmin >= 0 ? std::min(lmax, rmax) : std::max(lmax, rmax);
-  // And-ing with a non-negative value x causes the result to be between
-  // zero and x.
-  if (lmin >= 0) {
-    min = 0;
-    max = std::min(max, lmax);
-  }
-  if (rmin >= 0) {
-    min = 0;
-    max = std::min(max, rmax);
-  }
-  return Type::Range(min, max, t->zone());
+  return NumberBitwiseAnd(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 
 Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = NumberToInt32(ToNumber(lhs, t), t);
-  rhs = NumberToInt32(ToNumber(rhs, t), t);
-  double lmin = lhs->Min();
-  double rmin = rhs->Min();
-  double lmax = lhs->Max();
-  double rmax = rhs->Max();
-  if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
-    // Xor-ing negative or non-negative values results in a non-negative value.
-    return Type::Unsigned31();
-  }
-  if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
-    // Xor-ing a negative and a non-negative value results in a negative value.
-    // TODO(jarin) Use a range here.
-    return Type::Negative32();
-  }
-  return Type::Signed32();
+  return NumberBitwiseXor(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 
 Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
-  return Type::Signed32();
+  return NumberShiftLeft(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 
 Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = NumberToInt32(ToNumber(lhs, t), t);
-  rhs = NumberToUint32(ToNumber(rhs, t), t);
-  double min = kMinInt;
-  double max = kMaxInt;
-  if (lhs->Min() >= 0) {
-    // Right-shifting a non-negative value cannot make it negative, nor larger.
-    min = std::max(min, 0.0);
-    max = std::min(max, lhs->Max());
-    if (rhs->Min() > 0 && rhs->Max() <= 31) {
-      max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
-    }
-  }
-  if (lhs->Max() < 0) {
-    // Right-shifting a negative value cannot make it non-negative, nor smaller.
-    min = std::max(min, lhs->Min());
-    max = std::min(max, -1.0);
-    if (rhs->Min() > 0 && rhs->Max() <= 31) {
-      min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
-    }
-  }
-  if (rhs->Min() > 0 && rhs->Max() <= 31) {
-    // Right-shifting by a positive value yields a small integer value.
-    double shift_min = kMinInt >> static_cast<int>(rhs->Min());
-    double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
-    min = std::max(min, shift_min);
-    max = std::min(max, shift_max);
-  }
-  // TODO(jarin) Ideally, the following micro-optimization should be performed
-  // by the type constructor.
-  if (max != Type::Signed32()->Max() || min != Type::Signed32()->Min()) {
-    return Type::Range(min, max, t->zone());
-  }
-  return Type::Signed32();
+  return NumberShiftRight(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 
 Type* Typer::Visitor::JSShiftRightLogicalTyper(Type* lhs, Type* rhs, Typer* t) {
-  lhs = NumberToUint32(ToNumber(lhs, t), t);
-  // Logical right-shifting any value cannot make it larger.
-  return Type::Range(0.0, lhs->Max(), t->zone());
+  return NumberShiftRightLogical(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 
@@ -1022,36 +974,23 @@
     }
   }
   // The addition must be numeric.
-  return t->operation_typer()->NumericAdd(ToNumber(lhs, t), ToNumber(rhs, t));
+  return NumberAdd(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
-  return t->operation_typer()->NumericSubtract(ToNumber(lhs, t),
-                                               ToNumber(rhs, t));
+  return NumberSubtract(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
-  return t->operation_typer()->NumericMultiply(ToNumber(lhs, t),
-                                               ToNumber(rhs, t));
+  return NumberMultiply(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
-  return t->operation_typer()->NumericDivide(ToNumber(lhs, t),
-                                             ToNumber(rhs, t));
-  lhs = ToNumber(lhs, t);
-  rhs = ToNumber(rhs, t);
-  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-  // Division is tricky, so all we do is try ruling out nan.
-  bool maybe_nan =
-      lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
-      ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
-       (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
-  return maybe_nan ? Type::Number() : Type::OrderedNumber();
+  return NumberDivide(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
-  return t->operation_typer()->NumericModulus(ToNumber(lhs, t),
-                                              ToNumber(rhs, t));
+  return NumberModulus(ToNumber(lhs, t), ToNumber(rhs, t), t);
 }
 
 
@@ -1160,28 +1099,18 @@
 }
 
 
-Type* Typer::Visitor::JSLoadPropertyTyper(Type* object, Type* name, Typer* t) {
-  // TODO(rossberg): Use range types and sized array types to filter undefined.
-  if (object->IsArray() && name->Is(Type::Integral32())) {
-    return Type::Union(
-        object->AsArray()->Element(), Type::Undefined(), t->zone());
-  }
-  return Type::Any();
-}
-
-
 Type* Typer::Visitor::TypeJSLoadProperty(Node* node) {
-  return TypeBinaryOp(node, JSLoadPropertyTyper);
+  return Type::NonInternal();
 }
 
 
 Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
-  return Type::Any();
+  return Type::NonInternal();
 }
 
-
-Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) { return Type::Any(); }
-
+Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) {
+  return Type::NonInternal();
+}
 
 // Returns a somewhat larger range if we previously assigned
 // a (smaller) range to this node. This is used to speed up
@@ -1314,7 +1243,7 @@
   if (outer->Is(Type::None())) {
     return Type::None();
   } else {
-    DCHECK(outer->Maybe(Type::Internal()));
+    DCHECK(outer->Maybe(Type::OtherInternal()));
     return Type::Context(outer, zone());
   }
 }
@@ -1340,12 +1269,6 @@
 }
 
 
-Type* Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
-  // TODO(rossberg): this is probably incorrect
-  return WrapContextTypeForInput(node);
-}
-
-
 Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
   return WrapContextTypeForInput(node);
 }
@@ -1376,19 +1299,29 @@
         case kMathTrunc:
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         // Unary math functions.
-        case kMathExp:
-          return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
         case kMathAbs:
-        case kMathLog:
-        case kMathSqrt:
-        case kMathCos:
-        case kMathSin:
-        case kMathTan:
+        case kMathExp:
+        case kMathExpm1:
+          return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
         case kMathAcos:
+        case kMathAcosh:
         case kMathAsin:
+        case kMathAsinh:
         case kMathAtan:
+        case kMathAtanh:
+        case kMathCbrt:
+        case kMathCos:
         case kMathFround:
+        case kMathLog:
+        case kMathLog1p:
+        case kMathLog10:
+        case kMathLog2:
+        case kMathSin:
+        case kMathSqrt:
+        case kMathTan:
           return Type::Number();
+        case kMathSign:
+          return t->cache_.kMinusOneToOne;
         // Binary math functions.
         case kMathAtan2:
         case kMathPow:
@@ -1399,6 +1332,11 @@
           return Type::Signed32();
         case kMathClz32:
           return t->cache_.kZeroToThirtyTwo;
+        // Number functions.
+        case kNumberParseInt:
+          return t->cache_.kIntegerOrMinusZeroOrNaN;
+        case kNumberToString:
+          return Type::String();
         // String functions.
         case kStringCharCodeAt:
           return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
@@ -1406,19 +1344,33 @@
         case kStringCharAt:
         case kStringConcat:
         case kStringFromCharCode:
+        case kStringSubstr:
         case kStringToLowerCase:
         case kStringToUpperCase:
           return Type::String();
         // Array functions.
         case kArrayIndexOf:
         case kArrayLastIndexOf:
-          return Type::Number();
+          return Type::Range(-1, kMaxSafeInteger, t->zone());
+        case kArrayPush:
+          return t->cache_.kPositiveSafeInteger;
+        // Object functions.
+        case kObjectHasOwnProperty:
+          return Type::Boolean();
+        // Global functions.
+        case kGlobalDecodeURI:
+        case kGlobalDecodeURIComponent:
+        case kGlobalEncodeURI:
+        case kGlobalEncodeURIComponent:
+        case kGlobalEscape:
+        case kGlobalUnescape:
+          return Type::String();
         default:
           break;
       }
     }
   }
-  return Type::Any();
+  return Type::NonInternal();
 }
 
 
@@ -1440,9 +1392,6 @@
     case Runtime::kInlineIsTypedArray:
     case Runtime::kInlineIsRegExp:
       return Type::Boolean();
-    case Runtime::kInlineDoubleLo:
-    case Runtime::kInlineDoubleHi:
-      return Type::Signed32();
     case Runtime::kInlineCreateIterResultObject:
     case Runtime::kInlineRegExpConstructResult:
       return Type::OtherObject();
@@ -1453,16 +1402,10 @@
       return TypeUnaryOp(node, ToInteger);
     case Runtime::kInlineToLength:
       return TypeUnaryOp(node, ToLength);
-    case Runtime::kInlineToName:
-      return TypeUnaryOp(node, ToName);
     case Runtime::kInlineToNumber:
       return TypeUnaryOp(node, ToNumber);
     case Runtime::kInlineToObject:
       return TypeUnaryOp(node, ToObject);
-    case Runtime::kInlineToPrimitive:
-    case Runtime::kInlineToPrimitive_Number:
-    case Runtime::kInlineToPrimitive_String:
-      return TypeUnaryOp(node, ToPrimitive);
     case Runtime::kInlineToString:
       return TypeUnaryOp(node, ToString);
     case Runtime::kHasInPrototypeChain:
@@ -1470,6 +1413,9 @@
     default:
       break;
   }
+  // TODO(turbofan): This should be Type::NonInternal(), but unfortunately we
+  // have a few weird runtime calls that return the hole or even FixedArrays;
+  // change this once those weird runtime calls have been removed.
   return Type::Any();
 }
 
@@ -1529,10 +1475,6 @@
 
 Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeBooleanToNumber(Node* node) {
-  return TypeUnaryOp(node, ToNumber);
-}
-
 Type* Typer::Visitor::TypeNumberEqual(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeNumberLessThan(Node* node) { return Type::Boolean(); }
@@ -1553,65 +1495,6 @@
   return Type::Boolean();
 }
 
-Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberSubtract(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeSpeculativeNumberAdd(Node* node) {
-  return Type::Number();
-}
-
-Type* Typer::Visitor::TypeSpeculativeNumberSubtract(Node* node) {
-  return Type::Number();
-}
-
-Type* Typer::Visitor::TypeSpeculativeNumberMultiply(Node* node) {
-  return Type::Number();
-}
-
-Type* Typer::Visitor::TypeSpeculativeNumberDivide(Node* node) {
-  return Type::Number();
-}
-
-Type* Typer::Visitor::TypeSpeculativeNumberModulus(Node* node) {
-  return Type::Number();
-}
-
-Type* Typer::Visitor::TypeNumberMultiply(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberDivide(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberModulus(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberShiftRight(Node* node) {
-  return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
-  return Type::Unsigned32();
-}
-
 Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
   return TypeUnaryOp(node, ToNumber);
 }
@@ -1624,75 +1507,6 @@
   return Type::Number();
 }
 
-Type* Typer::Visitor::TypeNumberImul(Node* node) { return Type::Signed32(); }
-
-Type* Typer::Visitor::TypeNumberAbs(Node* node) {
-  return TypeUnaryOp(node, NumberAbs);
-}
-
-Type* Typer::Visitor::TypeNumberClz32(Node* node) {
-  return typer_->cache_.kZeroToThirtyTwo;
-}
-
-Type* Typer::Visitor::TypeNumberCeil(Node* node) {
-  return TypeUnaryOp(node, NumberCeil);
-}
-
-Type* Typer::Visitor::TypeNumberFloor(Node* node) {
-  return TypeUnaryOp(node, NumberFloor);
-}
-
-Type* Typer::Visitor::TypeNumberFround(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberAtan(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberAtan2(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberAtanh(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberCos(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberExp(Node* node) {
-  return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
-}
-
-// TODO(mvstanton): Is this type sufficient, or should it look like Exp()?
-Type* Typer::Visitor::TypeNumberExpm1(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberLog(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberLog1p(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberLog2(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberLog10(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberCbrt(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberRound(Node* node) {
-  return TypeUnaryOp(node, NumberRound);
-}
-
-Type* Typer::Visitor::TypeNumberSin(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberSqrt(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberTan(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberTrunc(Node* node) {
-  return TypeUnaryOp(node, NumberTrunc);
-}
-
-Type* Typer::Visitor::TypeNumberToInt32(Node* node) {
-  return TypeUnaryOp(node, NumberToInt32);
-}
-
-
-Type* Typer::Visitor::TypeNumberToUint32(Node* node) {
-  return TypeUnaryOp(node, NumberToUint32);
-}
-
-
 // static
 Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
   if (lhs->IsConstant() && rhs->Is(lhs)) {
@@ -1727,109 +1541,44 @@
   return Type::String();
 }
 
+Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
+  // TODO(bmeurer): We could do better here based on inputs.
+  return Type::Range(0, kMaxUInt16, zone());
+}
+
 Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
   return TypeUnaryOp(node, StringFromCharCodeTyper);
 }
 
-Type* Typer::Visitor::TypeStringToNumber(Node* node) {
-  return TypeUnaryOp(node, ToNumber);
-}
-
-namespace {
-
-Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
-  return Type::Union(Type::Semantic(type, zone),
-                     Type::Representation(rep, zone), zone);
-}
-
-}  // namespace
-
-Type* Typer::Visitor::TypeChangeTaggedSignedToInt32(Node* node) {
-  Type* arg = Operand(node, 0);
-  // TODO(jarin): DCHECK(arg->Is(Type::Signed32()));
-  // Many tests fail this check.
-  return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
-  Type* arg = Operand(node, 0);
-  DCHECK(arg->Is(Type::Signed32()));
-  return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
-  Type* arg = Operand(node, 0);
-  DCHECK(arg->Is(Type::Unsigned32()));
-  return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
-}
-
-
-Type* Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
-  Type* arg = Operand(node, 0);
-  DCHECK(arg->Is(Type::Number()));
-  return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
-}
-
-Type* Typer::Visitor::TypeTruncateTaggedToFloat64(Node* node) {
-  Type* arg = Operand(node, 0);
-  // TODO(jarin) This DCHECK does not work because of speculative feedback.
-  // Re-enable once we record the speculative feedback in types.
-  // DCHECK(arg->Is(Type::NumberOrOddball()));
-  return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeInt31ToTaggedSigned(Node* node) {
-  Type* arg = Operand(node, 0);
-  // TODO(jarin): DCHECK(arg->Is(Type::Signed31()));
-  // Some mjsunit/asm and mjsunit/wasm tests fail this check.
-  // For instance, asm/int32-umod fails with Signed32/UntaggedIntegral32 in
-  // simplified-lowering (after propagation).
-  Type* rep =
-      arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
-  return ChangeRepresentation(arg, rep, zone());
-}
-
-Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
-  Type* arg = Operand(node, 0);
-  // TODO(jarin): DCHECK(arg->Is(Type::Signed32()));
-  // Two tests fail this check: mjsunit/asm/sqlite3/sqlite-safe-heap and
-  // mjsunit/wasm/embenchen/lua_binarytrees. The first one fails with Any/Any in
-  // simplified-lowering (after propagation).
-  Type* rep =
-      arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
-  return ChangeRepresentation(arg, rep, zone());
-}
-
-Type* Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
-  Type* arg = Operand(node, 0);
-  // TODO(jarin): DCHECK(arg->Is(Type::Unsigned32()));
-  // This fails in benchmarks/octane/mandreel (--turbo).
-  return ChangeRepresentation(arg, Type::Tagged(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
-  Type* arg = Operand(node, 0);
-  // TODO(jarin): DCHECK(arg->Is(Type::Number()));
-  // Some (or all) mjsunit/wasm/embenchen/ tests fail this check when run with
-  // --turbo and --always-opt.
-  return ChangeRepresentation(arg, Type::Tagged(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeTaggedToBit(Node* node) {
-  Type* arg = Operand(node, 0);
-  DCHECK(arg->Is(Type::Boolean()));
-  return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
-}
-
-Type* Typer::Visitor::TypeChangeBitToTagged(Node* node) {
-  Type* arg = Operand(node, 0);
-  return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
-}
-
 Type* Typer::Visitor::TypeCheckBounds(Node* node) {
-  // TODO(bmeurer): We could do better here based on the limit.
-  return Type::Unsigned31();
+  Type* index = Operand(node, 0);
+  Type* length = Operand(node, 1);
+  index = Type::Intersect(index, Type::Integral32(), zone());
+  if (!index->IsInhabited() || !length->IsInhabited()) return Type::None();
+  double min = std::max(index->Min(), 0.0);
+  double max = std::min(index->Max(), length->Min() - 1);
+  if (max < min) return Type::None();
+  return Type::Range(min, max, zone());
+}
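
To make the new range math concrete (a worked example with assumed types, not
part of the patch): for an index typed as the range [-5, 100] and a length
typed as [10, 20],

  double min = std::max(-5.0, 0.0);        // 0: the check excludes negatives
  double max = std::min(100.0, 10.0 - 1);  // 9: length->Min() - 1
  // CheckBounds is typed Range(0, 9): any index that passes the check is
  // provably smaller than the smallest length the array can have.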
+
+Type* Typer::Visitor::TypeCheckMaps(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
+Type* Typer::Visitor::TypeCheckNumber(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::Number(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckString(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::String(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckIf(Node* node) {
+  UNREACHABLE();
+  return nullptr;
 }
 
 Type* Typer::Visitor::TypeCheckTaggedPointer(Node* node) {
@@ -1842,115 +1591,33 @@
   return Type::Intersect(arg, typer_->cache_.kSmi, zone());
 }
 
-Type* Typer::Visitor::TypeCheckedInt32Add(Node* node) {
-  return Type::Integral32();
-}
-
-Type* Typer::Visitor::TypeCheckedInt32Sub(Node* node) {
-  return Type::Integral32();
-}
-
-Type* Typer::Visitor::TypeCheckedUint32ToInt32(Node* node) {
-  return Type::Signed32();
-}
-
-Type* Typer::Visitor::TypeCheckedFloat64ToInt32(Node* node) {
-  return Type::Signed32();
-}
-
-Type* Typer::Visitor::TypeCheckedTaggedToInt32(Node* node) {
-  return Type::Signed32();
-}
-
-Type* Typer::Visitor::TypeCheckedTaggedToFloat64(Node* node) {
-  return Type::Number();
-}
-
 Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
   Type* type = Operand(node, 0);
   return type;
 }
 
 Type* Typer::Visitor::TypeCheckTaggedHole(Node* node) {
-  CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
   Type* type = Operand(node, 0);
   type = Type::Intersect(type, Type::NonInternal(), zone());
-  switch (mode) {
-    case CheckTaggedHoleMode::kConvertHoleToUndefined: {
-      // The hole is turned into undefined.
-      type = Type::Union(type, Type::Undefined(), zone());
-      break;
-    }
-    case CheckTaggedHoleMode::kNeverReturnHole: {
-      // We deoptimize in case of the hole.
-      break;
-    }
+  return type;
+}
+
+Type* Typer::Visitor::TypeConvertTaggedHoleToUndefined(Node* node) {
+  Type* type = Operand(node, 0);
+  if (type->Maybe(Type::Hole())) {
+    // Turn "the hole" into undefined.
+    type = Type::Intersect(type, Type::NonInternal(), zone());
+    type = Type::Union(type, Type::Undefined(), zone());
   }
   return type;
 }
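
For example (with an assumed input type): if the operand is typed
String \/ Hole, the Maybe(Hole) test fires, intersecting with NonInternal()
strips the hole, and the union adds Undefined, yielding String \/ Undefined --
which matches the runtime behavior of converting the hole to undefined.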
 
-Type* Typer::Visitor::TypeTruncateTaggedToWord32(Node* node) {
-  Type* arg = Operand(node, 0);
-  // TODO(jarin): DCHECK(arg->Is(Type::NumberOrUndefined()));
-  // Several mjsunit and cctest tests fail this check. For instance,
-  // mjsunit/compiler/regress-607493 fails with Any/Any in simplified-lowering
-  // (after propagation).
-  return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
-}
-
 Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
 
-
-namespace {
-
-MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
-  if (object_type->IsConstant() &&
-      object_type->AsConstant()->Value()->IsHeapObject()) {
-    Handle<Map> object_map(
-        Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
-    if (object_map->is_stable()) return object_map;
-  } else if (object_type->IsClass()) {
-    Handle<Map> object_map = object_type->AsClass()->Map();
-    if (object_map->is_stable()) return object_map;
-  }
-  return MaybeHandle<Map>();
-}
-
-}  // namespace
-
-
 Type* Typer::Visitor::TypeLoadField(Node* node) {
-  FieldAccess const& access = FieldAccessOf(node->op());
-  if (access.base_is_tagged == kTaggedBase &&
-      access.offset == HeapObject::kMapOffset) {
-    // The type of LoadField[Map](o) is Constant(map) if map is stable and
-    // either
-    //  (a) o has type Constant(object) and map == object->map, or
-    //  (b) o has type Class(map),
-    // and either
-    //  (1) map cannot transition further, or
-    //  (2) deoptimization is enabled and we can add a code dependency on the
-    //      stability of map (to guard the Constant type information).
-    Type* const object = Operand(node, 0);
-    if (object->Is(Type::None())) return Type::None();
-    Handle<Map> object_map;
-    if (GetStableMapFromObjectType(object).ToHandle(&object_map)) {
-      if (object_map->CanTransition()) {
-        if (flags() & kDeoptimizationEnabled) {
-          dependencies()->AssumeMapStable(object_map);
-        } else {
-          return access.type;
-        }
-      }
-      Type* object_map_type = Type::Constant(object_map, zone());
-      DCHECK(object_map_type->Is(access.type));
-      return object_map_type;
-    }
-  }
-  return access.type;
+  return FieldAccessOf(node->op()).type;
 }
 
-
 Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
   // TODO(bmeurer): This typing is not yet correct. Since we can still access
   // out of bounds, the type in the general case has to include Undefined.
@@ -1970,6 +1637,17 @@
   return ElementAccessOf(node->op()).type;
 }
 
+Type* Typer::Visitor::TypeLoadTypedElement(Node* node) {
+  switch (ExternalArrayTypeOf(node->op())) {
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
+  case kExternal##ElemType##Array:                          \
+    return typer_->cache_.k##ElemType;
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+  }
+  UNREACHABLE();
+  return nullptr;
+}
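
For readers unfamiliar with the X-macro pattern used here: TYPED_ARRAYS is a
list macro that invokes TYPED_ARRAY_CASE once per typed-array element kind,
so for instance the Int8 entry expands to roughly:

  case kExternalInt8Array:
    return typer_->cache_.kInt8;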
 
 Type* Typer::Visitor::TypeStoreField(Node* node) {
   UNREACHABLE();
@@ -1988,6 +1666,11 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeStoreTypedElement(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
 Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
   return TypeUnaryOp(node, ObjectIsCallable);
 }
@@ -2021,6 +1704,13 @@
 
 Type* Typer::Visitor::TypeComment(Node* node) { return Type::None(); }
 
+Type* Typer::Visitor::TypeRetain(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
+Type* Typer::Visitor::TypeUnsafePointerAdd(Node* node) { return Type::None(); }
+
 Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
 
 Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
@@ -2065,6 +1755,9 @@
   return Type::Integral32();
 }
 
+Type* Typer::Visitor::TypeWord32ReverseBytes(Node* node) {
+  return Type::Integral32();
+}
 
 Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
   return Type::Integral32();
@@ -2102,6 +1795,9 @@
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeWord64ReverseBytes(Node* node) {
+  return Type::Internal();
+}
 
 Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
 
@@ -2127,6 +1823,9 @@
 
 Type* Typer::Visitor::TypeInt32Mul(Node* node) { return Type::Integral32(); }
 
+Type* Typer::Visitor::TypeInt32MulWithOverflow(Node* node) {
+  return Type::Internal();
+}
 
 Type* Typer::Visitor::TypeInt32MulHigh(Node* node) { return Type::Signed32(); }
 
@@ -2182,7 +1881,6 @@
 
 Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
 
-
 Type* Typer::Visitor::TypeInt64Div(Node* node) { return Type::Internal(); }
 
 
@@ -2223,10 +1921,6 @@
   return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
 }
 
-Type* Typer::Visitor::TypeNumberSilenceNaN(Node* node) {
-  return Type::Number();
-}
-
 Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
   return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
                          zone());
@@ -2280,16 +1974,35 @@
   return Type::Internal();
 }
 
-
 Type* Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
   return Type::Intersect(Type::Unsigned32(), Type::UntaggedFloat64(), zone());
 }
 
-
 Type* Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeImpossibleToWord32(Node* node) {
+  return Type::None();
+}
+
+Type* Typer::Visitor::TypeImpossibleToWord64(Node* node) {
+  return Type::None();
+}
+
+Type* Typer::Visitor::TypeImpossibleToFloat32(Node* node) {
+  return Type::None();
+}
+
+Type* Typer::Visitor::TypeImpossibleToFloat64(Node* node) {
+  return Type::None();
+}
+
+Type* Typer::Visitor::TypeImpossibleToTagged(Node* node) {
+  return Type::None();
+}
+
+Type* Typer::Visitor::TypeImpossibleToBit(Node* node) { return Type::None(); }
 
 Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
   return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
@@ -2300,7 +2013,6 @@
                          zone());
 }
 
-
 Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
   return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
 }
@@ -2364,10 +2076,6 @@
 
 Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeFloat32SubPreserveNan(Node* node) {
-  return Type::Number();
-}
-
 Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
@@ -2376,12 +2084,6 @@
 Type* Typer::Visitor::TypeFloat32Div(Node* node) { return Type::Number(); }
 
 
-Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
-
-
 Type* Typer::Visitor::TypeFloat32Abs(Node* node) {
   // TODO(turbofan): We should be able to infer a better type here.
   return Type::Number();
@@ -2403,16 +2105,15 @@
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); }
 
 
 Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeFloat64SubPreserveNan(Node* node) {
-  return Type::Number();
-}
-
 Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
@@ -2435,14 +2136,26 @@
   return Type::Number();
 }
 
-Type* Typer::Visitor::TypeFloat64Atan(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat64Acos(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeFloat64Atan2(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat64Acosh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Asin(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Asinh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atan(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Atanh(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeFloat64Atan2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cbrt(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeFloat64Cos(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeFloat64Cosh(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeFloat64Exp(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Expm1(Node* node) { return Type::Number(); }
@@ -2451,18 +2164,22 @@
 
 Type* Typer::Visitor::TypeFloat64Log1p(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeFloat64Log2(Node* node) { return Type::Number(); }
-
 Type* Typer::Visitor::TypeFloat64Log10(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeFloat64Cbrt(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat64Log2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Pow(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Sin(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeFloat64Sinh(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeFloat64Tan(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeFloat64Tanh(Node* node) { return Type::Number(); }
+
 Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
 
 
@@ -2563,6 +2280,13 @@
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeUnalignedLoad(Node* node) { return Type::Any(); }
+
+Type* Typer::Visitor::TypeUnalignedStore(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
 Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
 
 Type* Typer::Visitor::TypeCheckedStore(Node* node) {
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index b6c5cb3..d4d5744 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -5,7 +5,6 @@
 #ifndef V8_COMPILER_TYPER_H_
 #define V8_COMPILER_TYPER_H_
 
-#include "src/base/flags.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/operation-typer.h"
 #include "src/types.h"
@@ -14,30 +13,22 @@
 namespace internal {
 
 // Forward declarations.
-class CompilationDependencies;
 class TypeCache;
 
 namespace compiler {
 
+class LoopVariableOptimizer;
 class OperationTyper;
 
 class Typer {
  public:
-  // Flags that control the mode of operation.
-  enum Flag {
-    kNoFlags = 0u,
-    kDeoptimizationEnabled = 1u << 0,
-  };
-  typedef base::Flags<Flag> Flags;
-
-  Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
-        CompilationDependencies* dependencies = nullptr,
-        FunctionType* function_type = nullptr);
+  Typer(Isolate* isolate, Graph* graph);
   ~Typer();
 
   void Run();
   // TODO(bmeurer,jarin): Remove this once we have a notion of "roots" on Graph.
-  void Run(const ZoneVector<Node*>& roots);
+  void Run(const ZoneVector<Node*>& roots,
+           LoopVariableOptimizer* induction_vars);
 
  private:
   class Visitor;
@@ -46,16 +37,10 @@
   Graph* graph() const { return graph_; }
   Zone* zone() const { return graph()->zone(); }
   Isolate* isolate() const { return isolate_; }
-  Flags flags() const { return flags_; }
-  CompilationDependencies* dependencies() const { return dependencies_; }
-  FunctionType* function_type() const { return function_type_; }
   OperationTyper* operation_typer() { return &operation_typer_; }
 
   Isolate* const isolate_;
   Graph* const graph_;
-  Flags const flags_;
-  CompilationDependencies* const dependencies_;
-  FunctionType* function_type_;
   Decorator* decorator_;
   TypeCache const& cache_;
   OperationTyper operation_typer_;
@@ -63,16 +48,12 @@
   Type* singleton_false_;
   Type* singleton_true_;
   Type* singleton_the_hole_;
-  Type* signed32ish_;
-  Type* unsigned32ish_;
   Type* falsish_;
   Type* truish_;
 
   DISALLOW_COPY_AND_ASSIGN(Typer);
 };
 
-DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/unwinding-info-writer.h b/src/compiler/unwinding-info-writer.h
new file mode 100644
index 0000000..86f5e9e
--- /dev/null
+++ b/src/compiler/unwinding-info-writer.h
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_UNWINDING_INFO_WRITER_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/compiler/arm/unwinding-info-writer-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/compiler/arm64/unwinding-info-writer-arm64.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/compiler/x64/unwinding-info-writer-x64.h"
+#else
+
+// Placeholder for unsupported architectures.
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+class EhFrameWriter;
+
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+  explicit UnwindingInfoWriter(Zone* zone) {}
+
+  void SetNumberOfInstructionBlocks(int number) {
+    if (FLAG_perf_prof_unwinding_info) UNIMPLEMENTED();
+  }
+
+  void BeginInstructionBlock(int pc_offset, const InstructionBlock* block) {
+    if (FLAG_perf_prof_unwinding_info) UNIMPLEMENTED();
+  }
+  void EndInstructionBlock(const InstructionBlock* block) {
+    if (FLAG_perf_prof_unwinding_info) UNIMPLEMENTED();
+  }
+
+  void Finish(int code_size) {}
+
+  EhFrameWriter* eh_frame_writer() { return nullptr; }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif
+
+#endif
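
The empty bodies make this placeholder a null object: code generation can
construct and drive an UnwindingInfoWriter unconditionally, and on targets
without an implementation every call is a no-op unless
--perf-prof-unwinding-info is set, in which case it fails loudly via
UNIMPLEMENTED() rather than silently emitting bad unwind data. A sketch of
the assumed call pattern (variable names hypothetical):

  UnwindingInfoWriter writer(codegen_zone);
  writer.SetNumberOfInstructionBlocks(block_count);
  // per block: writer.BeginInstructionBlock(...) ... EndInstructionBlock(...)
  writer.Finish(code_size);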
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
index 555570d..4769cb0 100644
--- a/src/compiler/value-numbering-reducer.cc
+++ b/src/compiler/value-numbering-reducer.cc
@@ -7,6 +7,7 @@
 #include <cstring>
 
 #include "src/base/functional.h"
+#include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
 
 namespace v8 {
@@ -41,10 +42,12 @@
 
 }  // namespace
 
-
-ValueNumberingReducer::ValueNumberingReducer(Zone* zone)
-    : entries_(nullptr), capacity_(0), size_(0), zone_(zone) {}
-
+ValueNumberingReducer::ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone)
+    : entries_(nullptr),
+      capacity_(0),
+      size_(0),
+      temp_zone_(temp_zone),
+      graph_zone_(graph_zone) {}
 
 ValueNumberingReducer::~ValueNumberingReducer() {}
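
The two zones encode lifetimes: the reducer's hash table is scratch storage
that can die with the reduction pass, while anything handed back to callers
must live as long as the graph. A call site would therefore look roughly like
this (illustrative; variable names assumed):

  ValueNumberingReducer value_numbering(temp_scope.zone(), graph->zone());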
 
@@ -58,7 +61,7 @@
     DCHECK(capacity_ == 0);
     // Allocate the initial entries and insert the first entry.
     capacity_ = kInitialCapacity;
-    entries_ = zone()->NewArray<Node*>(kInitialCapacity);
+    entries_ = temp_zone()->NewArray<Node*>(kInitialCapacity);
     memset(entries_, 0, sizeof(*entries_) * kInitialCapacity);
     entries_[hash & (kInitialCapacity - 1)] = node;
     size_ = 1;
@@ -123,6 +126,25 @@
       continue;
     }
     if (Equals(entry, node)) {
+      // Make sure the replacement has at least as good a type as the
+      // original node.
+      if (NodeProperties::IsTyped(entry) && NodeProperties::IsTyped(node)) {
+        Type* entry_type = NodeProperties::GetType(entry);
+        Type* node_type = NodeProperties::GetType(node);
+        if (!entry_type->Is(node_type)) {
+          // Ideally, we would set an intersection of {entry_type} and
+          // {node_type} here. However, typing of NumberConstants assigns
+          // different types to constants with the same value (it creates
+          // a fresh heap number), which would make the intersection empty.
+          // To be safe, we use the smaller type if the types are comparable.
+          if (node_type->Is(entry_type)) {
+            NodeProperties::SetType(entry, node_type);
+          } else {
+            // Types are not comparable => do not replace.
+            return NoChange();
+          }
+        }
+      }
       return Replace(entry);
     }
   }
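
A worked example of the subtyping logic above (types assumed): if the cached
entry is typed Number and the incoming node is typed Signed32, then
entry_type->Is(node_type) fails but node_type->Is(entry_type) holds, so the
entry is narrowed to Signed32 before it replaces the node. If the types were
Signed32 and String instead, neither is a subtype of the other, and Reduce()
conservatively returns NoChange().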
@@ -135,7 +157,7 @@
   Node** const old_entries = entries_;
   size_t const old_capacity = capacity_;
   capacity_ *= kCapacityToSizeRatio;
-  entries_ = zone()->NewArray<Node*>(capacity_);
+  entries_ = temp_zone()->NewArray<Node*>(capacity_);
   memset(entries_, 0, sizeof(*entries_) * capacity_);
   size_ = 0;
   size_t const mask = capacity_ - 1;
diff --git a/src/compiler/value-numbering-reducer.h b/src/compiler/value-numbering-reducer.h
index 822b607..f700c85 100644
--- a/src/compiler/value-numbering-reducer.h
+++ b/src/compiler/value-numbering-reducer.h
@@ -13,7 +13,7 @@
 
 class ValueNumberingReducer final : public Reducer {
  public:
-  explicit ValueNumberingReducer(Zone* zone);
+  explicit ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone);
   ~ValueNumberingReducer();
 
   Reduction Reduce(Node* node) override;
@@ -22,12 +22,14 @@
   enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
 
   void Grow();
-  Zone* zone() const { return zone_; }
+  Zone* temp_zone() const { return temp_zone_; }
+  Zone* graph_zone() const { return graph_zone_; }
 
   Node** entries_;
   size_t capacity_;
   size_t size_;
-  Zone* zone_;
+  Zone* temp_zone_;
+  Zone* graph_zone_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 365f075..eb42b39 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -28,18 +28,6 @@
 namespace compiler {
 
 
-static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
-  const Node::Uses uses = def->uses();
-  return std::find(uses.begin(), uses.end(), use) != uses.end();
-}
-
-
-static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
-  const Node::Inputs inputs = use->inputs();
-  return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
-}
-
-
 class Verifier::Visitor {
  public:
   Visitor(Zone* z, Typing typed, CheckInputs check_inputs)
@@ -124,21 +112,17 @@
 
   // Verify that frame state has been inserted for the nodes that need it.
   for (int i = 0; i < frame_state_count; i++) {
-    Node* frame_state = NodeProperties::GetFrameStateInput(node, i);
+    Node* frame_state = NodeProperties::GetFrameStateInput(node);
     CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
           // kFrameState uses Start as a sentinel.
           (node->opcode() == IrOpcode::kFrameState &&
            frame_state->opcode() == IrOpcode::kStart));
-    CHECK(IsDefUseChainLinkPresent(frame_state, node));
-    CHECK(IsUseDefChainLinkPresent(frame_state, node));
   }
 
   // Verify all value inputs actually produce a value.
   for (int i = 0; i < value_count; ++i) {
     Node* value = NodeProperties::GetValueInput(node, i);
     CheckOutput(value, node, value->op()->ValueOutputCount(), "value");
-    CHECK(IsDefUseChainLinkPresent(value, node));
-    CHECK(IsUseDefChainLinkPresent(value, node));
     // Verify that only parameters and projections can have input nodes with
     // multiple outputs.
     CHECK(node->opcode() == IrOpcode::kParameter ||
@@ -150,8 +134,6 @@
   for (int i = 0; i < context_count; ++i) {
     Node* context = NodeProperties::GetContextInput(node);
     CheckOutput(context, node, context->op()->ValueOutputCount(), "context");
-    CHECK(IsDefUseChainLinkPresent(context, node));
-    CHECK(IsUseDefChainLinkPresent(context, node));
   }
 
   if (check_inputs == kAll) {
@@ -159,8 +141,6 @@
     for (int i = 0; i < effect_count; ++i) {
       Node* effect = NodeProperties::GetEffectInput(node);
       CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
-      CHECK(IsDefUseChainLinkPresent(effect, node));
-      CHECK(IsUseDefChainLinkPresent(effect, node));
     }
 
     // Verify all control inputs are control nodes.
@@ -168,8 +148,30 @@
       Node* control = NodeProperties::GetControlInput(node, i);
       CheckOutput(control, node, control->op()->ControlOutputCount(),
                   "control");
-      CHECK(IsDefUseChainLinkPresent(control, node));
-      CHECK(IsUseDefChainLinkPresent(control, node));
+    }
+
+    // Verify that nodes that may throw (i.e. that lack the kNoThrow
+    // property) only have IfSuccess/IfException control uses.
+    if (!node->op()->HasProperty(Operator::kNoThrow)) {
+      int count_success = 0, count_exception = 0;
+      for (Edge edge : node->use_edges()) {
+        if (!NodeProperties::IsControlEdge(edge)) {
+          continue;
+        }
+        Node* control_use = edge.from();
+        if (control_use->opcode() != IrOpcode::kIfSuccess &&
+            control_use->opcode() != IrOpcode::kIfException) {
+          V8_Fatal(__FILE__, __LINE__,
+                   "#%d:%s should be followed by IfSuccess/IfException, but is "
+                   "followed by #%d:%s",
+                   node->id(), node->op()->mnemonic(), control_use->id(),
+                   control_use->op()->mnemonic());
+        }
+        if (control_use->opcode() == IrOpcode::kIfSuccess) ++count_success;
+        if (control_use->opcode() == IrOpcode::kIfException) ++count_exception;
+        CHECK_LE(count_success, 1);
+        CHECK_LE(count_exception, 1);
+      }
     }
   }
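
Informally, the control shape being enforced for any node that lacks the
kNoThrow property is:

  //         node (may throw)
  //         /              \
  //    IfSuccess       IfException
  //
  // At most one IfSuccess and at most one IfException may consume the node's
  // control output; any other control use is a fatal verifier error.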
 
@@ -408,6 +410,11 @@
       */
       break;
     }
+    case IrOpcode::kInductionVariablePhi: {
+      // This is only a temporary node for the typer.
+      UNREACHABLE();
+      break;
+    }
     case IrOpcode::kEffectPhi: {
       // EffectPhi input count matches parent control node.
       CHECK_EQ(0, value_count);
@@ -417,9 +424,24 @@
       CHECK_EQ(input_count, 1 + effect_count);
       break;
     }
-    case IrOpcode::kTypeGuard:
-      // TODO(bmeurer): what are the constraints on these?
+    case IrOpcode::kLoopExit: {
+      CHECK_EQ(2, control_count);
+      Node* loop = NodeProperties::GetControlInput(node, 1);
+      CHECK_EQ(IrOpcode::kLoop, loop->opcode());
       break;
+    }
+    case IrOpcode::kLoopExitValue: {
+      CHECK_EQ(1, control_count);
+      Node* loop_exit = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(IrOpcode::kLoopExit, loop_exit->opcode());
+      break;
+    }
+    case IrOpcode::kLoopExitEffect: {
+      CHECK_EQ(1, control_count);
+      Node* loop_exit = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(IrOpcode::kLoopExit, loop_exit->opcode());
+      break;
+    }
     case IrOpcode::kCheckpoint:
       // Type is empty.
       CheckNotTyped(node);
@@ -587,7 +609,6 @@
     case IrOpcode::kJSCreateCatchContext:
     case IrOpcode::kJSCreateWithContext:
     case IrOpcode::kJSCreateBlockContext:
-    case IrOpcode::kJSCreateModuleContext:
     case IrOpcode::kJSCreateScriptContext: {
       // Type is Context, and operand is Internal.
       Node* context = NodeProperties::GetContextInput(node);
@@ -653,11 +674,10 @@
       CheckNotTyped(node);
       break;
 
-    case IrOpcode::kDebugBreak:
-      CheckNotTyped(node);
-      break;
-
     case IrOpcode::kComment:
+    case IrOpcode::kDebugBreak:
+    case IrOpcode::kRetain:
+    case IrOpcode::kUnsafePointerAdd:
       CheckNotTyped(node);
       break;
 
@@ -668,11 +688,6 @@
       CheckValueInputIs(node, 0, Type::Boolean());
       CheckUpperIs(node, Type::Boolean());
       break;
-    case IrOpcode::kBooleanToNumber:
-      // Boolean -> Number
-      CheckValueInputIs(node, 0, Type::Boolean());
-      CheckUpperIs(node, Type::Number());
-      break;
     case IrOpcode::kNumberEqual:
       // (Number, Number) -> Boolean
       CheckValueInputIs(node, 0, Type::Number());
@@ -721,6 +736,11 @@
       CheckValueInputIs(node, 1, Type::Signed32());
       CheckUpperIs(node, Type::Signed32());
       break;
+    case IrOpcode::kSpeculativeNumberBitwiseOr:
+    case IrOpcode::kSpeculativeNumberBitwiseXor:
+    case IrOpcode::kSpeculativeNumberBitwiseAnd:
+      CheckUpperIs(node, Type::Signed32());
+      break;
     case IrOpcode::kNumberShiftLeft:
     case IrOpcode::kNumberShiftRight:
       // (Signed32, Unsigned32) -> Signed32
@@ -728,12 +748,19 @@
       CheckValueInputIs(node, 1, Type::Unsigned32());
       CheckUpperIs(node, Type::Signed32());
       break;
+    case IrOpcode::kSpeculativeNumberShiftLeft:
+    case IrOpcode::kSpeculativeNumberShiftRight:
+      CheckUpperIs(node, Type::Signed32());
+      break;
     case IrOpcode::kNumberShiftRightLogical:
       // (Unsigned32, Unsigned32) -> Unsigned32
       CheckValueInputIs(node, 0, Type::Unsigned32());
       CheckValueInputIs(node, 1, Type::Unsigned32());
       CheckUpperIs(node, Type::Unsigned32());
       break;
+    case IrOpcode::kSpeculativeNumberShiftRightLogical:
+      CheckUpperIs(node, Type::Unsigned32());
+      break;
     case IrOpcode::kNumberImul:
       // (Unsigned32, Unsigned32) -> Signed32
       CheckValueInputIs(node, 0, Type::Unsigned32());
@@ -746,6 +773,9 @@
       CheckUpperIs(node, Type::Unsigned32());
       break;
     case IrOpcode::kNumberAtan2:
+    case IrOpcode::kNumberMax:
+    case IrOpcode::kNumberMin:
+    case IrOpcode::kNumberPow:
       // (Number, Number) -> Number
       CheckValueInputIs(node, 0, Type::Number());
       CheckValueInputIs(node, 1, Type::Number());
@@ -755,9 +785,14 @@
     case IrOpcode::kNumberCeil:
     case IrOpcode::kNumberFloor:
     case IrOpcode::kNumberFround:
+    case IrOpcode::kNumberAcos:
+    case IrOpcode::kNumberAcosh:
+    case IrOpcode::kNumberAsin:
+    case IrOpcode::kNumberAsinh:
     case IrOpcode::kNumberAtan:
     case IrOpcode::kNumberAtanh:
     case IrOpcode::kNumberCos:
+    case IrOpcode::kNumberCosh:
     case IrOpcode::kNumberExp:
     case IrOpcode::kNumberExpm1:
     case IrOpcode::kNumberLog:
@@ -766,9 +801,12 @@
     case IrOpcode::kNumberLog10:
     case IrOpcode::kNumberCbrt:
     case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberSign:
     case IrOpcode::kNumberSin:
+    case IrOpcode::kNumberSinh:
     case IrOpcode::kNumberSqrt:
     case IrOpcode::kNumberTan:
+    case IrOpcode::kNumberTanh:
     case IrOpcode::kNumberTrunc:
       // Number -> Number
       CheckValueInputIs(node, 0, Type::Number());
@@ -785,13 +823,18 @@
       CheckUpperIs(node, Type::Unsigned32());
       break;
     case IrOpcode::kPlainPrimitiveToNumber:
-      // Type is Number.
+      // PlainPrimitive -> Number
+      CheckValueInputIs(node, 0, Type::PlainPrimitive());
       CheckUpperIs(node, Type::Number());
       break;
     case IrOpcode::kPlainPrimitiveToWord32:
-      CheckUpperIs(node, Type::Number());
+      // PlainPrimitive -> Integral32
+      CheckValueInputIs(node, 0, Type::PlainPrimitive());
+      CheckUpperIs(node, Type::Integral32());
       break;
     case IrOpcode::kPlainPrimitiveToFloat64:
+      // PlainPrimitive -> Number
+      CheckValueInputIs(node, 0, Type::PlainPrimitive());
       CheckUpperIs(node, Type::Number());
       break;
     case IrOpcode::kStringEqual:
@@ -802,16 +845,17 @@
       CheckValueInputIs(node, 1, Type::String());
       CheckUpperIs(node, Type::Boolean());
       break;
+    case IrOpcode::kStringCharCodeAt:
+      // (String, Unsigned32) -> UnsignedSmall
+      CheckValueInputIs(node, 0, Type::String());
+      CheckValueInputIs(node, 1, Type::Unsigned32());
+      CheckUpperIs(node, Type::UnsignedSmall());
+      break;
     case IrOpcode::kStringFromCharCode:
       // Number -> String
       CheckValueInputIs(node, 0, Type::Number());
       CheckUpperIs(node, Type::String());
       break;
-    case IrOpcode::kStringToNumber:
-      // String -> Number
-      CheckValueInputIs(node, 0, Type::String());
-      CheckUpperIs(node, Type::Number());
-      break;
     case IrOpcode::kReferenceEqual: {
       // (Unique, Any) -> Boolean  and
       // (Any, Unique) -> Boolean
@@ -831,6 +875,24 @@
       CheckValueInputIs(node, 0, Type::PlainNumber());
       CheckUpperIs(node, Type::TaggedPointer());
       break;
+    case IrOpcode::kEnsureWritableFastElements:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckValueInputIs(node, 1, Type::Internal());
+      CheckUpperIs(node, Type::Internal());
+      break;
+    case IrOpcode::kMaybeGrowFastElements:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckValueInputIs(node, 1, Type::Internal());
+      CheckValueInputIs(node, 2, Type::Unsigned31());
+      CheckValueInputIs(node, 3, Type::Unsigned31());
+      CheckUpperIs(node, Type::Internal());
+      break;
+    case IrOpcode::kTransitionElementsKind:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckValueInputIs(node, 1, Type::Internal());
+      CheckValueInputIs(node, 2, Type::Internal());
+      CheckNotTyped(node);
+      break;
 
     case IrOpcode::kChangeTaggedSignedToInt32: {
       // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
@@ -941,12 +1003,39 @@
       // CheckUpperIs(node, to));
       break;
     }
+    case IrOpcode::kImpossibleToWord32:
+    case IrOpcode::kImpossibleToWord64:
+    case IrOpcode::kImpossibleToFloat32:
+    case IrOpcode::kImpossibleToFloat64:
+    case IrOpcode::kImpossibleToTagged:
+    case IrOpcode::kImpossibleToBit:
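+      // Impossible* conversions only appear on unreachable paths, so there
+      // is nothing to verify.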
+      break;
 
     case IrOpcode::kCheckBounds:
       CheckValueInputIs(node, 0, Type::Any());
       CheckValueInputIs(node, 1, Type::Unsigned31());
       CheckUpperIs(node, Type::Unsigned31());
       break;
+    case IrOpcode::kCheckMaps:
+      // (Any, Internal, ..., Internal) -> Any
+      CheckValueInputIs(node, 0, Type::Any());
+      for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+        CheckValueInputIs(node, i, Type::Internal());
+      }
+      CheckNotTyped(node);
+      break;
+    case IrOpcode::kCheckNumber:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kCheckString:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::String());
+      break;
+    case IrOpcode::kCheckIf:
+      CheckValueInputIs(node, 0, Type::Boolean());
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kCheckTaggedSigned:
       CheckValueInputIs(node, 0, Type::Any());
       CheckUpperIs(node, Type::TaggedSigned());
@@ -958,10 +1047,17 @@
 
     case IrOpcode::kCheckedInt32Add:
     case IrOpcode::kCheckedInt32Sub:
+    case IrOpcode::kCheckedInt32Div:
+    case IrOpcode::kCheckedInt32Mod:
+    case IrOpcode::kCheckedUint32Div:
+    case IrOpcode::kCheckedUint32Mod:
+    case IrOpcode::kCheckedInt32Mul:
     case IrOpcode::kCheckedUint32ToInt32:
     case IrOpcode::kCheckedFloat64ToInt32:
+    case IrOpcode::kCheckedTaggedSignedToInt32:
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedTaggedToFloat64:
+    case IrOpcode::kCheckedTruncateTaggedToWord32:
       break;
 
     case IrOpcode::kCheckFloat64Hole:
@@ -970,7 +1066,11 @@
       break;
     case IrOpcode::kCheckTaggedHole:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckUpperIs(node, Type::Any());
+      CheckUpperIs(node, Type::NonInternal());
+      break;
+    case IrOpcode::kConvertTaggedHoleToUndefined:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::NonInternal());
       break;
 
     case IrOpcode::kLoadField:
@@ -987,6 +1087,8 @@
       // CheckValueInputIs(node, 0, Type::Object());
       // CheckUpperIs(node, ElementAccessOf(node->op()).type));
       break;
+    case IrOpcode::kLoadTypedElement:
+      break;
     case IrOpcode::kStoreField:
       // (Object, fieldtype) -> _|_
       // TODO(rossberg): activate once machine ops are typed.
@@ -1003,10 +1105,16 @@
       // CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
       CheckNotTyped(node);
       break;
+    case IrOpcode::kStoreTypedElement:
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kNumberSilenceNaN:
       CheckValueInputIs(node, 0, Type::Number());
       CheckUpperIs(node, Type::Number());
       break;
+    case IrOpcode::kTypeGuard:
+      CheckUpperIs(node, TypeGuardTypeOf(node->op()));
+      break;
 
     // Machine operators
     // -----------------------
@@ -1024,6 +1132,7 @@
     case IrOpcode::kWord32Clz:
     case IrOpcode::kWord32Ctz:
     case IrOpcode::kWord32ReverseBits:
+    case IrOpcode::kWord32ReverseBytes:
     case IrOpcode::kWord32Popcnt:
     case IrOpcode::kWord64And:
     case IrOpcode::kWord64Or:
@@ -1036,12 +1145,14 @@
     case IrOpcode::kWord64Popcnt:
     case IrOpcode::kWord64Ctz:
     case IrOpcode::kWord64ReverseBits:
+    case IrOpcode::kWord64ReverseBytes:
     case IrOpcode::kWord64Equal:
     case IrOpcode::kInt32Add:
     case IrOpcode::kInt32AddWithOverflow:
     case IrOpcode::kInt32Sub:
     case IrOpcode::kInt32SubWithOverflow:
     case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulWithOverflow:
     case IrOpcode::kInt32MulHigh:
     case IrOpcode::kInt32Div:
     case IrOpcode::kInt32Mod:
@@ -1067,20 +1178,18 @@
     case IrOpcode::kUint64LessThanOrEqual:
     case IrOpcode::kFloat32Add:
     case IrOpcode::kFloat32Sub:
-    case IrOpcode::kFloat32SubPreserveNan:
     case IrOpcode::kFloat32Neg:
     case IrOpcode::kFloat32Mul:
     case IrOpcode::kFloat32Div:
-    case IrOpcode::kFloat32Max:
-    case IrOpcode::kFloat32Min:
     case IrOpcode::kFloat32Abs:
     case IrOpcode::kFloat32Sqrt:
     case IrOpcode::kFloat32Equal:
     case IrOpcode::kFloat32LessThan:
     case IrOpcode::kFloat32LessThanOrEqual:
+    case IrOpcode::kFloat32Max:
+    case IrOpcode::kFloat32Min:
     case IrOpcode::kFloat64Add:
     case IrOpcode::kFloat64Sub:
-    case IrOpcode::kFloat64SubPreserveNan:
     case IrOpcode::kFloat64Neg:
     case IrOpcode::kFloat64Mul:
     case IrOpcode::kFloat64Div:
@@ -1088,20 +1197,28 @@
     case IrOpcode::kFloat64Max:
     case IrOpcode::kFloat64Min:
     case IrOpcode::kFloat64Abs:
+    case IrOpcode::kFloat64Acos:
+    case IrOpcode::kFloat64Acosh:
+    case IrOpcode::kFloat64Asin:
+    case IrOpcode::kFloat64Asinh:
     case IrOpcode::kFloat64Atan:
     case IrOpcode::kFloat64Atan2:
     case IrOpcode::kFloat64Atanh:
+    case IrOpcode::kFloat64Cbrt:
     case IrOpcode::kFloat64Cos:
+    case IrOpcode::kFloat64Cosh:
     case IrOpcode::kFloat64Exp:
     case IrOpcode::kFloat64Expm1:
     case IrOpcode::kFloat64Log:
     case IrOpcode::kFloat64Log1p:
-    case IrOpcode::kFloat64Log2:
     case IrOpcode::kFloat64Log10:
-    case IrOpcode::kFloat64Cbrt:
+    case IrOpcode::kFloat64Log2:
+    case IrOpcode::kFloat64Pow:
     case IrOpcode::kFloat64Sin:
+    case IrOpcode::kFloat64Sinh:
     case IrOpcode::kFloat64Sqrt:
     case IrOpcode::kFloat64Tan:
+    case IrOpcode::kFloat64Tanh:
     case IrOpcode::kFloat32RoundDown:
     case IrOpcode::kFloat64RoundDown:
     case IrOpcode::kFloat32RoundUp:
@@ -1157,6 +1274,8 @@
     case IrOpcode::kLoadStackPointer:
     case IrOpcode::kLoadFramePointer:
     case IrOpcode::kLoadParentFramePointer:
+    case IrOpcode::kUnalignedLoad:
+    case IrOpcode::kUnalignedStore:
     case IrOpcode::kCheckedLoad:
     case IrOpcode::kCheckedStore:
     case IrOpcode::kAtomicLoad:
@@ -1177,10 +1296,10 @@
   Zone zone(graph->zone()->allocator());
   Visitor visitor(&zone, typing, check_inputs);
   AllNodes all(&zone, graph);
-  for (Node* node : all.live) visitor.Check(node);
+  for (Node* node : all.reachable) visitor.Check(node);
 
   // Check the uniqueness of projections.
-  for (Node* proj : all.live) {
+  for (Node* proj : all.reachable) {
     if (proj->opcode() != IrOpcode::kProjection) continue;
     Node* node = proj->InputAt(0);
     for (Node* other : node->uses()) {
@@ -1456,10 +1575,9 @@
       }
     }
   }
-  // Frame state inputs should be frame states (or sentinels).
-  for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(node->op());
-       i++) {
-    Node* input = NodeProperties::GetFrameStateInput(node, i);
+  // Frame state input should be a frame state (or sentinel).
+  if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+    Node* input = NodeProperties::GetFrameStateInput(node);
     CHECK(input->opcode() == IrOpcode::kFrameState ||
           input->opcode() == IrOpcode::kStart ||
           input->opcode() == IrOpcode::kDead);
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 0a13f98..e92a434 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -4,6 +4,8 @@
 
 #include "src/compiler/wasm-compiler.h"
 
+#include <memory>
+
 #include "src/isolate-inl.h"
 
 #include "src/base/platform/elapsed-timer.h"
@@ -16,7 +18,6 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/int64-lowering.h"
-#include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
@@ -62,6 +63,39 @@
   }
 }
 
+Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
+                         Handle<Context> context, Node** parameters,
+                         int parameter_count, Node** effect_ptr,
+                         Node* control) {
+  // At the moment we only allow 2 parameters. If more parameters are needed,
+  // then the size of {inputs} below has to be increased accordingly.
+  DCHECK(parameter_count <= 2);
+  const Runtime::Function* fun = Runtime::FunctionForId(f);
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
+      CallDescriptor::kNoFlags);
+  // CEntryStubConstant nodes have to be created and cached in the main
+  // thread. At the moment this is only done for CEntryStubConstant(1).
+  DCHECK_EQ(1, fun->result_size);
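+  // Input layout: code target, up to two parameters, ref, arity, context,
+  // effect, control -- at most 8 nodes, matching the size of {inputs}.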
+  Node* inputs[8];
+  int count = 0;
+  inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
+  for (int i = 0; i < parameter_count; i++) {
+    inputs[count++] = parameters[i];
+  }
+  inputs[count++] = jsgraph->ExternalConstant(
+      ExternalReference(f, jsgraph->isolate()));         // ref
+  inputs[count++] = jsgraph->Int32Constant(fun->nargs);  // arity
+  inputs[count++] = jsgraph->HeapConstant(context);      // context
+  inputs[count++] = *effect_ptr;
+  inputs[count++] = control;
+
+  Node* node =
+      jsgraph->graph()->NewNode(jsgraph->common()->Call(desc), count, inputs);
+  *effect_ptr = node;
+  return node;
+}
+
 }  // namespace
 
 // A helper that handles building graph fragments for trapping.
@@ -225,30 +259,11 @@
     Node* trap_position_smi = builder_->BuildChangeInt32ToSmi(trap_position_);
 
     if (module && !module->instance->context.is_null()) {
-      // Use the module context to call the runtime to throw an exception.
-      Runtime::FunctionId f = Runtime::kThrowWasmError;
-      const Runtime::Function* fun = Runtime::FunctionForId(f);
-      CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-          jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
-          CallDescriptor::kNoFlags);
-      // CEntryStubConstant nodes have to be created and cached in the main
-      // thread. At the moment this is only done for CEntryStubConstant(1).
-      DCHECK_EQ(1, fun->result_size);
-      Node* inputs[] = {
-          jsgraph()->CEntryStubConstant(fun->result_size),  // C entry
-          trap_reason_smi,                                  // message id
-          trap_position_smi,                                // byte position
-          jsgraph()->ExternalConstant(
-              ExternalReference(f, jsgraph()->isolate())),    // ref
-          jsgraph()->Int32Constant(fun->nargs),               // arity
-          builder_->HeapConstant(module->instance->context),  // context
-          *effect_ptr,
-          *control_ptr};
-
-      Node* node = graph()->NewNode(
-          common()->Call(desc), static_cast<int>(arraysize(inputs)), inputs);
-      *control_ptr = node;
-      *effect_ptr = node;
+      Node* parameters[] = {trap_reason_smi,     // message id
+                            trap_position_smi};  // byte position
+      BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(),
+                         module->instance->context, parameters,
+                         arraysize(parameters), effect_ptr, *control_ptr);
     }
     if (false) {
       // End the control flow with a throw
@@ -275,7 +290,7 @@
       module_(nullptr),
       mem_buffer_(nullptr),
       mem_size_(nullptr),
-      function_table_(nullptr),
+      function_tables_(zone),
       control_(nullptr),
       effect_(nullptr),
       cur_buffer_(def_buffer_),
@@ -361,6 +376,10 @@
   return jsgraph()->Constant(value);
 }
 
+Node* WasmGraphBuilder::Uint32Constant(uint32_t value) {
+  return jsgraph()->Uint32Constant(value);
+}
+
 Node* WasmGraphBuilder::Int32Constant(int32_t value) {
   return jsgraph()->Int32Constant(value);
 }
@@ -369,6 +388,40 @@
   return jsgraph()->Int64Constant(value);
 }
 
+void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
+  // We do not generate stack checks for cctests.
+  if (module_ && !module_->instance->context.is_null()) {
+    Node* limit = graph()->NewNode(
+        jsgraph()->machine()->Load(MachineType::Pointer()),
+        jsgraph()->ExternalConstant(
+            ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
+        jsgraph()->IntPtrConstant(0), *effect_, *control_);
+    Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
+
+    Node* check =
+        graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
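+    // The stack grows downwards, so execution continues (the likely branch)
+    // as long as the limit is still below the stack pointer.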
+
+    Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
+
+    Node* effect_true = *effect_;
+
+    Node* effect_false;
+    // Generate a call to the runtime if there is a stack check failure.
+    {
+      Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
+                                      module_->instance->context, nullptr, 0,
+                                      effect_, stack_check.if_false);
+      effect_false = node;
+    }
+
+    Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
+                                  effect_true, effect_false, stack_check.merge);
+
+    *control_ = stack_check.merge;
+    *effect_ = ephi;
+  }
+}
+
 Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
                               wasm::WasmCodePosition position) {
   const Operator* op;
@@ -537,7 +590,7 @@
       op = m->Float32Add();
       break;
     case wasm::kExprF32Sub:
-      op = m->Float32SubPreserveNan();
+      op = m->Float32Sub();
       break;
     case wasm::kExprF32Mul:
       op = m->Float32Mul();
@@ -568,7 +621,7 @@
       op = m->Float64Add();
       break;
     case wasm::kExprF64Sub:
-      op = m->Float64SubPreserveNan();
+      op = m->Float64Sub();
       break;
     case wasm::kExprF64Mul:
       op = m->Float64Mul();
@@ -596,13 +649,17 @@
       std::swap(left, right);
       break;
     case wasm::kExprF32Min:
-      return BuildF32Min(left, right);
+      op = m->Float32Min();
+      break;
     case wasm::kExprF64Min:
-      return BuildF64Min(left, right);
+      op = m->Float64Min();
+      break;
     case wasm::kExprF32Max:
-      return BuildF32Max(left, right);
+      op = m->Float32Max();
+      break;
     case wasm::kExprF64Max:
-      return BuildF64Max(left, right);
+      op = m->Float64Max();
+      break;
     case wasm::kExprF64Pow:
       return BuildF64Pow(left, right);
     case wasm::kExprF64Atan2:
@@ -646,12 +703,8 @@
       op = m->Float32Abs();
       break;
     case wasm::kExprF32Neg: {
-      if (m->Float32Neg().IsSupported()) {
-        op = m->Float32Neg().op();
-        break;
-      } else {
-        return BuildF32Neg(input);
-      }
+      op = m->Float32Neg();
+      break;
     }
     case wasm::kExprF32Sqrt:
       op = m->Float32Sqrt();
@@ -660,12 +713,8 @@
       op = m->Float64Abs();
       break;
     case wasm::kExprF64Neg: {
-      if (m->Float64Neg().IsSupported()) {
-        op = m->Float64Neg().op();
-        break;
-      } else {
-        return BuildF64Neg(input);
-      }
+      op = m->Float64Neg();
+      break;
     }
     case wasm::kExprF64Sqrt:
       op = m->Float64Sqrt();
@@ -822,11 +871,12 @@
       op = m->Word64Clz();
       break;
     case wasm::kExprI64Ctz: {
-      if (m->Word64Ctz().IsSupported()) {
-        op = m->Word64Ctz().op();
+      OptionalOperator ctz64 = m->Word64Ctz();
+      if (ctz64.IsSupported()) {
+        op = ctz64.op();
         break;
       } else if (m->Is32() && m->Word32Ctz().IsSupported()) {
-        op = m->Word64CtzPlaceholder();
+        op = ctz64.placeholder();
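+        // The placeholder is patched to a 32-bit implementation during
+        // int64 lowering.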
         break;
       } else if (m->Word64ReverseBits().IsSupported()) {
         Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
@@ -837,10 +887,11 @@
       }
     }
     case wasm::kExprI64Popcnt: {
-      if (m->Word64Popcnt().IsSupported()) {
-        op = m->Word64Popcnt().op();
+      OptionalOperator popcnt64 = m->Word64Popcnt();
+      if (popcnt64.IsSupported()) {
+        op = popcnt64.op();
       } else if (m->Is32() && m->Word32Popcnt().IsSupported()) {
-        op = m->Word64PopcntPlaceholder();
+        op = popcnt64.placeholder();
       } else {
         return BuildI64Popcnt(input);
       }
@@ -881,6 +932,8 @@
       return BuildI64UConvertF32(input, position);
     case wasm::kExprI64UConvertF64:
       return BuildI64UConvertF64(input, position);
+    case wasm::kExprGrowMemory:
+      return BuildGrowMemory(input);
     case wasm::kExprI32AsmjsLoadMem8S:
       return BuildAsmjsLoadMem(MachineType::Int8(), input);
     case wasm::kExprI32AsmjsLoadMem8U:
@@ -996,32 +1049,155 @@
   return node;
 }
 
-Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
-  Node* result =
-      Unop(wasm::kExprF32ReinterpretI32,
-           Binop(wasm::kExprI32Xor, Unop(wasm::kExprI32ReinterpretF32, input),
-                 jsgraph()->Int32Constant(0x80000000)));
-
-  return result;
+static bool ReverseBytesSupported(MachineOperatorBuilder* m,
+                                  size_t size_in_bytes) {
+  switch (size_in_bytes) {
+    case 4:
+      return m->Word32ReverseBytes().IsSupported();
+    case 8:
+      return m->Word64ReverseBytes().IsSupported();
+    default:
+      break;
+  }
+  return false;
 }
 
-Node* WasmGraphBuilder::BuildF64Neg(Node* input) {
-#if WASM_64
-  Node* result =
-      Unop(wasm::kExprF64ReinterpretI64,
-           Binop(wasm::kExprI64Xor, Unop(wasm::kExprI64ReinterpretF64, input),
-                 jsgraph()->Int64Constant(0x8000000000000000)));
+Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
+                                              wasm::LocalType wasmtype) {
+  Node* result;
+  Node* value = node;
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  int valueSizeInBytes = 1 << ElementSizeLog2Of(memtype.representation());
+  int valueSizeInBits = 8 * valueSizeInBytes;
+  bool isFloat = false;
+
+  switch (memtype.representation()) {
+    case MachineRepresentation::kFloat64:
+      value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+      isFloat = true;
+    case MachineRepresentation::kWord64:
+      result = jsgraph()->Int64Constant(0);
+      break;
+    case MachineRepresentation::kFloat32:
+      value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+      isFloat = true;
+    case MachineRepresentation::kWord32:
+    case MachineRepresentation::kWord16:
+      result = jsgraph()->Int32Constant(0);
+      break;
+    case MachineRepresentation::kWord8:
+      // Single-byte values need no endianness change; return the node as-is.
+      return node;
+    default:
+      UNREACHABLE();
+      break;
+  }
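+  // Note the intentional fall-throughs above: float values are bitcast to
+  // integers of the same width and then swapped through the integer paths.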
+
+  int i;
+  uint32_t shiftCount;
+
+  if (ReverseBytesSupported(m, valueSizeInBytes < 4 ? 4 : valueSizeInBytes)) {
+    switch (valueSizeInBytes) {
+      case 2:
+        result =
+            graph()->NewNode(m->Word32ReverseBytes().op(),
+                             graph()->NewNode(m->Word32Shl(), value,
+                                              jsgraph()->Int32Constant(16)));
+        break;
+      case 4:
+        result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+        break;
+      case 8:
+        result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
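+    // Without ReverseBytes support, swap bytes pairwise with shifts and
+    // masks; e.g. a 4-byte value 0x11223344 becomes 0x44332211.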
+    for (i = 0, shiftCount = valueSizeInBits - 8; i < valueSizeInBits / 2;
+         i += 8, shiftCount -= 16) {
+      Node* shiftLower;
+      Node* shiftHigher;
+      Node* lowerByte;
+      Node* higherByte;
+
+      DCHECK(shiftCount > 0);
+      DCHECK((shiftCount + 8) % 16 == 0);
+
+      if (valueSizeInBits > 32) {
+        shiftLower = graph()->NewNode(m->Word64Shl(), value,
+                                      jsgraph()->Int64Constant(shiftCount));
+        shiftHigher = graph()->NewNode(m->Word64Shr(), value,
+                                       jsgraph()->Int64Constant(shiftCount));
+        lowerByte = graph()->NewNode(
+            m->Word64And(), shiftLower,
+            jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
+                                     << (valueSizeInBits - 8 - i)));
+        higherByte = graph()->NewNode(
+            m->Word64And(), shiftHigher,
+            jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
+        result = graph()->NewNode(m->Word64Or(), result, lowerByte);
+        result = graph()->NewNode(m->Word64Or(), result, higherByte);
+      } else {
+        shiftLower = graph()->NewNode(m->Word32Shl(), value,
+                                      jsgraph()->Int32Constant(shiftCount));
+        shiftHigher = graph()->NewNode(m->Word32Shr(), value,
+                                       jsgraph()->Int32Constant(shiftCount));
+        lowerByte = graph()->NewNode(
+            m->Word32And(), shiftLower,
+            jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
+                                     << (valueSizeInBits - 8 - i)));
+        higherByte = graph()->NewNode(
+            m->Word32And(), shiftHigher,
+            jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
+        result = graph()->NewNode(m->Word32Or(), result, lowerByte);
+        result = graph()->NewNode(m->Word32Or(), result, higherByte);
+      }
+    }
+  }
+
+  if (isFloat) {
+    switch (memtype.representation()) {
+      case MachineRepresentation::kFloat64:
+        result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+        break;
+      case MachineRepresentation::kFloat32:
+        result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+
+  // Sign-extend the value if the memory type is signed.
+  if (memtype.IsSigned()) {
+    DCHECK(!isFloat);
+    if (valueSizeInBits < 32) {
+      Node* shiftBitCount;
+      // Perform sign extension using the following trick:
+      //   result = (x << (machine_width - type_width))
+      //            >> (machine_width - type_width)
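+      // For example, an 8-bit 0x80 loaded as i32 becomes
+      // (0x80 << 24) >> 24 = 0xFFFFFF80, i.e. -128.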
+      if (wasmtype == wasm::kAstI64) {
+        shiftBitCount = jsgraph()->Int32Constant(64 - valueSizeInBits);
+        result = graph()->NewNode(
+            m->Word64Sar(),
+            graph()->NewNode(m->Word64Shl(),
+                             graph()->NewNode(m->ChangeInt32ToInt64(), result),
+                             shiftBitCount),
+            shiftBitCount);
+      } else if (wasmtype == wasm::kAstI32) {
+        shiftBitCount = jsgraph()->Int32Constant(32 - valueSizeInBits);
+        result = graph()->NewNode(
+            m->Word32Sar(),
+            graph()->NewNode(m->Word32Shl(), result, shiftBitCount),
+            shiftBitCount);
+      }
+    }
+  }
 
   return result;
-#else
-  MachineOperatorBuilder* m = jsgraph()->machine();
-
-  Node* old_high_word = graph()->NewNode(m->Float64ExtractHighWord32(), input);
-  Node* new_high_word = Binop(wasm::kExprI32Xor, old_high_word,
-                              jsgraph()->Int32Constant(0x80000000));
-
-  return graph()->NewNode(m->Float64InsertHighWord32(), input, new_high_word);
-#endif
 }
 
 Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
@@ -1064,86 +1240,6 @@
 #endif
 }
 
-Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
-  Diamond left_le_right(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF32Le, left, right));
-
-  Diamond right_lt_left(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF32Lt, right, left));
-
-  Diamond left_is_not_nan(graph(), jsgraph()->common(),
-                          Binop(wasm::kExprF32Eq, left, left));
-
-  return left_le_right.Phi(
-      wasm::kAstF32, left,
-      right_lt_left.Phi(
-          wasm::kAstF32, right,
-          left_is_not_nan.Phi(
-              wasm::kAstF32,
-              Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
-              Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
-}
-
-Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
-  Diamond left_ge_right(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF32Ge, left, right));
-
-  Diamond right_gt_left(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF32Gt, right, left));
-
-  Diamond left_is_not_nan(graph(), jsgraph()->common(),
-                          Binop(wasm::kExprF32Eq, left, left));
-
-  return left_ge_right.Phi(
-      wasm::kAstF32, left,
-      right_gt_left.Phi(
-          wasm::kAstF32, right,
-          left_is_not_nan.Phi(
-              wasm::kAstF32,
-              Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
-              Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
-}
-
-Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
-  Diamond left_le_right(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF64Le, left, right));
-
-  Diamond right_lt_left(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF64Lt, right, left));
-
-  Diamond left_is_not_nan(graph(), jsgraph()->common(),
-                          Binop(wasm::kExprF64Eq, left, left));
-
-  return left_le_right.Phi(
-      wasm::kAstF64, left,
-      right_lt_left.Phi(
-          wasm::kAstF64, right,
-          left_is_not_nan.Phi(
-              wasm::kAstF64,
-              Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
-              Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
-}
-
-Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
-  Diamond left_ge_right(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF64Ge, left, right));
-
-  Diamond right_gt_left(graph(), jsgraph()->common(),
-                        Binop(wasm::kExprF64Lt, right, left));
-
-  Diamond left_is_not_nan(graph(), jsgraph()->common(),
-                          Binop(wasm::kExprF64Eq, left, left));
-
-  return left_ge_right.Phi(
-      wasm::kAstF64, left,
-      right_gt_left.Phi(
-          wasm::kAstF64, right,
-          left_is_not_nan.Phi(
-              wasm::kAstF64,
-              Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
-              Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
-}
-
 Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
                                             wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1355,7 +1451,7 @@
 Node* WasmGraphBuilder::BuildF64Pow(Node* left, Node* right) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_pow_wrapper_function(jsgraph()->isolate());
+      ExternalReference::wasm_float64_pow(jsgraph()->isolate());
   return BuildCFuncInstruction(ref, type, left, right);
 }
 
@@ -1571,6 +1667,31 @@
   return load;
 }
 
+Node* WasmGraphBuilder::BuildGrowMemory(Node* input) {
+  Runtime::FunctionId function_id = Runtime::kWasmGrowMemory;
+  const Runtime::Function* function = Runtime::FunctionForId(function_id);
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
+      CallDescriptor::kNoFlags);
+  Node** control_ptr = control_;
+  Node** effect_ptr = effect_;
+  wasm::ModuleEnv* module = module_;
+  input = BuildChangeUint32ToSmi(input);
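+  // The runtime call takes the page count as a Smi and returns a Smi result,
+  // which is untagged back to int32 below.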
+  Node* inputs[] = {
+      jsgraph()->CEntryStubConstant(function->result_size), input,  // C entry
+      jsgraph()->ExternalConstant(
+          ExternalReference(function_id, jsgraph()->isolate())),  // ref
+      jsgraph()->Int32Constant(function->nargs),                  // arity
+      jsgraph()->HeapConstant(module->instance->context),         // context
+      *effect_ptr,
+      *control_ptr};
+  Node* node = graph()->NewNode(jsgraph()->common()->Call(desc),
+                                static_cast<int>(arraysize(inputs)), inputs);
+  *effect_ptr = node;
+  node = BuildChangeSmiToInt32(node);
+  return node;
+}
+
 Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
                                      wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1894,11 +2015,14 @@
   // Compute the code object by loading it from the function table.
   Node* key = args[0];
 
+  // Assume only one table for now.
+  DCHECK_LE(module_->instance->function_tables.size(), 1u);
   // Bounds check the index.
-  int table_size = static_cast<int>(module_->FunctionTableSize());
+  uint32_t table_size =
+      module_->IsValidTable(0) ? module_->GetTable(0)->max_size : 0;
   if (table_size > 0) {
     // Bounds check against the table size.
-    Node* size = Int32Constant(static_cast<int>(table_size));
+    Node* size = Uint32Constant(table_size);
     Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
     trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
   } else {
@@ -1906,7 +2030,7 @@
     trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
     return trap_->GetTrapValue(module_->GetSignature(index));
   }
-  Node* table = FunctionTable();
+  Node* table = FunctionTable(0);
 
   // Load signature from the table and check.
   // The table is a FixedArray; signatures are encoded as SMIs.
@@ -1928,13 +2052,13 @@
   }
 
   // Load code object from the table.
-  int offset = fixed_offset + kPointerSize * table_size;
+  uint32_t offset = fixed_offset + kPointerSize * table_size;
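+  // Code objects are stored after the table_size signature slots in the
+  // FixedArray, hence this displacement.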
   Node* load_code = graph()->NewNode(
       machine->Load(MachineType::AnyTagged()), table,
       graph()->NewNode(machine->Int32Add(),
                        graph()->NewNode(machine->Word32Shl(), key,
                                         Int32Constant(kPointerSizeLog2)),
-                       Int32Constant(offset)),
+                       Uint32Constant(offset)),
       *effect_, *control_);
 
   args[0] = load_code;
@@ -2068,20 +2192,16 @@
   return value;
 }
 
-Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
+Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
   switch (type) {
     case wasm::kAstI32:
       return BuildChangeInt32ToTagged(node);
     case wasm::kAstI64:
-      // TODO(titzer): i64->JS has no good solution right now. Using lower 32
-      // bits.
-      if (jsgraph()->machine()->Is64()) {
-        // On 32 bit platforms we do not have to do the truncation because the
-        // node we get in as a parameter only contains the low word anyways.
-        node = graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(),
-                                node);
-      }
-      return BuildChangeInt32ToTagged(node);
+      DCHECK(module_ && !module_->instance->context.is_null());
+      // Throw a TypeError: wasm i64 values cannot be represented as JS
+      // numbers.
+      return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+                                module_->instance->context, nullptr, 0, effect_,
+                                *control_);
     case wasm::kAstF32:
       node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
                               node);
@@ -2107,7 +2227,6 @@
   Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
                                   node, context, effect, control);
 
-  *control_ = result;
   *effect_ = result;
 
   return result;
@@ -2138,7 +2257,7 @@
     //     else BuildLoadHeapNumberValue(y)
     Node* object = NodeProperties::GetValueInput(value, 0);
     Node* context = NodeProperties::GetContextInput(value);
-    Node* frame_state = NodeProperties::GetFrameStateInput(value, 0);
+    Node* frame_state = NodeProperties::GetFrameStateInput(value);
     Node* effect = NodeProperties::GetEffectInput(value);
     Node* control = NodeProperties::GetControlInput(value);
 
@@ -2283,6 +2402,15 @@
   return value;
 }
 
+Node* WasmGraphBuilder::BuildChangeUint32ToSmi(Node* value) {
+  if (jsgraph()->machine()->Is64()) {
+    value =
+        graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), value);
+  }
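+  // Tag as a Smi by shifting the value into the upper bits; on 64-bit
+  // targets the value is widened first so the shift is done on the full word.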
+  return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
+                          BuildSmiShiftBitsConstant());
+}
+
 Node* WasmGraphBuilder::BuildChangeSmiToFloat64(Node* value) {
   return graph()->NewNode(jsgraph()->machine()->ChangeInt32ToFloat64(),
                           BuildChangeSmiToInt32(value));
@@ -2392,18 +2520,18 @@
     retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
                               graph()->start());
   }
-  Node* jsval =
-      ToJS(retval, context,
-           sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+  Node* jsval = ToJS(
+      retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
   Node* ret =
       graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
 
   MergeControlToEnd(jsgraph(), ret);
 }
 
-void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
+void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
                                             wasm::FunctionSig* sig) {
-  int js_count = function->shared()->internal_formal_parameter_count();
+  DCHECK(target->IsCallable());
+
   int wasm_count = static_cast<int>(sig->parameter_count());
   int param_count;
   if (jsgraph()->machine()->Is64()) {
@@ -2418,58 +2546,71 @@
   Node* start = Start(param_count + 3);
   *effect_ = start;
   *control_ = start;
-  // JS context is the last parameter.
-  Node* context = HeapConstant(Handle<Context>(function->context(), isolate));
   Node** args = Buffer(wasm_count + 7);
 
-  bool arg_count_before_args = false;
-  bool add_new_target_undefined = false;
+  // The default context of the target.
+  Handle<Context> target_context = isolate->native_context();
 
+  // Optimization: check if the target is a JSFunction with the right arity so
+  // that we can call it directly.
+  bool call_direct = false;
   int pos = 0;
-  if (js_count == wasm_count) {
-    // exact arity match, just call the function directly.
-    desc = Linkage::GetJSCallDescriptor(graph()->zone(), false, wasm_count + 1,
-                                        CallDescriptor::kNoFlags);
-    arg_count_before_args = false;
-    add_new_target_undefined = true;
-  } else {
-    // Use the Call builtin.
+  if (target->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+    if (function->shared()->internal_formal_parameter_count() == wasm_count) {
+      call_direct = true;
+
+      args[pos++] = jsgraph()->Constant(target);  // target callable.
+      // Receiver.
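+      // Sloppy-mode, non-native functions expect the global proxy as the
+      // implicit receiver; strict and native functions get undefined.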
+      if (is_sloppy(function->shared()->language_mode()) &&
+          !function->shared()->native()) {
+        args[pos++] =
+            HeapConstant(handle(function->context()->global_proxy(), isolate));
+      } else {
+        args[pos++] = jsgraph()->Constant(
+            handle(isolate->heap()->undefined_value(), isolate));
+      }
+
+      desc = Linkage::GetJSCallDescriptor(
+          graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
+
+      // For a direct call we have to use the context of the JSFunction.
+      target_context = handle(function->context());
+    }
+  }
+
+  // If the target cannot be called directly, go through the Call builtin.
+  if (!call_direct) {
     Callable callable = CodeFactory::Call(isolate);
     args[pos++] = jsgraph()->HeapConstant(callable.code());
+    args[pos++] = jsgraph()->Constant(target);           // target callable
+    args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
+    args[pos++] = jsgraph()->Constant(
+        handle(isolate->heap()->undefined_value(), isolate));  // receiver
+
     desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
                                           callable.descriptor(), wasm_count + 1,
                                           CallDescriptor::kNoFlags);
-    arg_count_before_args = true;
   }
 
-  args[pos++] = jsgraph()->Constant(function);  // JS function.
-  if (arg_count_before_args) {
-    args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
-  }
-  // JS receiver.
-  Handle<Object> global(function->context()->global_object(), isolate);
-  args[pos++] = jsgraph()->Constant(global);
-
   // Convert WASM numbers to JS values.
   int param_index = 0;
   for (int i = 0; i < wasm_count; ++i) {
     Node* param =
         graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
-    args[pos++] = ToJS(param, context, sig->GetParam(i));
+    args[pos++] = ToJS(param, sig->GetParam(i));
     if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
       // On 32 bit platforms we have to skip the high word of int64 parameters.
       param_index++;
     }
   }
 
-  if (add_new_target_undefined) {
+  if (call_direct) {
     args[pos++] = jsgraph()->UndefinedConstant();  // new target
-  }
-
-  if (!arg_count_before_args) {
     args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
   }
-  args[pos++] = context;
+
+  args[pos++] = HeapConstant(target_context);
   args[pos++] = *effect_;
   args[pos++] = *control_;
 
@@ -2478,7 +2619,7 @@
   // Convert the return value back.
   Node* ret;
   Node* val =
-      FromJS(call, context,
+      FromJS(call, HeapConstant(isolate->native_context()),
              sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
   if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
       sig->GetReturn() == wasm::kAstI64) {
@@ -2523,17 +2664,22 @@
   }
 }
 
-Node* WasmGraphBuilder::FunctionTable() {
+Node* WasmGraphBuilder::FunctionTable(uint32_t index) {
   DCHECK(module_ && module_->instance &&
-         !module_->instance->function_table.is_null());
-  if (!function_table_) {
-    function_table_ = HeapConstant(module_->instance->function_table);
+         index < module_->instance->function_tables.size());
+  if (!function_tables_.size()) {
+    for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
+      DCHECK(!module_->instance->function_tables[i].is_null());
+      function_tables_.push_back(
+          HeapConstant(module_->instance->function_tables[i]));
+    }
   }
-  return function_table_;
+  return function_tables_[index];
 }
 
-Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
-  MachineType mem_type = module_->GetGlobalType(index);
+Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
+  MachineType mem_type =
+      wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
   Node* addr = jsgraph()->RelocatableIntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
                                   module_->module->globals[index].offset),
@@ -2545,8 +2691,9 @@
   return node;
 }
 
-Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
-  MachineType mem_type = module_->GetGlobalType(index);
+Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
+  MachineType mem_type =
+      wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
   Node* addr = jsgraph()->RelocatableIntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
                                   module_->module->globals[index].offset),
@@ -2588,132 +2735,9 @@
                                 jsgraph()->RelocatableInt32Constant(
                                     static_cast<uint32_t>(effective_size),
                                     RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
-
   trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
 }
 
-MachineType WasmGraphBuilder::GetTypeForUnalignedAccess(uint32_t alignment,
-                                                        bool signExtend) {
-  switch (alignment) {
-    case 0:
-      return signExtend ? MachineType::Int8() : MachineType::Uint8();
-    case 1:
-      return signExtend ? MachineType::Int16() : MachineType::Uint16();
-    case 2:
-      return signExtend ? MachineType::Int32() : MachineType::Uint32();
-    default:
-      UNREACHABLE();
-      return MachineType::None();
-  }
-}
-
-Node* WasmGraphBuilder::GetUnalignedLoadOffsetNode(Node* baseOffset,
-                                                   int numberOfBytes,
-                                                   int stride, int current) {
-  int offset;
-  wasm::WasmOpcode addOpcode;
-
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-  offset = numberOfBytes - stride - current;
-#elif defined(V8_TARGET_BIG_ENDIAN)
-  offset = current;
-#else
-#error Unsupported endianness
-#endif
-
-#if WASM_64
-  addOpcode = wasm::kExprI64Add;
-#else
-  addOpcode = wasm::kExprI32Add;
-#endif
-
-  if (offset == 0) {
-    return baseOffset;
-  } else {
-    return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
-  }
-}
-
-Node* WasmGraphBuilder::BuildUnalignedLoad(wasm::LocalType type,
-                                           MachineType memtype, Node* index,
-                                           uint32_t offset,
-                                           uint32_t alignment) {
-  Node* result;
-  Node* load;
-  bool extendTo64Bit = false;
-
-  wasm::WasmOpcode shiftOpcode;
-  wasm::WasmOpcode orOpcode;
-  Node* shiftConst;
-
-  bool signExtend = memtype.IsSigned();
-
-  bool isFloat = IsFloatingPoint(memtype.representation());
-  int stride =
-      1 << ElementSizeLog2Of(
-          GetTypeForUnalignedAccess(alignment, false).representation());
-  int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
-  DCHECK(numberOfBytes % stride == 0);
-
-  switch (type) {
-    case wasm::kAstI64:
-    case wasm::kAstF64:
-      shiftOpcode = wasm::kExprI64Shl;
-      orOpcode = wasm::kExprI64Ior;
-      result = jsgraph()->Int64Constant(0);
-      shiftConst = jsgraph()->Int64Constant(8 * stride);
-      extendTo64Bit = true;
-      break;
-    case wasm::kAstI32:
-    case wasm::kAstF32:
-      shiftOpcode = wasm::kExprI32Shl;
-      orOpcode = wasm::kExprI32Ior;
-      result = jsgraph()->Int32Constant(0);
-      shiftConst = jsgraph()->Int32Constant(8 * stride);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  Node* baseOffset = MemBuffer(offset);
-
-  for (int i = 0; i < numberOfBytes; i += stride) {
-    result = Binop(shiftOpcode, result, shiftConst);
-    load = graph()->NewNode(
-        jsgraph()->machine()->Load(
-            GetTypeForUnalignedAccess(alignment, signExtend)),
-        GetUnalignedLoadOffsetNode(baseOffset, numberOfBytes, stride, i), index,
-        *effect_, *control_);
-    *effect_ = load;
-    if (extendTo64Bit) {
-      if (signExtend) {
-        load =
-            graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
-      } else {
-        load = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
-                                load);
-      }
-    }
-    signExtend = false;
-    result = Binop(orOpcode, result, load);
-  }
-
-  // Convert to float
-  if (isFloat) {
-    switch (type) {
-      case wasm::kAstF32:
-        result = Unop(wasm::kExprF32ReinterpretI32, result);
-        break;
-      case wasm::kAstF64:
-        result = Unop(wasm::kExprF64ReinterpretI64, result);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-
-  return result;
-}
 
 Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
                                 Node* index, uint32_t offset,
@@ -2730,11 +2754,17 @@
       jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
     load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
                             MemBuffer(offset), index, *effect_, *control_);
-    *effect_ = load;
   } else {
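+    // The machine-level UnalignedLoad operator replaces the hand-built
+    // byte-by-byte lowering that BuildUnalignedLoad used to do.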
-    load = BuildUnalignedLoad(type, memtype, index, offset, alignment);
+    load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
+                            MemBuffer(offset), index, *effect_, *control_);
   }
 
+  *effect_ = load;
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+  load = BuildChangeEndianness(load, memtype, type);
+#endif
+
   if (type == wasm::kAstI64 &&
       ElementSizeLog2Of(memtype.representation()) < 3) {
     // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
@@ -2751,97 +2781,6 @@
   return load;
 }
 
-Node* WasmGraphBuilder::GetUnalignedStoreOffsetNode(Node* baseOffset,
-                                                    int numberOfBytes,
-                                                    int stride, int current) {
-  int offset;
-  wasm::WasmOpcode addOpcode;
-
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-  offset = current;
-#elif defined(V8_TARGET_BIG_ENDIAN)
-  offset = numberOfBytes - stride - current;
-#else
-#error Unsupported endianness
-#endif
-
-#if WASM_64
-  addOpcode = wasm::kExprI64Add;
-#else
-  addOpcode = wasm::kExprI32Add;
-#endif
-
-  if (offset == 0) {
-    return baseOffset;
-  } else {
-    return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
-  }
-}
-
-Node* WasmGraphBuilder::BuildUnalignedStore(MachineType memtype, Node* index,
-                                            uint32_t offset, uint32_t alignment,
-                                            Node* val) {
-  Node* store;
-  Node* newValue;
-
-  wasm::WasmOpcode shiftOpcode;
-
-  Node* shiftConst;
-  bool extendTo64Bit = false;
-  bool isFloat = IsFloatingPoint(memtype.representation());
-  int stride = 1 << ElementSizeLog2Of(
-                   GetTypeForUnalignedAccess(alignment).representation());
-  int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
-  DCHECK(numberOfBytes % stride == 0);
-
-  StoreRepresentation rep(GetTypeForUnalignedAccess(alignment).representation(),
-                          kNoWriteBarrier);
-
-  if (ElementSizeLog2Of(memtype.representation()) <= 2) {
-    shiftOpcode = wasm::kExprI32ShrU;
-    shiftConst = jsgraph()->Int32Constant(8 * stride);
-  } else {
-    shiftOpcode = wasm::kExprI64ShrU;
-    shiftConst = jsgraph()->Int64Constant(8 * stride);
-    extendTo64Bit = true;
-  }
-
-  newValue = val;
-  if (isFloat) {
-    switch (memtype.representation()) {
-      case MachineRepresentation::kFloat64:
-        newValue = Unop(wasm::kExprI64ReinterpretF64, val);
-        break;
-      case MachineRepresentation::kFloat32:
-        newValue = Unop(wasm::kExprI32ReinterpretF32, val);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-
-  Node* baseOffset = MemBuffer(offset);
-
-  for (int i = 0; i < numberOfBytes - stride; i += stride) {
-    store = graph()->NewNode(
-        jsgraph()->machine()->Store(rep),
-        GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride, i),
-        index,
-        extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
-        *effect_, *control_);
-    newValue = Binop(shiftOpcode, newValue, shiftConst);
-    *effect_ = store;
-  }
-  store = graph()->NewNode(
-      jsgraph()->machine()->Store(rep),
-      GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride,
-                                  numberOfBytes - stride),
-      index,
-      extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
-      *effect_, *control_);
-  *effect_ = store;
-  return val;
-}
 
 Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
                                  uint32_t offset, uint32_t alignment, Node* val,
@@ -2851,20 +2790,29 @@
   // WASM semantics throw on OOB. Introduce explicit bounds check.
   BoundsCheckMem(memtype, index, offset, position);
   StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+
   bool aligned = static_cast<int>(alignment) >=
                  ElementSizeLog2Of(memtype.representation());
 
+#if defined(V8_TARGET_BIG_ENDIAN)
+  val = BuildChangeEndianness(val, memtype);
+#endif
+
   if (aligned ||
       jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
     StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
     store =
         graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
                          index, val, *effect_, *control_);
-    *effect_ = store;
   } else {
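+    // Unaligned stores are likewise delegated to the machine-level
+    // UnalignedStore operator.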
-    store = BuildUnalignedStore(memtype, index, offset, alignment, val);
+    UnalignedStoreRepresentation rep(memtype.representation());
+    store =
+        graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
+                         MemBuffer(offset), index, val, *effect_, *control_);
   }
 
+  *effect_ = store;
+
   return store;
 }
 
@@ -2918,44 +2866,44 @@
     source_position_table_->SetSourcePosition(node, pos);
 }
 
-static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
-                                      CompilationInfo* info,
-                                      const char* message, uint32_t index,
-                                      wasm::WasmName func_name) {
-  Isolate* isolate = info->isolate();
-  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
-    ScopedVector<char> buffer(128);
-    SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length(),
-             func_name.start());
-    Handle<String> name_str =
-        isolate->factory()->NewStringFromAsciiChecked(buffer.start());
-    Handle<String> script_str =
-        isolate->factory()->NewStringFromAsciiChecked("(WASM)");
-    Handle<Code> code = info->code();
-    Handle<SharedFunctionInfo> shared =
-        isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
-    PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
-                                     *script_str, 0, 0));
+Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
+                               const NodeVector& inputs) {
+  switch (opcode) {
+    case wasm::kExprI32x4ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4Splat:
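+      // Splat: build a vector with the one scalar in all four lanes.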
+      return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
+                              inputs[0], inputs[0], inputs[0], inputs[0]);
+    default:
+      return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
 }
 
-Handle<JSFunction> CompileJSToWasmWrapper(
-    Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
-    Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
-  const wasm::WasmFunction* func = &module->module->functions[index];
+static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+                                      Isolate* isolate, Handle<Code> code,
+                                      const char* message, uint32_t index,
+                                      const wasm::WasmName& module_name,
+                                      const wasm::WasmName& func_name) {
+  DCHECK(isolate->logger()->is_logging_code_events() ||
+         isolate->is_profiling());
 
-  //----------------------------------------------------------------------------
-  // Create the JSFunction object.
-  //----------------------------------------------------------------------------
+  ScopedVector<char> buffer(128);
+  SNPrintF(buffer, "%s#%d:%.*s:%.*s", message, index, module_name.length(),
+           module_name.start(), func_name.length(), func_name.start());
+  Handle<String> name_str =
+      isolate->factory()->NewStringFromAsciiChecked(buffer.start());
+  Handle<String> script_str =
+      isolate->factory()->NewStringFromAsciiChecked("(WASM)");
   Handle<SharedFunctionInfo> shared =
-      isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
-  int params = static_cast<int>(func->sig->parameter_count());
-  shared->set_length(params);
-  shared->set_internal_formal_parameter_count(params);
-  Handle<JSFunction> function = isolate->factory()->NewFunction(
-      isolate->wasm_function_map(), name, MaybeHandle<Code>());
-  function->SetInternalField(0, *module_object);
-  function->set_shared(*shared);
+      isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
+  PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
+                                   *script_str, 0, 0));
+}
+
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+                                    Handle<Code> wasm_code, uint32_t index) {
+  const wasm::WasmFunction* func = &module->module->functions[index];
 
   //----------------------------------------------------------------------------
   // Create the Graph
@@ -2978,62 +2926,59 @@
   //----------------------------------------------------------------------------
   // Run the compilation pipeline.
   //----------------------------------------------------------------------------
-  {
-    if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
-      OFStream os(stdout);
-      os << "-- Graph after change lowering -- " << std::endl;
-      os << AsRPO(graph);
-    }
-
-    // Schedule and compile to machine code.
-    int params = static_cast<int>(
-        module->GetFunctionSignature(index)->parameter_count());
-    CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
-        &zone, false, params + 1, CallDescriptor::kNoFlags);
-    Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
-    bool debugging =
-#if DEBUG
-        true;
-#else
-        FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
-#endif
-    Vector<const char> func_name = ArrayVector("js-to-wasm");
-
-    static unsigned id = 0;
-    Vector<char> buffer;
-    if (debugging) {
-      buffer = Vector<char>::New(128);
-      int chars = SNPrintF(buffer, "js-to-wasm#%d", id);
-      func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
-    }
-
-    CompilationInfo info(func_name, isolate, &zone, flags);
-    Handle<Code> code =
-        Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
-#ifdef ENABLE_DISASSEMBLER
-    if (FLAG_print_opt_code && !code.is_null()) {
-      OFStream os(stdout);
-      code->Disassemble(buffer.start(), os);
-    }
-#endif
-    if (debugging) {
-      buffer.Dispose();
-    }
-
-    RecordFunctionCompilation(
-        CodeEventListener::FUNCTION_TAG, &info, "js-to-wasm", index,
-        module->module->GetName(func->name_offset, func->name_length));
-    // Set the JSFunction's machine code.
-    function->set_code(*code);
+  if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
+    OFStream os(stdout);
+    os << "-- Graph after change lowering -- " << std::endl;
+    os << AsRPO(graph);
   }
-  return function;
+
+  // Schedule and compile to machine code.
+  int params =
+      static_cast<int>(module->GetFunctionSignature(index)->parameter_count());
+  CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
+      &zone, false, params + 1, CallDescriptor::kNoFlags);
+  Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
+  bool debugging =
+#if DEBUG
+      true;
+#else
+      FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+  Vector<const char> func_name = ArrayVector("js-to-wasm");
+
+  static unsigned id = 0;
+  Vector<char> buffer;
+  if (debugging) {
+    buffer = Vector<char>::New(128);
+    int chars = SNPrintF(buffer, "js-to-wasm#%d", id);
+    func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
+  }
+
+  CompilationInfo info(func_name, isolate, &zone, flags);
+  Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_opt_code && !code.is_null()) {
+    OFStream os(stdout);
+    code->Disassemble(buffer.start(), os);
+  }
+#endif
+  if (debugging) {
+    buffer.Dispose();
+  }
+
+  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+    RecordFunctionCompilation(
+        CodeEventListener::FUNCTION_TAG, isolate, code, "js-to-wasm", index,
+        wasm::WasmName("export"),
+        module->module->GetName(func->name_offset, func->name_length));
+  }
+  return code;
 }
 
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate,
-                                    Handle<JSFunction> function,
-                                    wasm::FunctionSig* sig,
-                                    wasm::WasmName module_name,
-                                    wasm::WasmName function_name) {
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
+                                    wasm::FunctionSig* sig, uint32_t index,
+                                    Handle<String> import_module,
+                                    MaybeHandle<String> import_function) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
@@ -3049,7 +2994,7 @@
   WasmGraphBuilder builder(&zone, &jsgraph, sig);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
-  builder.BuildWasmToJSWrapper(function, sig);
+  builder.BuildWasmToJSWrapper(target, sig);
 
   Handle<Code> code = Handle<Code>::null();
   {
@@ -3092,10 +3037,21 @@
     if (debugging) {
       buffer.Dispose();
     }
-
-    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info,
-                              "wasm-to-js", 0, module_name);
   }
+  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+    // Keep the C strings alive until RecordFunctionCompilation has consumed
+    // them; calling ToCString().get() on a temporary would leave the pointer
+    // dangling.
+    const char* function_name = nullptr;
+    int function_name_size = 0;
+    std::unique_ptr<char[]> function_name_storage;
+    if (!import_function.is_null()) {
+      Handle<String> handle = import_function.ToHandleChecked();
+      function_name_storage = handle->ToCString();
+      function_name = function_name_storage.get();
+      function_name_size = handle->length();
+    }
+    std::unique_ptr<char[]> module_name_storage = import_module->ToCString();
+    RecordFunctionCompilation(
+        CodeEventListener::FUNCTION_TAG, isolate, code, "wasm-to-js", index,
+        {module_name_storage.get(), import_module->length()},
+        {function_name, function_name_size});
+  }
+
   return code;
 }
 
@@ -3135,6 +3091,7 @@
   }
 
   int index = static_cast<int>(function_->func_index);
+
   if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
     OFStream os(stdout);
     PrintAst(isolate_->allocator(), body, os, nullptr);
@@ -3160,7 +3117,8 @@
           new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
           nullptr, new (graph_zone()) MachineOperatorBuilder(
                        graph_zone(), MachineType::PointerRepresentation(),
-                       InstructionSelector::SupportedMachineOperatorFlags()))),
+                       InstructionSelector::SupportedMachineOperatorFlags(),
+                       InstructionSelector::AlignmentRequirements()))),
       compilation_zone_(isolate->allocator()),
       info_(function->name_length != 0
                 ? module_env->module->GetNameOrNull(function->name_offset,
@@ -3189,7 +3147,7 @@
   double decode_ms = 0;
   size_t node_count = 0;
 
-  base::SmartPointer<Zone> graph_zone(graph_zone_.Detach());
+  std::unique_ptr<Zone> graph_zone(graph_zone_.release());
   SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
 
   if (graph_construction_result_.failed()) {
@@ -3210,13 +3168,9 @@
     descriptor =
         module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
   }
-  job_.Reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
+  job_.reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
                                              descriptor, source_positions));
-
-  // The function name {OptimizeGraph()} is misleading but necessary because we
-  // want to use the CompilationJob interface. A better name would be
-  // ScheduleGraphAndSelectInstructions.
-  ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
+  ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
   // TODO(bradnelson): Improve histogram handling of size_t.
   // TODO(ahaas): The counters are not thread-safe at the moment.
   //    isolate_->counters()->wasm_compile_function_peak_memory_bytes()
@@ -3248,7 +3202,7 @@
 
     return Handle<Code>::null();
   }
-  if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
+  if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
     return Handle<Code>::null();
   }
   base::ElapsedTimer compile_timer;
@@ -3258,11 +3212,14 @@
   Handle<Code> code = info_.code();
   DCHECK(!code.is_null());
 
-  RecordFunctionCompilation(
-      CodeEventListener::FUNCTION_TAG, &info_, "WASM_function",
-      function_->func_index,
-      module_env_->module->GetName(function_->name_offset,
-                                   function_->name_length));
+  if (isolate_->logger()->is_logging_code_events() ||
+      isolate_->is_profiling()) {
+    RecordFunctionCompilation(
+        CodeEventListener::FUNCTION_TAG, isolate_, code, "WASM_function",
+        function_->func_index, wasm::WasmName("module"),
+        module_env_->module->GetName(function_->name_offset,
+                                     function_->name_length));
+  }
 
   if (FLAG_trace_wasm_decode_time) {
     double compile_ms = compile_timer.Elapsed().InMillisecondsF();
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index c03de3d..487ddcb 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_WASM_COMPILER_H_
 #define V8_COMPILER_WASM_COMPILER_H_
 
+#include <memory>
+
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
 #include "src/compiler.h"
@@ -29,7 +31,7 @@
 struct ModuleEnv;
 struct WasmFunction;
 class ErrorThrower;
-struct Tree;
+struct DecodeStruct;
 
 // Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
 typedef compiler::Node TFNode;
@@ -66,32 +68,32 @@
   wasm::ModuleEnv* module_env_;
   const wasm::WasmFunction* function_;
   // The graph zone is deallocated at the end of ExecuteCompilation.
-  base::SmartPointer<Zone> graph_zone_;
+  std::unique_ptr<Zone> graph_zone_;
   JSGraph* jsgraph_;
   Zone compilation_zone_;
   CompilationInfo info_;
-  base::SmartPointer<CompilationJob> job_;
+  std::unique_ptr<CompilationJob> job_;
   uint32_t index_;
-  wasm::Result<wasm::Tree*> graph_construction_result_;
+  wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
   bool ok_;
+
+  DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
 };
 
 // Wraps a JS function, producing a code object that can be called from WASM.
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate,
-                                    Handle<JSFunction> function,
-                                    wasm::FunctionSig* sig,
-                                    wasm::WasmName module_name,
-                                    wasm::WasmName function_name);
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
+                                    wasm::FunctionSig* sig, uint32_t index,
+                                    Handle<String> import_module,
+                                    MaybeHandle<String> import_function);
 
-// Wraps a given wasm code object, producing a JSFunction that can be called
-// from JavaScript.
-Handle<JSFunction> CompileJSToWasmWrapper(
-    Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
-    Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
+// Wraps a given wasm code object, producing a code object that can be called
+// from JavaScript.
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+                                    Handle<Code> wasm_code, uint32_t index);
 
 // Abstracts details of building TurboFan graph nodes for WASM to separate
 // the WASM decoder from the internal details of TurboFan.
 class WasmTrapHelper;
+typedef ZoneVector<Node*> NodeVector;
 class WasmGraphBuilder {
  public:
   WasmGraphBuilder(
@@ -120,6 +122,7 @@
   Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
   Node* EffectPhi(unsigned count, Node** effects, Node* control);
   Node* NumberConstant(int32_t value);
+  Node* Uint32Constant(uint32_t value);
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
   Node* Float32Constant(float value);
@@ -134,6 +137,8 @@
   void AppendToMerge(Node* merge, Node* from);
   void AppendToPhi(Node* phi, Node* from);
 
+  void StackCheck(wasm::WasmCodePosition position);
+
   //-----------------------------------------------------------------------
   // Operations that read and/or write {control} and {effect}.
   //-----------------------------------------------------------------------
@@ -152,20 +157,19 @@
   Node* CallIndirect(uint32_t index, Node** args,
                      wasm::WasmCodePosition position);
   void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
-  void BuildWasmToJSWrapper(Handle<JSFunction> function,
-                            wasm::FunctionSig* sig);
+  void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
 
-  Node* ToJS(Node* node, Node* context, wasm::LocalType type);
+  Node* ToJS(Node* node, wasm::LocalType type);
   Node* FromJS(Node* node, Node* context, wasm::LocalType type);
   Node* Invert(Node* node);
-  Node* FunctionTable();
+  Node* FunctionTable(uint32_t index);
 
   //-----------------------------------------------------------------------
   // Operations that concern the linear memory.
   //-----------------------------------------------------------------------
   Node* MemSize(uint32_t offset);
-  Node* LoadGlobal(uint32_t index);
-  Node* StoreGlobal(uint32_t index, Node* val);
+  Node* GetGlobal(uint32_t index);
+  Node* SetGlobal(uint32_t index, Node* val);
   Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
                 uint32_t offset, uint32_t alignment,
                 wasm::WasmCodePosition position);
@@ -190,6 +194,8 @@
 
   void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
 
+  Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
+
  private:
   static const int kDefaultBufferSize = 16;
   friend class WasmTrapHelper;
@@ -199,7 +205,7 @@
   wasm::ModuleEnv* module_;
   Node* mem_buffer_;
   Node* mem_size_;
-  Node* function_table_;
+  NodeVector function_tables_;
   Node** control_;
   Node** effect_;
   Node** cur_buffer_;
@@ -221,18 +227,8 @@
   void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
                       wasm::WasmCodePosition position);
 
-  MachineType GetTypeForUnalignedAccess(uint32_t alignment,
-                                        bool signExtend = false);
-
-  Node* GetUnalignedLoadOffsetNode(Node* baseOffset, int numberOfBytes,
-                                   int stride, int current);
-
-  Node* BuildUnalignedLoad(wasm::LocalType type, MachineType memtype,
-                           Node* index, uint32_t offset, uint32_t alignment);
-  Node* GetUnalignedStoreOffsetNode(Node* baseOffset, int numberOfBytes,
-                                    int stride, int current);
-  Node* BuildUnalignedStore(MachineType memtype, Node* index, uint32_t offset,
-                            uint32_t alignment, Node* val);
+  Node* BuildChangeEndianness(Node* node, MachineType type,
+                              wasm::LocalType wasmtype = wasm::kAstStmt);
 
   Node* MaskShiftCount32(Node* node);
   Node* MaskShiftCount64(Node* node);
@@ -241,14 +237,8 @@
   Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args,
                       wasm::WasmCodePosition position);
 
-  Node* BuildF32Neg(Node* input);
-  Node* BuildF64Neg(Node* input);
   Node* BuildF32CopySign(Node* left, Node* right);
   Node* BuildF64CopySign(Node* left, Node* right);
-  Node* BuildF32Min(Node* left, Node* right);
-  Node* BuildF32Max(Node* left, Node* right);
-  Node* BuildF64Min(Node* left, Node* right);
-  Node* BuildF64Max(Node* left, Node* right);
   Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
   Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
   Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
@@ -317,6 +307,7 @@
 
   Node* BuildChangeInt32ToSmi(Node* value);
   Node* BuildChangeSmiToInt32(Node* value);
+  Node* BuildChangeUint32ToSmi(Node* value);
   Node* BuildChangeSmiToFloat64(Node* value);
   Node* BuildTestNotSmi(Node* value);
   Node* BuildSmiShiftBitsConstant();
@@ -324,6 +315,7 @@
   Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control);
   Node* BuildLoadHeapNumberValue(Node* value, Node* control);
   Node* BuildHeapNumberValueIndexConstant();
+  Node* BuildGrowMemory(Node* input);
 
   // Asm.js specific functionality.
   Node* BuildI32AsmjsSConvertF32(Node* input);
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index cfeb6c5..c50f643 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/assembler.h"
+#include "src/base/lazy-instance.h"
 #include "src/macro-assembler.h"
 #include "src/register-configuration.h"
 
@@ -22,6 +23,7 @@
 using compiler::LinkageLocation;
 
 namespace {
+
 MachineType MachineTypeFor(LocalType type) {
   switch (type) {
     case kAstI32:
@@ -40,20 +42,16 @@
   }
 }
 
-
-// Platform-specific configuration for C calling convention.
-LinkageLocation regloc(Register reg) {
-  return LinkageLocation::ForRegister(reg.code());
+LinkageLocation regloc(Register reg, MachineType type) {
+  return LinkageLocation::ForRegister(reg.code(), type);
 }
 
-
-LinkageLocation regloc(DoubleRegister reg) {
-  return LinkageLocation::ForRegister(reg.code());
+LinkageLocation regloc(DoubleRegister reg, MachineType type) {
+  return LinkageLocation::ForRegister(reg.code(), type);
 }
 
-
-LinkageLocation stackloc(int i) {
-  return LinkageLocation::ForCallerFrameSlot(i);
+LinkageLocation stackloc(int i, MachineType type) {
+  return LinkageLocation::ForCallerFrameSlot(i, type);
 }
 
 
@@ -180,30 +178,20 @@
       // Allocate a floating point register/stack location.
       if (fp_offset < fp_count) {
         DoubleRegister reg = fp_regs[fp_offset++];
-#if V8_TARGET_ARCH_ARM
-        // Allocate floats using a double register, but modify the code to
-        // reflect how ARM FP registers alias.
-        // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
-        if (type == kAstF32) {
-          int float_reg_code = reg.code() * 2;
-          DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
-          return regloc(DoubleRegister::from_code(float_reg_code));
-        }
-#endif
-        return regloc(reg);
+        return regloc(reg, MachineTypeFor(type));
       } else {
         int offset = -1 - stack_offset;
         stack_offset += Words(type);
-        return stackloc(offset);
+        return stackloc(offset, MachineTypeFor(type));
       }
     } else {
       // Allocate a general purpose register/stack location.
       if (gp_offset < gp_count) {
-        return regloc(gp_regs[gp_offset++]);
+        return regloc(gp_regs[gp_offset++], MachineTypeFor(type));
       } else {
         int offset = -1 - stack_offset;
         stack_offset += Words(type);
-        return stackloc(offset);
+        return stackloc(offset, MachineTypeFor(type));
       }
     }
   }
@@ -219,81 +207,83 @@
 };
 }  // namespace
 
-static Allocator GetReturnRegisters() {
-#ifdef GP_RETURN_REGISTERS
-  static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
-  static const int kGPReturnRegistersCount =
-      static_cast<int>(arraysize(kGPReturnRegisters));
-#else
-  static const Register* kGPReturnRegisters = nullptr;
-  static const int kGPReturnRegistersCount = 0;
-#endif
-
-#ifdef FP_RETURN_REGISTERS
-  static const DoubleRegister kFPReturnRegisters[] = {FP_RETURN_REGISTERS};
-  static const int kFPReturnRegistersCount =
-      static_cast<int>(arraysize(kFPReturnRegisters));
-#else
-  static const DoubleRegister* kFPReturnRegisters = nullptr;
-  static const int kFPReturnRegistersCount = 0;
-#endif
-
-  Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
-                 kFPReturnRegisters, kFPReturnRegistersCount);
-
-  return rets;
-}
-
-static Allocator GetParameterRegisters() {
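+// Platform-specific register configurations for the WASM linkage, built
+// lazily and at most once, on first use.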
+struct ParameterRegistersCreateTrait {
+  static void Construct(Allocator* allocated_ptr) {
 #ifdef GP_PARAM_REGISTERS
-  static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
-  static const int kGPParamRegistersCount =
-      static_cast<int>(arraysize(kGPParamRegisters));
+    static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
+    static const int kGPParamRegistersCount =
+        static_cast<int>(arraysize(kGPParamRegisters));
 #else
-  static const Register* kGPParamRegisters = nullptr;
-  static const int kGPParamRegistersCount = 0;
+    static const Register* kGPParamRegisters = nullptr;
+    static const int kGPParamRegistersCount = 0;
 #endif
 
 #ifdef FP_PARAM_REGISTERS
-  static const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
-  static const int kFPParamRegistersCount =
-      static_cast<int>(arraysize(kFPParamRegisters));
+    static const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
+    static const int kFPParamRegistersCount =
+        static_cast<int>(arraysize(kFPParamRegisters));
 #else
-  static const DoubleRegister* kFPParamRegisters = nullptr;
-  static const int kFPParamRegistersCount = 0;
+    static const DoubleRegister* kFPParamRegisters = nullptr;
+    static const int kFPParamRegistersCount = 0;
 #endif
 
-  Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
-                   kFPParamRegistersCount);
+    new (allocated_ptr) Allocator(kGPParamRegisters, kGPParamRegistersCount,
+                                  kFPParamRegisters, kFPParamRegistersCount);
+  }
+};
 
-  return params;
-}
+static base::LazyInstance<Allocator, ParameterRegistersCreateTrait>::type
+    parameter_registers = LAZY_INSTANCE_INITIALIZER;
+
+struct ReturnRegistersCreateTrait {
+  static void Construct(Allocator* allocated_ptr) {
+#ifdef GP_RETURN_REGISTERS
+    static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
+    static const int kGPReturnRegistersCount =
+        static_cast<int>(arraysize(kGPReturnRegisters));
+#else
+    static const Register* kGPReturnRegisters = nullptr;
+    static const int kGPReturnRegistersCount = 0;
+#endif
+
+#ifdef FP_RETURN_REGISTERS
+    static const DoubleRegister kFPReturnRegisters[] = {FP_RETURN_REGISTERS};
+    static const int kFPReturnRegistersCount =
+        static_cast<int>(arraysize(kFPReturnRegisters));
+#else
+    static const DoubleRegister* kFPReturnRegisters = nullptr;
+    static const int kFPReturnRegistersCount = 0;
+#endif
+
+    new (allocated_ptr) Allocator(kGPReturnRegisters, kGPReturnRegistersCount,
+                                  kFPReturnRegisters, kFPReturnRegistersCount);
+  }
+};
+
+static base::LazyInstance<Allocator, ReturnRegistersCreateTrait>::type
+    return_registers = LAZY_INSTANCE_INITIALIZER;
 
 // General code uses the above configuration data.
 CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
                                                  FunctionSig* fsig) {
-  MachineSignature::Builder msig(zone, fsig->return_count(),
-                                 fsig->parameter_count());
   LocationSignature::Builder locations(zone, fsig->return_count(),
                                        fsig->parameter_count());
 
-  Allocator rets = GetReturnRegisters();
+  Allocator rets = return_registers.Get();
 
   // Add return location(s).
   const int return_count = static_cast<int>(locations.return_count_);
   for (int i = 0; i < return_count; i++) {
     LocalType ret = fsig->GetReturn(i);
-    msig.AddReturn(MachineTypeFor(ret));
     locations.AddReturn(rets.Next(ret));
   }
 
-  Allocator params = GetParameterRegisters();
+  Allocator params = parameter_registers.Get();
 
   // Add register and/or stack parameter(s).
   const int parameter_count = static_cast<int>(fsig->parameter_count());
   for (int i = 0; i < parameter_count; i++) {
     LocalType param = fsig->GetParam(i);
-    msig.AddParam(MachineTypeFor(param));
     locations.AddParam(params.Next(param));
   }
 
@@ -302,13 +292,12 @@
 
   // The target for WASM calls is always a code object.
   MachineType target_type = MachineType::AnyTagged();
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
 
   return new (zone) CallDescriptor(       // --
       CallDescriptor::kCallCodeObject,    // kind
       target_type,                        // target MachineType
       target_loc,                         // target location
-      msig.Build(),                       // machine_sig
       locations.Build(),                  // location_sig
       params.stack_offset,                // stack_parameter_count
       compiler::Operator::kNoProperties,  // properties
@@ -320,58 +309,52 @@
 
 CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
     Zone* zone, CallDescriptor* descriptor) {
-  const MachineSignature* signature = descriptor->GetMachineSignature();
-  size_t parameter_count = signature->parameter_count();
-  size_t return_count = signature->return_count();
-  for (size_t i = 0; i < signature->parameter_count(); i++) {
-    if (signature->GetParam(i) == MachineType::Int64()) {
+  size_t parameter_count = descriptor->ParameterCount();
+  size_t return_count = descriptor->ReturnCount();
+  for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
+    if (descriptor->GetParameterType(i) == MachineType::Int64()) {
       // For each int64 input we get two int32 inputs.
       parameter_count++;
     }
   }
-  for (size_t i = 0; i < signature->return_count(); i++) {
-    if (signature->GetReturn(i) == MachineType::Int64()) {
+  for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+    if (descriptor->GetReturnType(i) == MachineType::Int64()) {
       // For each int64 return we get two int32 returns.
       return_count++;
     }
   }
-  if (parameter_count == signature->parameter_count() &&
-      return_count == signature->return_count()) {
+  if (parameter_count == descriptor->ParameterCount() &&
+      return_count == descriptor->ReturnCount()) {
     // If there is no int64 parameter or return value, we can just return the
     // original descriptor.
     return descriptor;
   }
 
-  MachineSignature::Builder msig(zone, return_count, parameter_count);
   LocationSignature::Builder locations(zone, return_count, parameter_count);
 
-  Allocator rets = GetReturnRegisters();
+  Allocator rets = return_registers.Get();
 
-  for (size_t i = 0; i < signature->return_count(); i++) {
-    if (signature->GetReturn(i) == MachineType::Int64()) {
+  for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+    if (descriptor->GetReturnType(i) == MachineType::Int64()) {
       // For each int64 return we get two int32 returns.
-      msig.AddReturn(MachineType::Int32());
-      msig.AddReturn(MachineType::Int32());
       locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
       locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
     } else {
-      msig.AddReturn(signature->GetReturn(i));
-      locations.AddReturn(rets.Next(signature->GetReturn(i).representation()));
+      locations.AddReturn(
+          rets.Next(descriptor->GetReturnType(i).representation()));
     }
   }
 
-  Allocator params = GetParameterRegisters();
+  Allocator params = parameter_registers.Get();
 
-  for (size_t i = 0; i < signature->parameter_count(); i++) {
-    if (signature->GetParam(i) == MachineType::Int64()) {
+  for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
+    if (descriptor->GetParameterType(i) == MachineType::Int64()) {
       // For each int64 input we get two int32 inputs.
-      msig.AddParam(MachineType::Int32());
-      msig.AddParam(MachineType::Int32());
       locations.AddParam(params.Next(MachineRepresentation::kWord32));
       locations.AddParam(params.Next(MachineRepresentation::kWord32));
     } else {
-      msig.AddParam(signature->GetParam(i));
-      locations.AddParam(params.Next(signature->GetParam(i).representation()));
+      locations.AddParam(
+          params.Next(descriptor->GetParameterType(i).representation()));
     }
   }
 
@@ -379,7 +362,6 @@
       descriptor->kind(),                    // kind
       descriptor->GetInputType(0),           // target MachineType
       descriptor->GetInputLocation(0),       // target location
-      msig.Build(),                          // machine_sig
       locations.Build(),                     // location_sig
       params.stack_offset,                   // stack_parameter_count
       descriptor->properties(),              // properties
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 2ae1fc9..49a097b 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -164,35 +164,59 @@
   Register const result_;
 };
 
-
-class OutOfLineLoadNaN final : public OutOfLineCode {
+class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
  public:
-  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
+  OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
       : OutOfLineCode(gen), result_(result) {}
 
-  void Generate() final { __ Pcmpeqd(result_, result_); }
+  void Generate() final {
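+    // Materialize the default quiet NaN by computing 0.0f / 0.0f.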
+    __ Xorps(result_, result_);
+    __ Divss(result_, result_);
+  }
 
  private:
   XMMRegister const result_;
 };
 
+class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
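+    // As above, 0.0 / 0.0 yields the default quiet NaN, here in double width.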
+    __ Xorpd(result_, result_);
+    __ Divsd(result_, result_);
+  }
+
+ private:
+  XMMRegister const result_;
+};
 
 class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
  public:
   OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
-                             XMMRegister input)
-      : OutOfLineCode(gen), result_(result), input_(input) {}
+                             XMMRegister input,
+                             UnwindingInfoWriter* unwinding_info_writer)
+      : OutOfLineCode(gen),
+        result_(result),
+        input_(input),
+        unwinding_info_writer_(unwinding_info_writer) {}
 
   void Generate() final {
     __ subp(rsp, Immediate(kDoubleSize));
+    unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                      kDoubleSize);
     __ Movsd(MemOperand(rsp, 0), input_);
     __ SlowTruncateToI(result_, rsp, 0);
     __ addp(rsp, Immediate(kDoubleSize));
+    unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                      -kDoubleSize);
   }
 
  private:
   Register const result_;
   XMMRegister const input_;
+  UnwindingInfoWriter* const unwinding_info_writer_;
 };
 
 
@@ -372,7 +396,7 @@
     }                                                                  \
   } while (0)
 
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN)             \
   do {                                                                       \
     auto result = i.OutputDoubleRegister();                                  \
     auto buffer = i.InputRegister(0);                                        \
@@ -622,25 +646,12 @@
   } while (false)
 
 void CodeGenerator::AssembleDeconstructFrame() {
+  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
   __ movq(rsp, rbp);
   __ popq(rbp);
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ movq(rbp, MemOperand(rbp, 0));
   }
@@ -672,6 +683,68 @@
   __ bind(&done);
 }
 
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+                                   FrameAccessState* state,
+                                   int new_slot_above_sp,
+                                   bool allow_shrinkage = true) {
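+  // Adjust rsp so that {new_slot_above_sp} becomes the first unused stack
+  // slot, growing the stack as needed and shrinking it only when allowed.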
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    masm->subq(rsp, Immediate(stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    masm->addq(rsp, Immediate(-stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+  ZoneVector<MoveOperands*> pushes(zone());
+  GetPushCompatibleMoves(instr, flags, &pushes);
+
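+  // If the pushable gap moves end exactly at the first unused slot, emit them
+  // as pushes: each push both stores the value and grows the stack one slot.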
+  if (!pushes.empty() &&
+      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+       first_unused_stack_slot)) {
+    X64OperandConverter g(this, instr);
+    for (auto move : pushes) {
+      LocationOperand destination_location(
+          LocationOperand::cast(move->destination()));
+      InstructionOperand source(move->source());
+      AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                    destination_location.index());
+      if (source.IsStackSlot()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ Push(g.SlotToOperand(source_location.index()));
+      } else if (source.IsRegister()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ Push(source_location.GetRegister());
+      } else if (source.IsImmediate()) {
+        __ Push(Immediate(ImmediateOperand::cast(source).inline_value()));
+      } else {
+        // Pushes of non-scalar data types are not supported.
+        UNIMPLEMENTED();
+      }
+      frame_access_state()->IncreaseSPDelta(1);
+      move->Eliminate();
+    }
+  }
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -695,8 +768,6 @@
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -710,16 +781,18 @@
         __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
         __ jmp(reg);
       }
+      unwinding_info_writer_.MarkBlockWillExit();
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       __ jmp(reg);
+      unwinding_info_writer_.MarkBlockWillExit();
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -743,8 +816,6 @@
         __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
         __ Assert(equal, kWrongFunctionContext);
       }
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
@@ -752,6 +823,7 @@
       }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction: {
@@ -762,7 +834,7 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
@@ -794,6 +866,9 @@
     case kArchDebugBreak:
       __ int3();
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -827,7 +902,8 @@
     case kArchTruncateDoubleToI: {
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
-      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+      auto ool = new (zone()) OutOfLineTruncateDoubleToI(
+          this, result, input, &unwinding_info_writer_);
       // We use Cvttsd2siq instead of Cvttsd2si for performance reasons. The
       // use of Cvttsd2siq requires the movl below to avoid sign extension.
       __ Cvttsd2siq(result, input);
@@ -867,21 +943,36 @@
       __ leaq(i.OutputRegister(), Operand(base, offset.offset()));
       break;
     }
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
-    case kIeee754Float64Atan2:
-      ASSEMBLE_IEEE754_BINOP(atan2);
-      break;
     case kIeee754Float64Atanh:
       ASSEMBLE_IEEE754_UNOP(atanh);
       break;
+    case kIeee754Float64Atan2:
+      ASSEMBLE_IEEE754_BINOP(atan2);
+      break;
     case kIeee754Float64Cbrt:
       ASSEMBLE_IEEE754_UNOP(cbrt);
       break;
     case kIeee754Float64Cos:
       ASSEMBLE_IEEE754_UNOP(cos);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Exp:
       ASSEMBLE_IEEE754_UNOP(exp);
       break;
@@ -900,12 +991,26 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
+    case kIeee754Float64Pow: {
+      // TODO(bmeurer): Improve integration of the stub.
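+      // The stub expects the base in xmm2 (the exponent is already in xmm1)
+      // and leaves the result in xmm3.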
+      __ Movsd(xmm2, xmm0);
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      __ Movsd(xmm0, xmm3);
+      break;
+    }
     case kIeee754Float64Sin:
       ASSEMBLE_IEEE754_UNOP(sin);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Tan:
       ASSEMBLE_IEEE754_UNOP(tan);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kX64Add32:
       ASSEMBLE_BINOP(addl);
       break;
@@ -1109,12 +1214,6 @@
     case kSSEFloat32Sqrt:
       ASSEMBLE_SSE_UNOP(sqrtss);
       break;
-    case kSSEFloat32Max:
-      ASSEMBLE_SSE_BINOP(maxss);
-      break;
-    case kSSEFloat32Min:
-      ASSEMBLE_SSE_BINOP(minss);
-      break;
     case kSSEFloat32ToFloat64:
       ASSEMBLE_SSE_UNOP(Cvtss2sd);
       break;
@@ -1160,6 +1259,8 @@
       break;
     case kSSEFloat64Mod: {
       __ subq(rsp, Immediate(kDoubleSize));
+      unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                       kDoubleSize);
       // Move values to st(0) and st(1).
       __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
       __ fld_d(Operand(rsp, 0));
@@ -1180,7 +1281,11 @@
         __ shrl(rax, Immediate(8));
         __ andl(rax, Immediate(0xFF));
         __ pushq(rax);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
         __ popfq();
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         -kPointerSize);
       }
       __ j(parity_even, &mod_loop);
       // Move output to stack and clean up.
@@ -1188,14 +1293,120 @@
       __ fstp_d(Operand(rsp, 0));
       __ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
       __ addq(rsp, Immediate(kDoubleSize));
+      unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                       -kDoubleSize);
       break;
     }
-    case kSSEFloat64Max:
-      ASSEMBLE_SSE_BINOP(maxsd);
+    case kSSEFloat32Max: {
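+      // Ucomiss sets the parity flag if either operand is NaN; in that case
+      // the out-of-line code loads a quiet NaN. When the operands compare
+      // equal, the sign bit of the left operand decides, so that
+      // Max(-0.0, +0.0) correctly returns +0.0.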
+      Label compare_nan, compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(above, &done_compare, Label::kNear);
+      __ j(below, &compare_swap, Label::kNear);
+      __ Movmskps(kScratchRegister, i.InputDoubleRegister(0));
+      __ testl(kScratchRegister, Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
       break;
-    case kSSEFloat64Min:
-      ASSEMBLE_SSE_BINOP(minsd);
+    }
+    case kSSEFloat32Min: {
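+      // Mirror image of kSSEFloat32Max: NaNs go to the out-of-line path, and
+      // for equal operands the sign bit of the right operand decides, so that
+      // Min(-0.0, +0.0) correctly returns -0.0.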
+      Label compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(below, &done_compare, Label::kNear);
+      __ j(above, &compare_swap, Label::kNear);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Movmskps(kScratchRegister, i.InputDoubleRegister(1));
+      } else {
+        __ Movss(kScratchDoubleReg, i.InputOperand(1));
+        __ Movmskps(kScratchRegister, kScratchDoubleReg);
+      }
+      __ testl(kScratchRegister, Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
       break;
+    }
+    case kSSEFloat64Max: {
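+      // Same algorithm as kSSEFloat32Max, using the double-width compares.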
+      Label compare_nan, compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(above, &done_compare, Label::kNear);
+      __ j(below, &compare_swap, Label::kNear);
+      __ Movmskpd(kScratchRegister, i.InputDoubleRegister(0));
+      __ testl(kScratchRegister, Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
+      break;
+    }
+    case kSSEFloat64Min: {
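+      // Same algorithm as kSSEFloat32Min, using the double-width compares.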
+      Label compare_swap, done_compare;
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      auto ool =
+          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(below, &done_compare, Label::kNear);
+      __ j(above, &compare_swap, Label::kNear);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Movmskpd(kScratchRegister, i.InputDoubleRegister(1));
+      } else {
+        __ Movsd(kScratchDoubleReg, i.InputOperand(1));
+        __ Movmskpd(kScratchRegister, kScratchDoubleReg);
+      }
+      __ testl(kScratchRegister, Immediate(1));
+      __ j(zero, &done_compare, Label::kNear);
+      __ bind(&compare_swap);
+      if (instr->InputAt(1)->IsFPRegister()) {
+        __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      } else {
+        __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+      }
+      __ bind(&done_compare);
+      __ bind(ool->exit());
+      break;
+    }
     case kSSEFloat64Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
@@ -1506,12 +1717,6 @@
       // when there is a (v)mulss depending on the result.
       __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       break;
-    case kAVXFloat32Max:
-      ASSEMBLE_AVX_BINOP(vmaxss);
-      break;
-    case kAVXFloat32Min:
-      ASSEMBLE_AVX_BINOP(vminss);
-      break;
     case kAVXFloat64Cmp: {
       CpuFeatureScope avx_scope(masm(), AVX);
       if (instr->InputAt(1)->IsFPRegister()) {
@@ -1536,12 +1741,6 @@
       // when there is a (v)mulsd depending on the result.
       __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       break;
-    case kAVXFloat64Max:
-      ASSEMBLE_AVX_BINOP(vmaxsd);
-      break;
-    case kAVXFloat64Min:
-      ASSEMBLE_AVX_BINOP(vminsd);
-      break;
     case kAVXFloat32Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
       CpuFeatureScope avx_scope(masm(), AVX);
@@ -1610,6 +1809,13 @@
       ASSEMBLE_MOVX(movzxbl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
+    case kX64Movsxbq:
+      ASSEMBLE_MOVX(movsxbq);
+      break;
+    case kX64Movzxbq:
+      ASSEMBLE_MOVX(movzxbq);
+      __ AssertZeroExtended(i.OutputRegister());
+      break;
     case kX64Movb: {
       size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
@@ -1628,6 +1834,13 @@
       ASSEMBLE_MOVX(movzxwl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
+    case kX64Movsxwq:
+      ASSEMBLE_MOVX(movsxwq);
+      break;
+    case kX64Movzxwq:
+      ASSEMBLE_MOVX(movzxwq);
+      __ AssertZeroExtended(i.OutputRegister());
+      break;
     case kX64Movw: {
       size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
@@ -1769,18 +1982,26 @@
       if (HasImmediateInput(instr, 0)) {
         __ pushq(i.InputImmediate(0));
         frame_access_state()->IncreaseSPDelta(1);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
       } else {
         if (instr->InputAt(0)->IsRegister()) {
           __ pushq(i.InputRegister(0));
           frame_access_state()->IncreaseSPDelta(1);
+          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                           kPointerSize);
         } else if (instr->InputAt(0)->IsFPRegister()) {
           // TODO(titzer): use another machine instruction?
           __ subq(rsp, Immediate(kDoubleSize));
           frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                           kDoubleSize);
           __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
         } else {
           __ pushq(i.InputOperand(0));
           frame_access_state()->IncreaseSPDelta(1);
+          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                           kPointerSize);
         }
       }
       break;
@@ -1830,10 +2051,10 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Movss, OutOfLineLoadFloat32NaN);
       break;
     case kCheckedLoadFloat64:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd, OutOfLineLoadFloat64NaN);
       break;
     case kCheckedStoreWord8:
       ASSEMBLE_CHECKED_STORE_INTEGER(movb);
@@ -2035,6 +2256,9 @@
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2073,6 +2297,8 @@
 void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
+    int pc_base = __ pc_offset();
+
     if (descriptor->IsCFunctionCall()) {
       __ pushq(rbp);
       __ movq(rbp, rsp);
@@ -2081,6 +2307,10 @@
     } else {
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
+
+    if (!descriptor->IsJSFunctionCall() || !info()->GeneratePreagedPrologue()) {
+      unwinding_info_writer_.MarkFrameConstructed(pc_base);
+    }
   }
   int shrink_slots = frame()->GetSpillSlotCount();
 
@@ -2154,6 +2384,8 @@
     __ addp(rsp, Immediate(stack_size));
   }
 
+  unwinding_info_writer_.MarkBlockWillExit();
+
   if (descriptor->IsCFunctionCall()) {
     AssembleDeconstructFrame();
   } else if (frame_access_state()->has_frame()) {
@@ -2245,10 +2477,7 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int slot;
-          if (IsMaterializableFromFrame(src_object, &slot)) {
-            __ movp(dst, g.SlotToOperand(slot));
-          } else if (IsMaterializableFromRoot(src_object, &index)) {
+          if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
             __ Move(dst, src_object);
@@ -2291,18 +2520,34 @@
     } else {
       DCHECK(destination->IsFPStackSlot());
       Operand dst = g.ToOperand(destination);
-      __ Movsd(dst, src);
+      MachineRepresentation rep =
+          LocationOperand::cast(source)->representation();
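+      // Simd128 slots are 16 bytes wide and need a full 128-bit move.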
+      if (rep != MachineRepresentation::kSimd128) {
+        __ Movsd(dst, src);
+      } else {
+        __ Movups(dst, src);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     Operand src = g.ToOperand(source);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
     if (destination->IsFPRegister()) {
       XMMRegister dst = g.ToDoubleRegister(destination);
-      __ Movsd(dst, src);
+      if (rep != MachineRepresentation::kSimd128) {
+        __ Movsd(dst, src);
+      } else {
+        __ Movups(dst, src);
+      }
     } else {
       Operand dst = g.ToOperand(destination);
-      __ Movsd(kScratchDoubleReg, src);
-      __ Movsd(dst, kScratchDoubleReg);
+      if (rep != MachineRepresentation::kSimd128) {
+        __ Movsd(kScratchDoubleReg, src);
+        __ Movsd(dst, kScratchDoubleReg);
+      } else {
+        __ Movups(kScratchDoubleReg, src);
+        __ Movups(dst, kScratchDoubleReg);
+      }
     }
   } else {
     UNREACHABLE();
@@ -2326,25 +2571,45 @@
     Register src = g.ToRegister(source);
     __ pushq(src);
     frame_access_state()->IncreaseSPDelta(1);
+    unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                     kPointerSize);
     Operand dst = g.ToOperand(destination);
     __ movq(src, dst);
     frame_access_state()->IncreaseSPDelta(-1);
     dst = g.ToOperand(destination);
     __ popq(dst);
+    unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                     -kPointerSize);
   } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
              (source->IsFPStackSlot() && destination->IsFPStackSlot())) {
     // Memory-memory.
-    Register tmp = kScratchRegister;
     Operand src = g.ToOperand(source);
     Operand dst = g.ToOperand(destination);
-    __ movq(tmp, dst);
-    __ pushq(src);
-    frame_access_state()->IncreaseSPDelta(1);
-    src = g.ToOperand(source);
-    __ movq(src, tmp);
-    frame_access_state()->IncreaseSPDelta(-1);
-    dst = g.ToOperand(destination);
-    __ popq(dst);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep != MachineRepresentation::kSimd128) {
+      Register tmp = kScratchRegister;
+      __ movq(tmp, dst);
+      __ pushq(src);
+      unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                       kPointerSize);
+      frame_access_state()->IncreaseSPDelta(1);
+      src = g.ToOperand(source);
+      __ movq(src, tmp);
+      frame_access_state()->IncreaseSPDelta(-1);
+      dst = g.ToOperand(destination);
+      __ popq(dst);
+      unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                       -kPointerSize);
+    } else {
+      // Use the XOR trick to swap without a temporary.
+      __ Movups(kScratchDoubleReg, src);
+      __ Xorps(kScratchDoubleReg, dst);  // scratch contains src ^ dst.
+      __ Movups(src, kScratchDoubleReg);
+      __ Xorps(kScratchDoubleReg, dst);  // scratch contains src.
+      __ Movups(dst, kScratchDoubleReg);
+      __ Xorps(kScratchDoubleReg, src);  // scratch contains dst.
+      __ Movups(src, kScratchDoubleReg);
+    }
   } else if (source->IsFPRegister() && destination->IsFPRegister()) {
     // XMM register-register swap.
     XMMRegister src = g.ToDoubleRegister(source);
@@ -2356,9 +2621,16 @@
     // XMM register-memory swap.
     XMMRegister src = g.ToDoubleRegister(source);
     Operand dst = g.ToOperand(destination);
-    __ Movsd(kScratchDoubleReg, src);
-    __ Movsd(src, dst);
-    __ Movsd(dst, kScratchDoubleReg);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep != MachineRepresentation::kSimd128) {
+      __ Movsd(kScratchDoubleReg, src);
+      __ Movsd(src, dst);
+      __ Movsd(dst, kScratchDoubleReg);
+    } else {
+      __ Movups(kScratchDoubleReg, src);
+      __ Movups(src, dst);
+      __ Movups(dst, kScratchDoubleReg);
+    }
   } else {
     // No other combinations are possible.
     UNREACHABLE();
@@ -2381,7 +2653,7 @@
   int space_needed = Deoptimizer::patch_size();
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
-  int current_pc = masm()->pc_offset();
+  int current_pc = __ pc_offset();
   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
     __ Nop(padding_size);
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 29acee3..7ab1097 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -64,8 +64,6 @@
   V(SSEFloat32Abs)                 \
   V(SSEFloat32Neg)                 \
   V(SSEFloat32Sqrt)                \
-  V(SSEFloat32Max)                 \
-  V(SSEFloat32Min)                 \
   V(SSEFloat32ToFloat64)           \
   V(SSEFloat32ToInt32)             \
   V(SSEFloat32ToUint32)            \
@@ -80,7 +78,9 @@
   V(SSEFloat64Neg)                 \
   V(SSEFloat64Sqrt)                \
   V(SSEFloat64Round)               \
+  V(SSEFloat32Max)                 \
   V(SSEFloat64Max)                 \
+  V(SSEFloat32Min)                 \
   V(SSEFloat64Min)                 \
   V(SSEFloat64ToFloat32)           \
   V(SSEFloat64ToInt32)             \
@@ -108,24 +108,24 @@
   V(AVXFloat32Sub)                 \
   V(AVXFloat32Mul)                 \
   V(AVXFloat32Div)                 \
-  V(AVXFloat32Max)                 \
-  V(AVXFloat32Min)                 \
   V(AVXFloat64Cmp)                 \
   V(AVXFloat64Add)                 \
   V(AVXFloat64Sub)                 \
   V(AVXFloat64Mul)                 \
   V(AVXFloat64Div)                 \
-  V(AVXFloat64Max)                 \
-  V(AVXFloat64Min)                 \
   V(AVXFloat64Abs)                 \
   V(AVXFloat64Neg)                 \
   V(AVXFloat32Abs)                 \
   V(AVXFloat32Neg)                 \
   V(X64Movsxbl)                    \
   V(X64Movzxbl)                    \
+  V(X64Movsxbq)                    \
+  V(X64Movzxbq)                    \
   V(X64Movb)                       \
   V(X64Movsxwl)                    \
   V(X64Movzxwl)                    \
+  V(X64Movsxwq)                    \
+  V(X64Movzxwq)                    \
   V(X64Movw)                       \
   V(X64Movl)                       \
   V(X64Movsxlq)                    \
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index eecefdb..fb4b749 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -67,8 +67,6 @@
     case kSSEFloat32Neg:
     case kSSEFloat32Sqrt:
     case kSSEFloat32Round:
-    case kSSEFloat32Max:
-    case kSSEFloat32Min:
     case kSSEFloat32ToFloat64:
     case kSSEFloat64Cmp:
     case kSSEFloat64Add:
@@ -80,7 +78,9 @@
     case kSSEFloat64Neg:
     case kSSEFloat64Sqrt:
     case kSSEFloat64Round:
+    case kSSEFloat32Max:
     case kSSEFloat64Max:
+    case kSSEFloat32Min:
     case kSSEFloat64Min:
     case kSSEFloat64ToFloat32:
     case kSSEFloat32ToInt32:
@@ -110,15 +110,11 @@
     case kAVXFloat32Sub:
     case kAVXFloat32Mul:
     case kAVXFloat32Div:
-    case kAVXFloat32Max:
-    case kAVXFloat32Min:
     case kAVXFloat64Cmp:
     case kAVXFloat64Add:
     case kAVXFloat64Sub:
     case kAVXFloat64Mul:
     case kAVXFloat64Div:
-    case kAVXFloat64Max:
-    case kAVXFloat64Min:
     case kAVXFloat64Abs:
     case kAVXFloat64Neg:
     case kAVXFloat32Abs:
@@ -137,8 +133,12 @@
 
     case kX64Movsxbl:
     case kX64Movzxbl:
+    case kX64Movsxbq:
+    case kX64Movzxbq:
     case kX64Movsxwl:
     case kX64Movzxwl:
+    case kX64Movsxwq:
+    case kX64Movzxwq:
     case kX64Movsxlq:
       DCHECK(instr->InputCount() >= 1);
       return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index be56dce..798d438 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -37,6 +37,15 @@
     }
   }
 
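+  // Returns the value of a constant node as a 32-bit integer. Callers are
+  // expected to have checked CanBeImmediate() first, so an Int64Constant is
+  // known to fit in 32 bits at this point.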
+  int32_t GetImmediateIntegerValue(Node* node) {
+    DCHECK(CanBeImmediate(node));
+    if (node->opcode() == IrOpcode::kInt32Constant) {
+      return OpParameter<int32_t>(node);
+    }
+    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+    return static_cast<int32_t>(OpParameter<int64_t>(node));
+  }
+
   bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
                           int effect_level) {
     if (input->opcode() != IrOpcode::kLoad ||
@@ -70,6 +79,7 @@
 
   AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                              Node* base, Node* displacement,
+                                             DisplacementMode displacement_mode,
                                              InstructionOperand inputs[],
                                              size_t* input_count) {
     AddressingMode mode = kMode_MRI;
@@ -79,7 +89,9 @@
         DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
         inputs[(*input_count)++] = UseRegister(index);
         if (displacement != nullptr) {
-          inputs[(*input_count)++] = UseImmediate(displacement);
+          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                         ? UseNegatedImmediate(displacement)
+                                         : UseImmediate(displacement);
           static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                        kMode_MR4I, kMode_MR8I};
           mode = kMRnI_modes[scale_exponent];
@@ -92,7 +104,9 @@
         if (displacement == nullptr) {
           mode = kMode_MR;
         } else {
-          inputs[(*input_count)++] = UseImmediate(displacement);
+          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                         ? UseNegatedImmediate(displacement)
+                                         : UseImmediate(displacement);
           mode = kMode_MRI;
         }
       }
@@ -101,7 +115,9 @@
       DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
       inputs[(*input_count)++] = UseRegister(index);
       if (displacement != nullptr) {
-        inputs[(*input_count)++] = UseImmediate(displacement);
+        inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                       ? UseNegatedImmediate(displacement)
+                                       : UseImmediate(displacement);
         static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                     kMode_M4I, kMode_M8I};
         mode = kMnI_modes[scale_exponent];
@@ -121,11 +137,12 @@
   AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement64Matcher m(operand, true);
+    BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
-      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
-                                         m.displacement(), inputs, input_count);
+      return GenerateMemoryOperandInputs(
+          m.index(), m.scale(), m.base(), m.displacement(),
+          m.displacement_mode(), inputs, input_count);
     } else {
       inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
       inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
@@ -161,6 +178,8 @@
     case MachineRepresentation::kWord32:
       opcode = kX64Movl;
       break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord64:
       opcode = kX64Movq;
@@ -246,6 +265,8 @@
       case MachineRepresentation::kWord32:
         opcode = kX64Movl;
         break;
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
       case MachineRepresentation::kTagged:  // Fall through.
       case MachineRepresentation::kWord64:
         opcode = kX64Movq;
@@ -269,6 +290,11 @@
   }
 }
 
+// The architecture supports unaligned access, so VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, so VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -298,6 +324,8 @@
       break;
     case MachineRepresentation::kBit:      // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
@@ -351,6 +379,8 @@
       break;
     case MachineRepresentation::kBit:      // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
@@ -432,7 +462,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -542,16 +572,16 @@
   }
 }
 
-
 void EmitLea(InstructionSelector* selector, InstructionCode opcode,
              Node* result, Node* index, int scale, Node* base,
-             Node* displacement) {
+             Node* displacement, DisplacementMode displacement_mode) {
   X64OperandGenerator g(selector);
 
   InstructionOperand inputs[4];
   size_t input_count = 0;
-  AddressingMode mode = g.GenerateMemoryOperandInputs(
-      index, scale, base, displacement, inputs, &input_count);
+  AddressingMode mode =
+      g.GenerateMemoryOperandInputs(index, scale, base, displacement,
+                                    displacement_mode, inputs, &input_count);
 
   DCHECK_NE(0u, input_count);
   DCHECK_GE(arraysize(inputs), input_count);
@@ -572,7 +602,8 @@
   if (m.matches()) {
     Node* index = node->InputAt(0);
     Node* base = m.power_of_two_plus_one() ? index : nullptr;
-    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
+    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
+            kPositiveDisplacement);
     return;
   }
   VisitWord32Shift(this, node, kX64Shl32);
@@ -581,15 +612,25 @@
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
   X64OperandGenerator g(this);
-  Int64BinopMatcher m(node);
-  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
-      m.right().IsInRange(32, 63)) {
-    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
-    // 32 bits anyway.
-    Emit(kX64Shl, g.DefineSameAsFirst(node),
-         g.UseRegister(m.left().node()->InputAt(0)),
-         g.UseImmediate(m.right().node()));
+  Int64ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : nullptr;
+    EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
+            kPositiveDisplacement);
     return;
+  } else {
+    Int64BinopMatcher m(node);
+    if ((m.left().IsChangeInt32ToInt64() ||
+         m.left().IsChangeUint32ToUint64()) &&
+        m.right().IsInRange(32, 63)) {
+      // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+      // 32 bits anyway.
+      Emit(kX64Shl, g.DefineSameAsFirst(node),
+           g.UseRegister(m.left().node()->InputAt(0)),
+           g.UseImmediate(m.right().node()));
+      return;
+    }
   }
   VisitWord64Shift(this, node, kX64Shl);
 }
@@ -599,37 +640,19 @@
   VisitWord32Shift(this, node, kX64Shr32);
 }
 
-
-void InstructionSelector::VisitWord64Shr(Node* node) {
-  VisitWord64Shift(this, node, kX64Shr);
-}
-
-
-void InstructionSelector::VisitWord32Sar(Node* node) {
-  X64OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
-    Int32BinopMatcher mleft(m.left().node());
-    if (mleft.right().Is(16) && m.right().Is(16)) {
-      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
-      return;
-    } else if (mleft.right().Is(24) && m.right().Is(24)) {
-      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
-      return;
-    }
-  }
-  VisitWord32Shift(this, node, kX64Sar32);
-}
-
-
-void InstructionSelector::VisitWord64Sar(Node* node) {
-  X64OperandGenerator g(this);
+namespace {
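+// Tries to merge a 64-bit load that is shifted right by 32 into a single
+// 32-bit load of the upper word, emitted with the given (plain or
+// sign-extending) move opcode.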
+bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
+                                     InstructionCode opcode) {
+  DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
+         IrOpcode::kWord64Shr == node->opcode());
+  X64OperandGenerator g(selector);
   Int64BinopMatcher m(node);
-  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+  if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
       m.right().Is(32)) {
     // Just load and sign-extend the interesting 4 bytes instead. This happens,
     // for example, when we're loading and untagging SMIs.
-    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
+                                                AddressOption::kAllowAll);
     if (mleft.matches() && (mleft.displacement() == nullptr ||
                             g.CanBeImmediate(mleft.displacement()))) {
       size_t input_count = 0;
@@ -682,16 +705,43 @@
         }
         inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
       } else {
-        ImmediateOperand* op = ImmediateOperand::cast(&inputs[input_count - 1]);
-        int32_t displacement = sequence()->GetImmediate(op).ToInt32();
-        *op = ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
+        int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
+        inputs[input_count - 1] =
+            ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
       }
       InstructionOperand outputs[] = {g.DefineAsRegister(node)};
-      InstructionCode code = kX64Movsxlq | AddressingModeField::encode(mode);
-      Emit(code, 1, outputs, input_count, inputs);
+      InstructionCode code = opcode | AddressingModeField::encode(mode);
+      selector->Emit(code, 1, outputs, input_count, inputs);
+      return true;
+    }
+  }
+  return false;
+}
+}  // namespace
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
+  VisitWord64Shift(this, node, kX64Shr);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  X64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
       return;
     }
   }
+  VisitWord32Shift(this, node, kX64Sar32);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
   VisitWord64Shift(this, node, kX64Sar);
 }
 
@@ -735,6 +785,9 @@
 
 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   X64OperandGenerator g(this);
@@ -756,7 +809,7 @@
   if (m.matches() &&
       (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
     EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
-            m.displacement());
+            m.displacement(), m.displacement_mode());
     return;
   }
 
@@ -766,6 +819,18 @@
 
 
 void InstructionSelector::VisitInt64Add(Node* node) {
+  X64OperandGenerator g(this);
+
+  // Try to match the Add to a leaq pattern.
+  BaseWithIndexAndDisplacement64Matcher m(node);
+  if (m.matches() &&
+      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
+    EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
+            m.displacement(), m.displacement_mode());
+    return;
+  }
+
+  // No leaq pattern matched; use addq.
   VisitBinop(this, node, kX64Add);
 }
 
@@ -805,6 +870,14 @@
   if (m.left().Is(0)) {
     Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
+    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+      // Turn subtractions of constant values into immediate "leaq" instructions
+      // by negating the value.
+      Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
+      return;
+    }
     VisitBinop(this, node, kX64Sub);
   }
 }
@@ -839,7 +912,6 @@
   }
 }
 
-
 void VisitMulHigh(InstructionSelector* selector, Node* node,
                   ArchOpcode opcode) {
   X64OperandGenerator g(selector);
@@ -881,18 +953,27 @@
   if (m.matches()) {
     Node* index = node->InputAt(0);
     Node* base = m.power_of_two_plus_one() ? index : nullptr;
-    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
+    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
+            kPositiveDisplacement);
     return;
   }
   VisitMul(this, node, kX64Imul32);
 }
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+  // TODO(mvstanton): Use Int32ScaleMatcher somehow.
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Imul32, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kX64Imul32, &cont);
+}
 
 void InstructionSelector::VisitInt64Mul(Node* node) {
   VisitMul(this, node, kX64Imul);
 }
 
-
 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   VisitMulHigh(this, node, kX64ImulHigh32);
 }
@@ -1057,7 +1138,36 @@
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* const value = node->InputAt(0);
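+  // If the input is a load that only this node uses, fold the extension into
+  // the load itself and emit a single sign- or zero-extending load.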
+  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+    MachineRepresentation rep = load_rep.representation();
+    InstructionCode opcode = kArchNop;
+    switch (rep) {
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
+        break;
+      case MachineRepresentation::kWord32:
+        opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
+        break;
+      default:
+        UNREACHABLE();
+        return;
+    }
+    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+    size_t input_count = 0;
+    InstructionOperand inputs[3];
+    AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+        node->InputAt(0), inputs, &input_count);
+    opcode |= AddressingModeField::encode(mode);
+    Emit(opcode, 1, outputs, input_count, inputs);
+  } else {
+    Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  }
 }
 
 
@@ -1114,6 +1224,12 @@
                  g.UseRegister(node->InputAt(0)));
 }
 
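+// Emits an instruction with two inputs whose output is constrained to the
+// same register as the first input, as the two-operand SSE forms require.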
+void VisitRRO(InstructionSelector* selector, Node* node,
+              InstructionCode opcode) {
+  X64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineSameAsFirst(node),
+                 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+}
 
 void VisitFloatBinop(InstructionSelector* selector, Node* node,
                      ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
@@ -1159,6 +1275,10 @@
       case IrOpcode::kWord64Shr: {
         Int64BinopMatcher m(value);
         if (m.right().Is(32)) {
+          if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
+            Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+            return;
+          }
           Emit(kX64Shr, g.DefineSameAsFirst(node),
                g.UseRegister(m.left().node()), g.TempImmediate(32));
           return;
@@ -1246,17 +1366,6 @@
 
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
-  X64OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
-                   kSSEFloat32Neg);
-    return;
-  }
-  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
-}
-
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
   VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
 }
 
@@ -1270,16 +1379,6 @@
 }
 
 
-void InstructionSelector::VisitFloat32Max(Node* node) {
-  VisitFloatBinop(this, node, kAVXFloat32Max, kSSEFloat32Max);
-}
-
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
-  VisitFloatBinop(this, node, kAVXFloat32Min, kSSEFloat32Min);
-}
-
-
 void InstructionSelector::VisitFloat32Abs(Node* node) {
   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
 }
@@ -1289,6 +1388,13 @@
   VisitRO(this, node, kSSEFloat32Sqrt);
 }
 
+void InstructionSelector::VisitFloat32Max(Node* node) {
+  VisitRRO(this, node, kSSEFloat32Max);
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+  VisitRRO(this, node, kSSEFloat32Min);
+}
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
@@ -1296,29 +1402,6 @@
 
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
-  X64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
-      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
-        Float64BinopMatcher mright0(m.right().InputAt(0));
-        if (mright0.left().IsMinusZero()) {
-          Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
-               g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
-          return;
-        }
-      }
-    }
-    VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
-                   kSSEFloat64Neg);
-    return;
-  }
-  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
-}
-
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
   VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
 }
 
@@ -1342,12 +1425,12 @@
 
 
 void InstructionSelector::VisitFloat64Max(Node* node) {
-  VisitFloatBinop(this, node, kAVXFloat64Max, kSSEFloat64Max);
+  VisitRRO(this, node, kSSEFloat64Max);
 }
 
 
 void InstructionSelector::VisitFloat64Min(Node* node) {
-  VisitFloatBinop(this, node, kAVXFloat64Min, kSSEFloat64Min);
+  VisitRRO(this, node, kSSEFloat64Min);
 }
 
 
@@ -1404,9 +1487,13 @@
   VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
 }
 
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+}
 
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+}
 
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
@@ -1431,7 +1518,7 @@
   // Prepare for C function call.
   if (descriptor->IsCFunctionCall()) {
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr);
 
     // Poke any stack arguments.
@@ -1489,7 +1576,7 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1507,7 +1594,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1530,10 +1617,7 @@
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
-                                    Node* right) {
-  if (opcode != kX64Cmp32 && opcode != kX64Test32) {
-    return opcode;
-  }
+                                    Node* right, FlagsContinuation* cont) {
   // Currently, if one of the two operands is not a Load, we don't know what its
   // machine representation is, so we bail out.
   // TODO(epertoso): we can probably get some size information out of immediates
@@ -1543,19 +1627,39 @@
   }
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
-  if (left_representation != LoadRepresentationOf(right->op())) {
-    return opcode;
+  MachineType left_type = LoadRepresentationOf(left->op());
+  MachineType right_type = LoadRepresentationOf(right->op());
+  if (left_type == right_type) {
+    switch (left_type.representation()) {
+      case MachineRepresentation::kBit:
+      case MachineRepresentation::kWord8: {
+        if (opcode == kX64Test32) return kX64Test8;
+        if (opcode == kX64Cmp32) {
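+          // The narrowed compare inherits the loads' signedness; switch to
+          // unsigned conditions when the operands are zero-extended.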
+          if (left_type.semantic() == MachineSemantic::kUint32) {
+            cont->OverwriteUnsignedIfSigned();
+          } else {
+            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+          }
+          return kX64Cmp8;
+        }
+        break;
+      }
+      case MachineRepresentation::kWord16:
+        if (opcode == kX64Test32) return kX64Test16;
+        if (opcode == kX64Cmp32) {
+          if (left_type.semantic() == MachineSemantic::kUint32) {
+            cont->OverwriteUnsignedIfSigned();
+          } else {
+            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+          }
+          return kX64Cmp16;
+        }
+        break;
+      default:
+        break;
+    }
   }
-  switch (left_representation.representation()) {
-    case MachineRepresentation::kBit:
-    case MachineRepresentation::kWord8:
-      return opcode == kX64Cmp32 ? kX64Cmp8 : kX64Test8;
-    case MachineRepresentation::kWord16:
-      return opcode == kX64Cmp32 ? kX64Cmp16 : kX64Test16;
-    default:
-      return opcode;
-  }
+  return opcode;
 }
 
 // Shared routine for multiple word compare operations.
@@ -1565,7 +1669,7 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  opcode = TryNarrowOpcodeSize(opcode, left, right);
+  opcode = TryNarrowOpcodeSize(opcode, left, right, cont);
 
   // If one of the two inputs is an immediate, make sure it's on the right, or
   // if one of the two inputs is a memory operand, make sure it's on the left.
@@ -1624,7 +1728,7 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
                                  cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
@@ -1766,6 +1870,9 @@
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kX64Sub32, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Imul32, cont);
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kX64Add, cont);
@@ -1805,14 +1912,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -2110,10 +2217,6 @@
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
-      MachineOperatorBuilder::kFloat32Max |
-      MachineOperatorBuilder::kFloat32Min |
-      MachineOperatorBuilder::kFloat64Max |
-      MachineOperatorBuilder::kFloat64Min |
       MachineOperatorBuilder::kWord32ShiftIsSafe |
       MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
   if (CpuFeatures::IsSupported(POPCNT)) {
diff --git a/src/compiler/x64/unwinding-info-writer-x64.cc b/src/compiler/x64/unwinding-info-writer-x64.cc
new file mode 100644
index 0000000..4efba32
--- /dev/null
+++ b/src/compiler/x64/unwinding-info-writer-x64.cc
@@ -0,0 +1,102 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/x64/unwinding-info-writer-x64.h"
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
+                                                const InstructionBlock* block) {
+  if (!enabled()) return;
+
+  block_will_exit_ = false;
+
+  DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+  const BlockInitialState* initial_state =
+      block_initial_states_[block->rpo_number().ToInt()];
+  if (initial_state) {
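+    // Emit only the delta between the writer's current state and this
+    // block's recorded initial state, advancing the location first.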
+    if (!initial_state->register_.is(eh_frame_writer_.base_register()) &&
+        initial_state->offset_ != eh_frame_writer_.base_offset()) {
+      eh_frame_writer_.AdvanceLocation(pc_offset);
+      eh_frame_writer_.SetBaseAddressRegisterAndOffset(initial_state->register_,
+                                                       initial_state->offset_);
+    } else if (!initial_state->register_.is(eh_frame_writer_.base_register())) {
+      eh_frame_writer_.AdvanceLocation(pc_offset);
+      eh_frame_writer_.SetBaseAddressRegister(initial_state->register_);
+    } else if (initial_state->offset_ != eh_frame_writer_.base_offset()) {
+      eh_frame_writer_.AdvanceLocation(pc_offset);
+      eh_frame_writer_.SetBaseAddressOffset(initial_state->offset_);
+    }
+
+    tracking_fp_ = initial_state->tracking_fp_;
+  } else {
+    // The entry block always lacks an explicit initial state.
+    // The exit block may lack an explicit state, if it is only reached by
+    //   the block ending in a ret.
+    // All the other blocks must have an explicit initial state.
+    DCHECK(block->predecessors().empty() || block->successors().empty());
+  }
+}
+
+void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
+  if (!enabled() || block_will_exit_) return;
+
+  for (const RpoNumber& successor : block->successors()) {
+    int successor_index = successor.ToInt();
+    DCHECK_LT(successor_index, block_initial_states_.size());
+    const BlockInitialState* existing_state =
+        block_initial_states_[successor_index];
+    // If we already have an entry for this block, check that it matches the
+    // values we are trying to insert.
+    if (existing_state) {
+      DCHECK(existing_state->register_.is(eh_frame_writer_.base_register()));
+      DCHECK_EQ(existing_state->offset_, eh_frame_writer_.base_offset());
+      DCHECK_EQ(existing_state->tracking_fp_, tracking_fp_);
+    } else {
+      block_initial_states_[successor_index] = new (zone_)
+          BlockInitialState(eh_frame_writer_.base_register(),
+                            eh_frame_writer_.base_offset(), tracking_fp_);
+    }
+  }
+}
+
+void UnwindingInfoWriter::MarkFrameConstructed(int pc_base) {
+  if (!enabled()) return;
+
+  // push rbp
+  eh_frame_writer_.AdvanceLocation(pc_base + 1);
+  eh_frame_writer_.IncreaseBaseAddressOffset(kInt64Size);
+  // <base address> points at the bottom of the current frame on x64 and
+  // <base register> is rsp, which points to the top of the frame by definition.
+  // Thus, the distance between <base address> and the top is -<base offset>.
+  int top_of_stack = -eh_frame_writer_.base_offset();
+  eh_frame_writer_.RecordRegisterSavedToStack(rbp, top_of_stack);
+
+  // mov rbp, rsp
+  eh_frame_writer_.AdvanceLocation(pc_base + 4);
+  eh_frame_writer_.SetBaseAddressRegister(rbp);
+
+  tracking_fp_ = true;
+}
+
+void UnwindingInfoWriter::MarkFrameDeconstructed(int pc_base) {
+  if (!enabled()) return;
+
+  // mov rsp, rbp
+  eh_frame_writer_.AdvanceLocation(pc_base + 3);
+  eh_frame_writer_.SetBaseAddressRegister(rsp);
+
+  // pop rbp
+  eh_frame_writer_.AdvanceLocation(pc_base + 4);
+  eh_frame_writer_.IncreaseBaseAddressOffset(-kInt64Size);
+
+  tracking_fp_ = false;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/unwinding-info-writer-x64.h b/src/compiler/x64/unwinding-info-writer-x64.h
new file mode 100644
index 0000000..8bb5903
--- /dev/null
+++ b/src/compiler/x64/unwinding-info-writer-x64.h
@@ -0,0 +1,79 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+  explicit UnwindingInfoWriter(Zone* zone)
+      : zone_(zone),
+        eh_frame_writer_(zone),
+        tracking_fp_(false),
+        block_will_exit_(false),
+        block_initial_states_(zone) {
+    if (enabled()) eh_frame_writer_.Initialize();
+  }
+
+  void MaybeIncreaseBaseOffsetAt(int pc_offset, int base_delta) {
+    if (enabled() && !tracking_fp_) {
+      eh_frame_writer_.AdvanceLocation(pc_offset);
+      eh_frame_writer_.IncreaseBaseAddressOffset(base_delta);
+    }
+  }
+
+  void SetNumberOfInstructionBlocks(int number) {
+    if (enabled()) block_initial_states_.resize(number);
+  }
+
+  void BeginInstructionBlock(int pc_offset, const InstructionBlock* block);
+  void EndInstructionBlock(const InstructionBlock* block);
+
+  void MarkFrameConstructed(int pc_base);
+  void MarkFrameDeconstructed(int pc_base);
+
+  void MarkBlockWillExit() { block_will_exit_ = true; }
+
+  void Finish(int code_size) {
+    if (enabled()) eh_frame_writer_.Finish(code_size);
+  }
+
+  EhFrameWriter* eh_frame_writer() {
+    return enabled() ? &eh_frame_writer_ : nullptr;
+  }
+
+ private:
+  bool enabled() const { return FLAG_perf_prof_unwinding_info; }
+
+  class BlockInitialState : public ZoneObject {
+   public:
+    BlockInitialState(Register reg, int offset, bool tracking_fp)
+        : register_(reg), offset_(offset), tracking_fp_(tracking_fp) {}
+
+    Register register_;
+    int offset_;
+    bool tracking_fp_;
+  };
+
+  Zone* zone_;
+  EhFrameWriter eh_frame_writer_;
+  bool tracking_fp_;
+  bool block_will_exit_;
+
+  ZoneVector<const BlockInitialState*> block_initial_states_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index 6bacda0..29e2dd7 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -191,21 +191,35 @@
   Register const result_;
 };
 
-
-class OutOfLineLoadFloat final : public OutOfLineCode {
+class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
  public:
-  OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
+  OutOfLineLoadFloat32NaN(CodeGenerator* gen, X87Register result)
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
     DCHECK(result_.code() == 0);
     USE(result_);
-    if (FLAG_debug_code && FLAG_enable_slow_asserts) {
-      __ VerifyX87StackDepth(1);
-    }
     __ fstp(0);
-    __ push(Immediate(0xffffffff));
-    __ push(Immediate(0x7fffffff));
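+    // 0xffc00000 is the bit pattern of a quiet float32 NaN.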
+    __ push(Immediate(0xffc00000));
+    __ fld_s(MemOperand(esp, 0));
+    __ lea(esp, Operand(esp, kFloatSize));
+  }
+
+ private:
+  X87Register const result_;
+};
+
+class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat64NaN(CodeGenerator* gen, X87Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    DCHECK(result_.code() == 0);
+    USE(result_);
+    __ fstp(0);
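+    // Push the two halves of a quiet float64 NaN (0xfff8000000000000).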
+    __ push(Immediate(0xfff80000));
+    __ push(Immediate(0x00000000));
     __ fld_d(MemOperand(esp, 0));
     __ lea(esp, Operand(esp, kDoubleSize));
   }
@@ -214,7 +228,6 @@
   X87Register const result_;
 };
 
-
 class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
  public:
   OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
@@ -275,25 +288,23 @@
 
 }  // namespace
 
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
-  do {                                                                  \
-    auto result = i.OutputDoubleRegister();                             \
-    auto offset = i.InputRegister(0);                                   \
-    DCHECK(result.code() == 0);                                         \
-    if (instr->InputAt(1)->IsRegister()) {                              \
-      __ cmp(offset, i.InputRegister(1));                               \
-    } else {                                                            \
-      __ cmp(offset, i.InputImmediate(1));                              \
-    }                                                                   \
-    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
-    __ j(above_equal, ool->entry());                                    \
-    __ fstp(0);                                                         \
-    __ asm_instr(i.MemoryOperand(2));                                   \
-    __ bind(ool->exit());                                               \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN)      \
+  do {                                                                \
+    auto result = i.OutputDoubleRegister();                           \
+    auto offset = i.InputRegister(0);                                 \
+    DCHECK(result.code() == 0);                                       \
+    if (instr->InputAt(1)->IsRegister()) {                            \
+      __ cmp(offset, i.InputRegister(1));                             \
+    } else {                                                          \
+      __ cmp(offset, i.InputImmediate(1));                            \
+    }                                                                 \
+    OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
+    __ j(above_equal, ool->entry());                                  \
+    __ fstp(0);                                                       \
+    __ asm_instr(i.MemoryOperand(2));                                 \
+    __ bind(ool->exit());                                             \
   } while (false)
 
-
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
   do {                                                                    \
     auto result = i.OutputRegister();                                     \
@@ -420,21 +431,7 @@
   __ pop(ebp);
 }
 
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta > 0) {
-    __ add(esp, Immediate(sp_slot_delta * kPointerSize));
-  }
-  frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
-  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
-  if (sp_slot_delta < 0) {
-    __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
-    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
-  }
+void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
     __ mov(ebp, MemOperand(ebp, 0));
   }
@@ -479,6 +476,68 @@
   __ bind(&done);
 }
 
+namespace {
+
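+// Brings esp to the position implied by |new_slot_above_sp|, growing the
+// stack as needed; shrinking is done only when |allow_shrinkage| is set.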
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+                                   FrameAccessState* state,
+                                   int new_slot_above_sp,
+                                   bool allow_shrinkage = true) {
+  int current_sp_offset = state->GetSPToFPSlotCount() +
+                          StandardFrameConstants::kFixedSlotCountAboveFp;
+  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+  if (stack_slot_delta > 0) {
+    masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  } else if (allow_shrinkage && stack_slot_delta < 0) {
+    masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+    state->IncreaseSPDelta(stack_slot_delta);
+  }
+}
+
+}  // namespace
+
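+// Materializes push-compatible gap moves as actual pushes, then adjusts esp
+// to the slot layout the tail call expects.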
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+                                              int first_unused_stack_slot) {
+  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+  ZoneVector<MoveOperands*> pushes(zone());
+  GetPushCompatibleMoves(instr, flags, &pushes);
+
+  if (!pushes.empty() &&
+      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+       first_unused_stack_slot)) {
+    X87OperandConverter g(this, instr);
+    for (auto move : pushes) {
+      LocationOperand destination_location(
+          LocationOperand::cast(move->destination()));
+      InstructionOperand source(move->source());
+      AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                    destination_location.index());
+      if (source.IsStackSlot()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ push(g.SlotToOperand(source_location.index()));
+      } else if (source.IsRegister()) {
+        LocationOperand source_location(LocationOperand::cast(source));
+        __ push(source_location.GetRegister());
+      } else if (source.IsImmediate()) {
+        __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
+      } else {
+        // Pushes of non-scalar data types are not supported.
+        UNIMPLEMENTED();
+      }
+      frame_access_state()->IncreaseSPDelta(1);
+      move->Eliminate();
+    }
+  }
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+                                             int first_unused_stack_slot) {
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
+                                first_unused_stack_slot);
+}
+
 // Assembles an instruction after register allocation, producing machine code.
 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     Instruction* instr) {
@@ -524,8 +583,6 @@
         __ VerifyX87StackDepth(1);
       }
       __ fstp(0);
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          no_reg, no_reg, no_reg);
@@ -539,15 +596,15 @@
         __ jmp(reg);
       }
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchTailCallAddress: {
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       __ jmp(reg);
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchCallJSFunction: {
@@ -592,14 +649,13 @@
         __ VerifyX87StackDepth(1);
       }
       __ fstp(0);
-      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
-      AssembleDeconstructActivationRecord(stack_param_delta);
       if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          no_reg, no_reg, no_reg);
       }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
+      frame_access_state()->SetFrameAccessToDefault();
       break;
     }
     case kArchPrepareCallCFunction: {
@@ -610,7 +666,7 @@
       break;
     }
     case kArchPrepareTailCall:
-      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -659,6 +715,9 @@
     case kArchDebugBreak:
       __ int3();
       break;
+    case kArchImpossible:
+      __ Abort(kConversionFromImpossibleValue);
+      break;
     case kArchNop:
     case kArchThrowTerminator:
       // don't emit code for nops.
@@ -748,9 +807,24 @@
       __ lea(i.OutputRegister(), Operand(base, offset.offset()));
       break;
     }
+    case kIeee754Float64Acos:
+      ASSEMBLE_IEEE754_UNOP(acos);
+      break;
+    case kIeee754Float64Acosh:
+      ASSEMBLE_IEEE754_UNOP(acosh);
+      break;
+    case kIeee754Float64Asin:
+      ASSEMBLE_IEEE754_UNOP(asin);
+      break;
+    case kIeee754Float64Asinh:
+      ASSEMBLE_IEEE754_UNOP(asinh);
+      break;
     case kIeee754Float64Atan:
       ASSEMBLE_IEEE754_UNOP(atan);
       break;
+    case kIeee754Float64Atanh:
+      ASSEMBLE_IEEE754_UNOP(atanh);
+      break;
     case kIeee754Float64Atan2:
       ASSEMBLE_IEEE754_BINOP(atan2);
       break;
@@ -762,6 +836,9 @@
       ASSEMBLE_IEEE754_UNOP(cos);
       __ X87SetFPUCW(0x037F);
       break;
+    case kIeee754Float64Cosh:
+      ASSEMBLE_IEEE754_UNOP(cosh);
+      break;
     case kIeee754Float64Expm1:
       __ X87SetFPUCW(0x027F);
       ASSEMBLE_IEEE754_UNOP(expm1);
@@ -770,9 +847,6 @@
     case kIeee754Float64Exp:
       ASSEMBLE_IEEE754_UNOP(exp);
       break;
-    case kIeee754Float64Atanh:
-      ASSEMBLE_IEEE754_UNOP(atanh);
-      break;
     case kIeee754Float64Log:
       ASSEMBLE_IEEE754_UNOP(log);
       break;
@@ -785,16 +859,32 @@
     case kIeee754Float64Log10:
       ASSEMBLE_IEEE754_UNOP(log10);
       break;
+    case kIeee754Float64Pow: {
+      // Keep the x87 FPU stack empty before calling stub code.
+      __ fstp(0);
+      // Call MathPowStub and leave the return value in st(0).
+      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+      __ CallStub(&stub);
+      // The return value is in st(0) on x87; drop the stack arguments.
+      __ lea(esp, Operand(esp, 2 * kDoubleSize));
+      break;
+    }
     case kIeee754Float64Sin:
       __ X87SetFPUCW(0x027F);
       ASSEMBLE_IEEE754_UNOP(sin);
       __ X87SetFPUCW(0x037F);
       break;
+    case kIeee754Float64Sinh:
+      ASSEMBLE_IEEE754_UNOP(sinh);
+      break;
     case kIeee754Float64Tan:
       __ X87SetFPUCW(0x027F);
       ASSEMBLE_IEEE754_UNOP(tan);
       __ X87SetFPUCW(0x037F);
       break;
+    case kIeee754Float64Tanh:
+      ASSEMBLE_IEEE754_UNOP(tanh);
+      break;
     case kX87Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -1085,93 +1175,7 @@
       __ X87SetFPUCW(0x037F);
       break;
     }
-    case kX87Float32Max: {
-      Label check_nan_left, check_zero, return_left, return_right;
-      Condition condition = below;
-      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
-        __ VerifyX87StackDepth(1);
-      }
-      __ fstp(0);
-      __ fld_s(MemOperand(esp, kFloatSize));
-      __ fld_s(MemOperand(esp, 0));
-      __ fld(1);
-      __ fld(1);
-      __ FCmp();
 
-      // At least one NaN.
-      // Return the second operands if one of the two operands is NaN
-      __ j(parity_even, &return_right, Label::kNear);
-      __ j(equal, &check_zero, Label::kNear);            // left == right.
-      __ j(condition, &return_left, Label::kNear);
-      __ jmp(&return_right, Label::kNear);
-
-      __ bind(&check_zero);
-      __ fld(0);
-      __ fldz();
-      __ FCmp();
-      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
-
-      __ fadd(1);
-      __ jmp(&return_left, Label::kNear);
-
-      __ bind(&return_right);
-      __ fxch();
-
-      __ bind(&return_left);
-      __ fstp(0);
-      __ lea(esp, Operand(esp, 2 * kFloatSize));
-      break;
-    }
-    case kX87Float32Min: {
-      Label check_nan_left, check_zero, return_left, return_right;
-      Condition condition = above;
-      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
-        __ VerifyX87StackDepth(1);
-      }
-      __ fstp(0);
-      __ fld_s(MemOperand(esp, kFloatSize));
-      __ fld_s(MemOperand(esp, 0));
-      __ fld(1);
-      __ fld(1);
-      __ FCmp();
-      // At least one NaN.
-      // Return the second operands if one of the two operands is NaN
-      __ j(parity_even, &return_right, Label::kNear);
-      __ j(equal, &check_zero, Label::kNear);            // left == right.
-      __ j(condition, &return_left, Label::kNear);
-      __ jmp(&return_right, Label::kNear);
-
-      __ bind(&check_zero);
-      __ fld(0);
-      __ fldz();
-      __ FCmp();
-      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
-      // At this point, both left and right are either 0 or -0.
-      // Push st0 and st1 to stack, then pop them to temp registers and OR them,
-      // load it to left.
-      __ push(eax);
-      __ fld(1);
-      __ fld(1);
-      __ sub(esp, Immediate(2 * kPointerSize));
-      __ fstp_s(MemOperand(esp, 0));
-      __ fstp_s(MemOperand(esp, kPointerSize));
-      __ pop(eax);
-      __ xor_(MemOperand(esp, 0), eax);
-      __ fstp(0);
-      __ fld_s(MemOperand(esp, 0));
-      __ pop(eax);  // restore esp
-      __ pop(eax);  // restore esp
-      __ jmp(&return_left, Label::kNear);
-
-
-      __ bind(&return_right);
-      __ fxch();
-
-      __ bind(&return_left);
-      __ fstp(0);
-      __ lea(esp, Operand(esp, 2 * kFloatSize));
-      break;
-    }
     case kX87Float32Sqrt: {
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(1);
@@ -1192,6 +1196,16 @@
       __ lea(esp, Operand(esp, kFloatSize));
       break;
     }
+    case kX87Float32Neg: {
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      __ fld_s(MemOperand(esp, 0));
+      __ fchs();
+      __ lea(esp, Operand(esp, kFloatSize));
+      break;
+    }
     case kX87Float32Round: {
       RoundingMode mode =
           static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
@@ -1286,9 +1300,44 @@
       __ lea(esp, Operand(esp, 2 * kDoubleSize));
       break;
     }
+    case kX87Float32Max: {
+      Label compare_swap, done_compare;
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      __ fld_s(MemOperand(esp, kFloatSize));
+      __ fld_s(MemOperand(esp, 0));
+      __ fld(1);
+      __ fld(1);
+      __ FCmp();
+
+      auto ool =
+          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(below, &done_compare, Label::kNear);
+      __ j(above, &compare_swap, Label::kNear);
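+      // The operands compare equal, but may still be +0 and -0; check the
+      // sign bit so that Max(-0, +0) returns +0.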
+      __ push(eax);
+      __ lea(esp, Operand(esp, -kFloatSize));
+      __ fld(1);
+      __ fstp_s(Operand(esp, 0));
+      __ mov(eax, MemOperand(esp, 0));
+      __ and_(eax, Immediate(0x80000000));
+      __ lea(esp, Operand(esp, kFloatSize));
+      __ pop(eax);
+      __ j(zero, &done_compare, Label::kNear);
+
+      __ bind(&compare_swap);
+      __ bind(ool->exit());
+      __ fxch(1);
+
+      __ bind(&done_compare);
+      __ fstp(0);
+      __ lea(esp, Operand(esp, 2 * kFloatSize));
+      break;
+    }
     case kX87Float64Max: {
-      Label check_zero, return_left, return_right;
-      Condition condition = below;
+      Label compare_swap, done_compare;
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(1);
       }
@@ -1298,29 +1347,69 @@
       __ fld(1);
       __ fld(1);
       __ FCmp();
-      __ j(parity_even, &return_right,
-           Label::kNear);  // At least one NaN, Return right.
-      __ j(equal, &check_zero, Label::kNear);  // left == right.
-      __ j(condition, &return_left, Label::kNear);
-      __ jmp(&return_right, Label::kNear);
 
-      __ bind(&check_zero);
-      __ fld(0);
-      __ fldz();
-      __ FCmp();
-      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
+      auto ool =
+          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(below, &done_compare, Label::kNear);
+      __ j(above, &compare_swap, Label::kNear);
+      __ push(eax);
+      __ lea(esp, Operand(esp, -kDoubleSize));
+      __ fld(1);
+      __ fstp_d(Operand(esp, 0));
+      __ mov(eax, MemOperand(esp, 4));
+      __ and_(eax, Immediate(0x80000000));
+      __ lea(esp, Operand(esp, kDoubleSize));
+      __ pop(eax);
+      __ j(zero, &done_compare, Label::kNear);
 
-      __ bind(&return_right);
-      __ fxch();
+      __ bind(&compare_swap);
+      __ bind(ool->exit());
+      __ fxch(1);
 
-      __ bind(&return_left);
+      __ bind(&done_compare);
       __ fstp(0);
       __ lea(esp, Operand(esp, 2 * kDoubleSize));
       break;
     }
+    case kX87Float32Min: {
+      Label compare_swap, done_compare;
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      __ fld_s(MemOperand(esp, kFloatSize));
+      __ fld_s(MemOperand(esp, 0));
+      __ fld(1);
+      __ fld(1);
+      __ FCmp();
+
+      auto ool =
+          new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(above, &done_compare, Label::kNear);
+      __ j(below, &compare_swap, Label::kNear);
+      __ push(eax);
+      __ lea(esp, Operand(esp, -kFloatSize));
+      __ fld(0);
+      __ fstp_s(Operand(esp, 0));
+      __ mov(eax, MemOperand(esp, 0));
+      __ and_(eax, Immediate(0x80000000));
+      __ lea(esp, Operand(esp, kFloatSize));
+      __ pop(eax);
+      __ j(zero, &done_compare, Label::kNear);
+
+      __ bind(&compare_swap);
+      __ bind(ool->exit());
+      __ fxch(1);
+
+      __ bind(&done_compare);
+      __ fstp(0);
+      __ lea(esp, Operand(esp, 2 * kFloatSize));
+      break;
+    }
     case kX87Float64Min: {
-      Label check_zero, return_left, return_right;
-      Condition condition = above;
+      Label compare_swap, done_compare;
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(1);
       }
@@ -1330,22 +1419,27 @@
       __ fld(1);
       __ fld(1);
       __ FCmp();
-      __ j(parity_even, &return_right,
-           Label::kNear);  // At least one NaN, return right value.
-      __ j(equal, &check_zero, Label::kNear);  // left == right.
-      __ j(condition, &return_left, Label::kNear);
-      __ jmp(&return_right, Label::kNear);
 
-      __ bind(&check_zero);
+      auto ool =
+          new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+      __ j(parity_even, ool->entry());
+      __ j(above, &done_compare, Label::kNear);
+      __ j(below, &compare_swap, Label::kNear);
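+      // Equal operands may nonetheless be +0 and -0; check the sign bit so
+      // that Min(-0, +0) returns -0.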
+      __ push(eax);
+      __ lea(esp, Operand(esp, -kDoubleSize));
       __ fld(0);
-      __ fldz();
-      __ FCmp();
-      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
+      __ fstp_d(Operand(esp, 0));
+      __ mov(eax, MemOperand(esp, 4));
+      __ and_(eax, Immediate(0x80000000));
+      __ lea(esp, Operand(esp, kDoubleSize));
+      __ pop(eax);
+      __ j(zero, &done_compare, Label::kNear);
 
-      __ bind(&return_right);
-      __ fxch();
+      __ bind(&compare_swap);
+      __ bind(ool->exit());
+      __ fxch(1);
 
-      __ bind(&return_left);
+      __ bind(&done_compare);
       __ fstp(0);
       __ lea(esp, Operand(esp, 2 * kDoubleSize));
       break;
@@ -1360,6 +1454,16 @@
       __ lea(esp, Operand(esp, kDoubleSize));
       break;
     }
+    case kX87Float64Neg: {
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      __ fld_d(MemOperand(esp, 0));
+      __ fchs();
+      __ lea(esp, Operand(esp, kDoubleSize));
+      break;
+    }
     case kX87Int32ToFloat32: {
       InstructionOperand* input = instr->InputAt(0);
       DCHECK(input->IsRegister() || input->IsStackSlot());
@@ -1480,12 +1584,16 @@
       __ TruncateX87TOSToI(i.OutputRegister(0));
       __ test(i.OutputRegister(0), i.OutputRegister(0));
       __ j(positive, &success);
+      // Need to preserve the input float32 data.
+      __ fld(0);
       __ push(Immediate(INT32_MIN));
       __ fild_s(Operand(esp, 0));
       __ lea(esp, Operand(esp, kPointerSize));
       __ faddp();
       __ TruncateX87TOSToI(i.OutputRegister(0));
       __ or_(i.OutputRegister(0), Immediate(0x80000000));
+      // Leave only the input float32 value on the x87 stack on return.
+      __ fstp(0);
       __ bind(&success);
       if (!instr->InputAt(0)->IsFPRegister()) {
         __ fstp(0);
@@ -1881,10 +1989,10 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s, OutOfLineLoadFloat32NaN);
       break;
     case kCheckedLoadFloat64:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d, OutOfLineLoadFloat64NaN);
       break;
     case kCheckedStoreWord8:
       ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -2137,6 +2245,9 @@
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
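+  // Record the reason for this bailout so deopt tracing and profiling can
+  // report it.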
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
 }
@@ -2409,18 +2520,7 @@
     Constant src_constant = g.ToConstant(source);
     if (src_constant.type() == Constant::kHeapObject) {
       Handle<HeapObject> src = src_constant.ToHeapObject();
-      int slot;
-      if (IsMaterializableFromFrame(src, &slot)) {
-        if (destination->IsRegister()) {
-          Register dst = g.ToRegister(destination);
-          __ mov(dst, g.SlotToOperand(slot));
-        } else {
-          DCHECK(destination->IsStackSlot());
-          Operand dst = g.ToOperand(destination);
-          __ push(g.SlotToOperand(slot));
-          __ pop(dst);
-        }
-      } else if (destination->IsRegister()) {
+      if (destination->IsRegister()) {
         Register dst = g.ToRegister(destination);
         __ LoadHeapObject(dst, src);
       } else {
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index 2b4be3e..5f527fd 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -49,9 +49,8 @@
   V(X87Float32Sub)                 \
   V(X87Float32Mul)                 \
   V(X87Float32Div)                 \
-  V(X87Float32Max)                 \
-  V(X87Float32Min)                 \
   V(X87Float32Abs)                 \
+  V(X87Float32Neg)                 \
   V(X87Float32Sqrt)                \
   V(X87Float32Round)               \
   V(X87LoadFloat64Constant)        \
@@ -60,9 +59,12 @@
   V(X87Float64Mul)                 \
   V(X87Float64Div)                 \
   V(X87Float64Mod)                 \
+  V(X87Float32Max)                 \
   V(X87Float64Max)                 \
+  V(X87Float32Min)                 \
   V(X87Float64Min)                 \
   V(X87Float64Abs)                 \
+  V(X87Float64Neg)                 \
   V(X87Int32ToFloat32)             \
   V(X87Uint32ToFloat32)            \
   V(X87Int32ToFloat64)             \
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index 45779c7..0fe6a4b 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -86,12 +86,16 @@
 
   AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
                                              Node* displacement_node,
+                                             DisplacementMode displacement_mode,
                                              InstructionOperand inputs[],
                                              size_t* input_count) {
     AddressingMode mode = kMode_MRI;
     int32_t displacement = (displacement_node == nullptr)
                                ? 0
                                : OpParameter<int32_t>(displacement_node);
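+    // The matcher reports only the displacement magnitude; negate it for
+    // [base - displacement] address forms.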
+    if (displacement_mode == kNegativeDisplacement) {
+      displacement = -displacement;
+    }
     if (base != nullptr) {
       if (base->opcode() == IrOpcode::kInt32Constant) {
         displacement += OpParameter<int32_t>(base);
@@ -146,11 +150,12 @@
   AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement32Matcher m(node, true);
+    BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
     DCHECK(m.matches());
     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
-      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
-                                         m.displacement(), inputs, input_count);
+      return GenerateMemoryOperandInputs(
+          m.index(), m.scale(), m.base(), m.displacement(),
+          m.displacement_mode(), inputs, input_count);
     } else {
       inputs[(*input_count)++] = UseRegister(node->InputAt(0));
       inputs[(*input_count)++] = UseRegister(node->InputAt(1));
@@ -182,7 +187,9 @@
     case MachineRepresentation::kWord16:
       opcode = load_rep.IsSigned() ? kX87Movsxwl : kX87Movzxwl;
       break;
-    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord32:
       opcode = kX87Movl;
       break;
@@ -266,7 +273,9 @@
       case MachineRepresentation::kWord16:
         opcode = kX87Movw;
         break;
-      case MachineRepresentation::kTagged:  // Fall through.
+      case MachineRepresentation::kTaggedSigned:   // Fall through.
+      case MachineRepresentation::kTaggedPointer:  // Fall through.
+      case MachineRepresentation::kTagged:         // Fall through.
       case MachineRepresentation::kWord32:
         opcode = kX87Movl;
         break;
@@ -299,6 +308,11 @@
   }
 }
 
+// The architecture supports unaligned access; VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access; VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -323,10 +337,12 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:      // Fall through.
-    case MachineRepresentation::kTagged:   // Fall through.
-    case MachineRepresentation::kWord64:   // Fall through.
-    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kBit:            // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:         // Fall through.
+    case MachineRepresentation::kSimd128:        // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -370,10 +386,12 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:      // Fall through.
-    case MachineRepresentation::kTagged:   // Fall through.
-    case MachineRepresentation::kWord64:   // Fall through.
-    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kBit:            // Fall through.
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:         // Fall through.
+    case MachineRepresentation::kSimd128:        // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -454,7 +472,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -538,12 +556,14 @@
 }
 
 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
-             int scale, Node* base, Node* displacement) {
+             int scale, Node* base, Node* displacement,
+             DisplacementMode displacement_mode) {
   X87OperandGenerator g(selector);
   InstructionOperand inputs[4];
   size_t input_count = 0;
-  AddressingMode mode = g.GenerateMemoryOperandInputs(
-      index, scale, base, displacement, inputs, &input_count);
+  AddressingMode mode =
+      g.GenerateMemoryOperandInputs(index, scale, base, displacement,
+                                    displacement_mode, inputs, &input_count);
 
   DCHECK_NE(0u, input_count);
   DCHECK_GE(arraysize(inputs), input_count);
@@ -564,7 +584,7 @@
   if (m.matches()) {
     Node* index = node->InputAt(0);
     Node* base = m.power_of_two_plus_one() ? index : nullptr;
-    EmitLea(this, node, index, m.scale(), base, nullptr);
+    EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
     return;
   }
   VisitShift(this, node, kX87Shl);
@@ -684,6 +704,9 @@
 
 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   X87OperandGenerator g(this);
@@ -701,7 +724,8 @@
     InstructionOperand inputs[4];
     size_t input_count = 0;
     AddressingMode mode = g.GenerateMemoryOperandInputs(
-        m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
+        m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
+        inputs, &input_count);
 
     DCHECK_NE(0u, input_count);
     DCHECK_GE(arraysize(inputs), input_count);
@@ -735,7 +759,7 @@
   if (m.matches()) {
     Node* index = node->InputAt(0);
     Node* base = m.power_of_two_plus_one() ? index : nullptr;
-    EmitLea(this, node, index, m.scale(), base, nullptr);
+    EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
     return;
   }
   X87OperandGenerator g(this);
@@ -901,13 +925,6 @@
   Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  X87OperandGenerator g(this);
-  Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
-  Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
-  Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -915,14 +932,6 @@
   Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
-  X87OperandGenerator g(this);
-  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
-  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
-  Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -963,7 +972,6 @@
   Emit(kX87Float64Mod, g.DefineAsFixed(node, stX_0), 1, temps)->MarkAsCall();
 }
 
-
 void InstructionSelector::VisitFloat32Max(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -971,7 +979,6 @@
   Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
-
 void InstructionSelector::VisitFloat64Max(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -979,7 +986,6 @@
   Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
-
 void InstructionSelector::VisitFloat32Min(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -987,7 +993,6 @@
   Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
-
 void InstructionSelector::VisitFloat64Min(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -1083,9 +1088,17 @@
        g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
 }
 
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
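+// Negation pushes the operand and uses the dedicated x87 negate opcode.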
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+  Emit(kX87Float32Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
 
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+  Emit(kX87Float64Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
 
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
@@ -1112,7 +1125,7 @@
     InstructionOperand temps[] = {g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     Emit(kArchPrepareCallCFunction |
-             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
          0, nullptr, 0, nullptr, temp_count, temps);
 
     // Poke any stack arguments.
@@ -1170,7 +1183,7 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1188,7 +1201,7 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                              cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
@@ -1211,10 +1224,7 @@
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
-                                    Node* right) {
-  if (opcode != kX87Cmp && opcode != kX87Test) {
-    return opcode;
-  }
+                                    Node* right, FlagsContinuation* cont) {
   // Currently, if one of the two operands is not a Load, we don't know what its
   // machine representation is, so we bail out.
   // TODO(epertoso): we can probably get some size information out of immediates
@@ -1224,19 +1234,39 @@
   }
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
-  if (left_representation != LoadRepresentationOf(right->op())) {
-    return opcode;
+  MachineType left_type = LoadRepresentationOf(left->op());
+  MachineType right_type = LoadRepresentationOf(right->op());
+  if (left_type == right_type) {
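+    // Both operands are narrow loads of the same type: try an 8/16-bit
+    // compare, switching signed conditions to unsigned for zero-extended
+    // (Uint32) operands.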
+    switch (left_type.representation()) {
+      case MachineRepresentation::kBit:
+      case MachineRepresentation::kWord8: {
+        if (opcode == kX87Test) return kX87Test8;
+        if (opcode == kX87Cmp) {
+          if (left_type.semantic() == MachineSemantic::kUint32) {
+            cont->OverwriteUnsignedIfSigned();
+          } else {
+            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+          }
+          return kX87Cmp8;
+        }
+        break;
+      }
+      case MachineRepresentation::kWord16:
+        if (opcode == kX87Test) return kX87Test16;
+        if (opcode == kX87Cmp) {
+          if (left_type.semantic() == MachineSemantic::kUint32) {
+            cont->OverwriteUnsignedIfSigned();
+          } else {
+            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+          }
+          return kX87Cmp16;
+        }
+        break;
+      default:
+        break;
+    }
   }
-  switch (left_representation.representation()) {
-    case MachineRepresentation::kBit:
-    case MachineRepresentation::kWord8:
-      return opcode == kX87Cmp ? kX87Cmp8 : kX87Test8;
-    case MachineRepresentation::kWord16:
-      return opcode == kX87Cmp ? kX87Cmp16 : kX87Test16;
-    default:
-      return opcode;
-  }
+  return opcode;
 }
 
 // Shared routine for multiple float32 compare operations (inputs commuted).
@@ -1251,7 +1281,7 @@
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
                              g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(cont->Encode(kX87Float32Cmp),
@@ -1272,7 +1302,7 @@
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
                              g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
-                             cont->frame_state());
+                             cont->reason(), cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(cont->Encode(kX87Float64Cmp),
@@ -1287,7 +1317,8 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+  InstructionCode narrowed_opcode =
+      TryNarrowOpcodeSize(opcode, left, right, cont);
 
   int effect_level = selector->GetEffectLevel(node);
   if (cont->IsBranch()) {
@@ -1351,7 +1382,7 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
                                  cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
@@ -1432,6 +1463,9 @@
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop(selector, node, kX87Sub, cont);
+              case IrOpcode::kInt32MulWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX87Imul, cont);
               default:
                 break;
             }
@@ -1463,14 +1497,14 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
-  FlagsContinuation cont =
-      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1557,6 +1591,14 @@
   VisitBinop(this, node, kX87Sub, &cont);
 }
 
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
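+  // Fuse the overflow check into the IMUL when the overflow projection is
+  // actually used; otherwise emit a plain multiply.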
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kX87Imul, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kX87Imul, &cont);
+}
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
   FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
@@ -1686,10 +1728,6 @@
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
-      MachineOperatorBuilder::kFloat32Max |
-      MachineOperatorBuilder::kFloat32Min |
-      MachineOperatorBuilder::kFloat64Max |
-      MachineOperatorBuilder::kFloat64Min |
       MachineOperatorBuilder::kWord32ShiftIsSafe;
   if (CpuFeatures::IsSupported(POPCNT)) {
     flags |= MachineOperatorBuilder::kWord32Popcnt;